Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Fri, 16 Dec 2011 07:11:14 +0000 (02:11 -0500)
committer David S. Miller <davem@davemloft.net>
Fri, 16 Dec 2011 07:11:14 +0000 (02:11 -0500)
Conflicts:
drivers/net/ethernet/freescale/fsl_pq_mdio.c
net/batman-adv/translation-table.c
net/ipv6/route.c

1074 files changed:
Documentation/cgroups/memory.txt
Documentation/cgroups/net_prio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/calxeda-xgmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/can/cc770.txt [new file with mode: 0644]
Documentation/feature-removal-schedule.txt
Documentation/networking/00-INDEX
Documentation/networking/batman-adv.txt
Documentation/networking/ieee802154.txt
Documentation/networking/ifenslave.c
Documentation/networking/ip-sysctl.txt
Documentation/networking/openvswitch.txt [new file with mode: 0644]
Documentation/networking/team.txt [new file with mode: 0644]
MAINTAINERS
arch/alpha/include/asm/socket.h
arch/arm/include/asm/socket.h
arch/avr32/include/asm/socket.h
arch/cris/include/asm/socket.h
arch/frv/include/asm/socket.h
arch/h8300/include/asm/socket.h
arch/ia64/include/asm/socket.h
arch/m32r/include/asm/socket.h
arch/m68k/include/asm/socket.h
arch/mips/include/asm/socket.h
arch/mn10300/include/asm/socket.h
arch/parisc/include/asm/socket.h
arch/powerpc/boot/dts/tqm8548-bigflash.dts
arch/powerpc/boot/dts/tqm8548.dts
arch/powerpc/boot/dts/tqm8xx.dts
arch/powerpc/include/asm/socket.h
arch/s390/include/asm/socket.h
arch/sparc/include/asm/socket.h
arch/xtensa/include/asm/socket.h
drivers/atm/iphase.c
drivers/bcma/host_pci.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/btusb.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_vhci.c
drivers/ieee802154/fakehard.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/isdn/gigaset/i4l.c
drivers/lguest/lguest_device.c
drivers/misc/eeprom/eeprom_93cx6.c
drivers/misc/sgi-xp/xpnet.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_ipv6.c [deleted file]
drivers/net/bonding/bond_main.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_shmcore.c
drivers/net/caif/caif_spi.c
drivers/net/can/Kconfig
drivers/net/can/Makefile
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/Kconfig [new file with mode: 0644]
drivers/net/can/cc770/Makefile [new file with mode: 0644]
drivers/net/can/cc770/cc770.c [new file with mode: 0644]
drivers/net/can/cc770/cc770.h [new file with mode: 0644]
drivers/net/can/cc770/cc770_isa.c [new file with mode: 0644]
drivers/net/can/cc770/cc770_platform.c [new file with mode: 0644]
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/mscan/mscan.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/sja1000_isa.c
drivers/net/can/sja1000/sja1000_of_platform.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/dsa/Kconfig [new file with mode: 0644]
drivers/net/dsa/Makefile [new file with mode: 0644]
drivers/net/dsa/mv88e6060.c [moved from net/dsa/mv88e6060.c with 96% similarity]
drivers/net/dsa/mv88e6123_61_65.c [moved from net/dsa/mv88e6123_61_65.c with 96% similarity]
drivers/net/dsa/mv88e6131.c [moved from net/dsa/mv88e6131.c with 96% similarity]
drivers/net/dsa/mv88e6xxx.c [moved from net/dsa/mv88e6xxx.c with 93% similarity]
drivers/net/dsa/mv88e6xxx.h [moved from net/dsa/mv88e6xxx.h with 95% similarity]
drivers/net/dummy.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/8390.h
drivers/net/ethernet/8390/apne.c
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/8390/es3210.c
drivers/net/ethernet/8390/hp-plus.c
drivers/net/ethernet/8390/hp.c
drivers/net/ethernet/8390/hydra.c
drivers/net/ethernet/8390/lne390.c
drivers/net/ethernet/8390/ne-h8300.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/8390/ne2.c
drivers/net/ethernet/8390/ne2k-pci.c
drivers/net/ethernet/8390/ne3210.c
drivers/net/ethernet/8390/stnic.c
drivers/net/ethernet/8390/zorro8390.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/amd8111e.h
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/sunlance.c
drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/atheros/atlx/atlx.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad_ethtool.c
drivers/net/ethernet/brocade/bna/cna.h
drivers/net/ethernet/calxeda/Kconfig [new file with mode: 0644]
drivers/net/ethernet/calxeda/Makefile [new file with mode: 0644]
drivers/net/ethernet/calxeda/xgmac.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb/sge.c
drivers/net/ethernet/chelsio/cxgb/sge.h
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
drivers/net/ethernet/chelsio/cxgb3/l2t.c
drivers/net/ethernet/chelsio/cxgb3/l2t.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic_dev.c
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
drivers/net/ethernet/i825xx/eepro.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/Makefile
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_port.h
drivers/net/ethernet/mellanox/mlx4/en_resources.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/icm.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/sense.c
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/micrel/Kconfig
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ks8851.h
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/jazzsonic.c
drivers/net/ethernet/natsemi/macsonic.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/ethernet/xircom/xirc2ps_cs.c
drivers/net/ifb.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/pxaficp_ir.c
drivers/net/irda/sh_irda.c
drivers/net/irda/sh_sir.c
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/mii.c
drivers/net/phy/mdio-bitbang.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/phy_device.c
drivers/net/ppp/pptp.c
drivers/net/team/Kconfig [new file with mode: 0644]
drivers/net/team/Makefile [new file with mode: 0644]
drivers/net/team/team.c [new file with mode: 0644]
drivers/net/team/team_mode_activebackup.c [new file with mode: 0644]
drivers/net/team/team_mode_roundrobin.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/Makefile
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath5k/ani.c
drivers/net/wireless/ath/ath5k/ani.h
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/attach.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/caps.c
drivers/net/wireless/ath/ath5k/desc.c
drivers/net/wireless/ath/ath5k/desc.h
drivers/net/wireless/ath/ath5k/dma.c
drivers/net/wireless/ath/ath5k/gpio.c
drivers/net/wireless/ath/ath5k/initvals.c
drivers/net/wireless/ath/ath5k/pci.c
drivers/net/wireless/ath/ath5k/pcu.c
drivers/net/wireless/ath/ath5k/phy.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath5k/reg.h
drivers/net/wireless/ath/ath5k/reset.c
drivers/net/wireless/ath/ath5k/rfbuffer.h
drivers/net/wireless/ath/ath5k/rfgain.h
drivers/net/wireless/ath/ath6kl/Makefile
drivers/net/wireless/ath/ath6kl/bmi.c
drivers/net/wireless/ath/ath6kl/bmi.h
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/cfg80211.h
drivers/net/wireless/ath/ath6kl/common.h
drivers/net/wireless/ath/ath6kl/core.h
drivers/net/wireless/ath/ath6kl/debug.c
drivers/net/wireless/ath/ath6kl/debug.h
drivers/net/wireless/ath/ath6kl/hif-ops.h
drivers/net/wireless/ath/ath6kl/hif.c [moved from drivers/net/wireless/ath/ath6kl/htc_hif.c with 80% similarity]
drivers/net/wireless/ath/ath6kl/hif.h
drivers/net/wireless/ath/ath6kl/htc.c
drivers/net/wireless/ath/ath6kl/htc.h
drivers/net/wireless/ath/ath6kl/htc_hif.h [deleted file]
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath6kl/sdio.c
drivers/net/wireless/ath/ath6kl/target.h
drivers/net/wireless/ath/ath6kl/txrx.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath6kl/wmi.h
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_mci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/btcoex.c
drivers/net/wireless/ath/ath9k/btcoex.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/mci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h [deleted file]
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
drivers/net/wireless/brcm80211/brcmsmac/channel.c
drivers/net/wireless/brcm80211/brcmsmac/dma.c
drivers/net/wireless/brcm80211/brcmsmac/dma.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/main.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/brcm80211/brcmsmac/pmu.c
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/brcm80211/brcmsmac/rate.h
drivers/net/wireless/brcm80211/brcmsmac/srom.c
drivers/net/wireless/brcm80211/brcmsmac/srom.h
drivers/net/wireless/brcm80211/brcmutil/utils.c
drivers/net/wireless/brcm80211/include/brcmu_utils.h
drivers/net/wireless/brcm80211/include/defs.h
drivers/net/wireless/brcm80211/include/soc.h
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw.h
drivers/net/wireless/iwlegacy/3945-debug.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/3945-mac.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/3945-rs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/3945.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/3945.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/4965-calib.c [moved from drivers/net/wireless/iwlegacy/iwl-4965-calib.c with 55% similarity]
drivers/net/wireless/iwlegacy/4965-debug.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/4965-mac.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/4965-rs.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/4965.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/4965.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/Kconfig
drivers/net/wireless/iwlegacy/Makefile
drivers/net/wireless/iwlegacy/commands.h [moved from drivers/net/wireless/iwlegacy/iwl-commands.h with 79% similarity]
drivers/net/wireless/iwlegacy/common.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/common.h [new file with mode: 0644]
drivers/net/wireless/iwlegacy/csr.h [moved from drivers/net/wireless/iwlegacy/iwl-csr.h with 84% similarity]
drivers/net/wireless/iwlegacy/debug.c [new file with mode: 0644]
drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945-fh.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945-hw.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945-led.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945-led.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945-rs.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-3945.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-calib.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-hw.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-led.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-led.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-lib.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-rs.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-rx.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-sta.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-tx.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965-ucode.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-4965.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-core.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-core.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-debug.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-debugfs.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-dev.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-devtrace.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-devtrace.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-eeprom.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-eeprom.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-fh.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-hcmd.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-helpers.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-io.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-led.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-led.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-legacy-rs.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-power.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-power.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-rx.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-scan.c [deleted file]
drivers/net/wireless/iwlegacy/iwl-spectrum.h
drivers/net/wireless/iwlegacy/iwl-sta.c
drivers/net/wireless/iwlegacy/iwl-sta.h [deleted file]
drivers/net/wireless/iwlegacy/iwl-tx.c [deleted file]
drivers/net/wireless/iwlegacy/iwl3945-base.c [deleted file]
drivers/net/wireless/iwlegacy/iwl4965-base.c [deleted file]
drivers/net/wireless/iwlegacy/prph.h [moved from drivers/net/wireless/iwlegacy/iwl-prph.h with 83% similarity]
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.c
drivers/net/wireless/iwlwifi/iwl-agn-calib.h
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-rx.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-cfg.h
drivers/net/wireless/iwlwifi/iwl-commands.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-debugfs.c
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-devtrace.c
drivers/net/wireless/iwlwifi/iwl-devtrace.h
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-mac80211.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-pci.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-shared.h
drivers/net/wireless/iwlwifi/iwl-testmode.c [moved from drivers/net/wireless/iwlwifi/iwl-sv-open.c with 78% similarity]
drivers/net/wireless/iwlwifi/iwl-testmode.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/iwl-ucode.c [moved from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c with 64% similarity]
drivers/net/wireless/iwmc3200wifi/cfg80211.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/ethtool.c
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cfp.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/orinoco/scan.c
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/isl_ioctl.h
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rayctl.h
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/wl1251/spi.c
drivers/net/wireless/wl12xx/Kconfig
drivers/net/wireless/wl12xx/Makefile
drivers/net/wireless/wl12xx/acx.c
drivers/net/wireless/wl12xx/acx.h
drivers/net/wireless/wl12xx/boot.c
drivers/net/wireless/wl12xx/cmd.c
drivers/net/wireless/wl12xx/cmd.h
drivers/net/wireless/wl12xx/conf.h
drivers/net/wireless/wl12xx/debug.h [new file with mode: 0644]
drivers/net/wireless/wl12xx/debugfs.c
drivers/net/wireless/wl12xx/event.c
drivers/net/wireless/wl12xx/event.h
drivers/net/wireless/wl12xx/init.c
drivers/net/wireless/wl12xx/init.h
drivers/net/wireless/wl12xx/io.c
drivers/net/wireless/wl12xx/io.h
drivers/net/wireless/wl12xx/main.c
drivers/net/wireless/wl12xx/ps.c
drivers/net/wireless/wl12xx/ps.h
drivers/net/wireless/wl12xx/reg.h
drivers/net/wireless/wl12xx/rx.c
drivers/net/wireless/wl12xx/scan.c
drivers/net/wireless/wl12xx/scan.h
drivers/net/wireless/wl12xx/sdio.c
drivers/net/wireless/wl12xx/sdio_test.c [deleted file]
drivers/net/wireless/wl12xx/spi.c
drivers/net/wireless/wl12xx/testmode.c
drivers/net/wireless/wl12xx/tx.c
drivers/net/wireless/wl12xx/tx.h
drivers/net/wireless/wl12xx/wl12xx.h
drivers/net/wireless/wl12xx/wl12xx_80211.h
drivers/net/wireless/wl12xx/wl12xx_platform_data.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/pn533.c
drivers/s390/kvm/kvm_virtio.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/usb/gadget/f_phonet.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_pci.c
fs/dlm/lowcomms.c
include/asm-generic/socket.h
include/linux/Kbuild
include/linux/atmdev.h
include/linux/can/platform/cc770.h [new file with mode: 0644]
include/linux/cgroup_subsys.h
include/linux/dynamic_queue_limits.h [new file with mode: 0644]
include/linux/eeprom_93cx6.h
include/linux/errqueue.h
include/linux/ethtool.h
include/linux/genetlink.h
include/linux/ieee80211.h
include/linux/if.h
include/linux/if_ether.h
include/linux/if_team.h [new file with mode: 0644]
include/linux/if_vlan.h
include/linux/inet_diag.h
include/linux/ipv6.h
include/linux/lockd/lockd.h
include/linux/mdio-bitbang.h
include/linux/mdio-gpio.h
include/linux/memcontrol.h
include/linux/mii.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/neighbour.h
include/linux/netdev_features.h [new file with mode: 0644]
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netlink.h
include/linux/nl80211.h
include/linux/openvswitch.h [new file with mode: 0644]
include/linux/phonet.h
include/linux/pkt_sched.h
include/linux/skbuff.h
include/linux/sock_diag.h [new file with mode: 0644]
include/linux/sunrpc/clnt.h
include/linux/virtio_config.h
include/linux/wl12xx.h
include/net/atmclip.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/caif/caif_dev.h
include/net/caif/caif_layer.h
include/net/caif/caif_spi.h
include/net/caif/cfcnfg.h
include/net/caif/cfserl.h
include/net/cfg80211.h
include/net/dsa.h
include/net/dst.h
include/net/flow.h
include/net/flow_keys.h [new file with mode: 0644]
include/net/genetlink.h
include/net/icmp.h
include/net/ieee80211_radiotap.h
include/net/ieee802154.h
include/net/inet6_hashtables.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_route.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/net_namespace.h
include/net/netfilter/nf_tproxy_core.h
include/net/netns/ipv4.h
include/net/netns/mib.h
include/net/netns/xfrm.h
include/net/netprio_cgroup.h [new file with mode: 0644]
include/net/nfc/nci.h
include/net/nfc/nci_core.h
include/net/protocol.h
include/net/red.h
include/net/regulatory.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/snmp.h
include/net/sock.h
include/net/tcp.h
include/net/tcp_memcontrol.h [new file with mode: 0644]
include/net/udp.h
include/net/xfrm.h
init/Kconfig
lib/Kconfig
lib/Makefile
lib/dynamic_queue_limits.c [new file with mode: 0644]
lib/reciprocal_div.c
lib/vsprintf.c
mm/memcontrol.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_gvrp.c
net/8021q/vlan_netlink.c
net/8021q/vlanproc.c
net/Kconfig
net/Makefile
net/atm/atm_misc.c
net/atm/br2684.c
net/atm/clip.c
net/atm/common.c
net/atm/common.h
net/atm/pppoatm.c
net/ax25/af_ax25.c
net/batman-adv/bat_sysfs.c
net/batman-adv/bitarray.c
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/main.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/routing.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/batman-adv/vis.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sysfs.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/bluetooth/smp.c
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_private.h
net/bridge/netfilter/ebt_ip6.c
net/bridge/netfilter/ebt_log.c
net/caif/Kconfig
net/caif/Makefile
net/caif/caif_dev.c
net/caif/caif_usb.c [new file with mode: 0644]
net/caif/cfcnfg.c
net/caif/cfpkt_skbuff.c
net/caif/cfrfml.c
net/caif/cfserl.c
net/core/Makefile
net/core/dev.c
net/core/dst.c
net/core/ethtool.c
net/core/flow_dissector.c [new file with mode: 0644]
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/netprio_cgroup.c [new file with mode: 0644]
net/core/pktgen.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c [new file with mode: 0644]
net/core/sysctl_net_core.c
net/dccp/dccp.h
net/dccp/diag.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dccp/probe.c
net/decnet/dn_neigh.c
net/decnet/dn_route.c
net/dsa/Kconfig
net/dsa/Makefile
net/dsa/dsa.c
net/dsa/dsa_priv.h
net/dsa/tag_dsa.c
net/dsa/tag_edsa.c
net/dsa/tag_trailer.c
net/econet/af_econet.c
net/ieee802154/6lowpan.c
net/ieee802154/6lowpan.h
net/ieee802154/dgram.c
net/ieee802154/raw.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c [new file with mode: 0644]
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/tunnel4.c
net/ipv4/udp.c
net/ipv4/udp_diag.c [new file with mode: 0644]
net/ipv4/xfrm4_tunnel.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/anycast.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/exthdrs_core.c
net/ipv6/fib6_rules.c
net/ipv6/icmp.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/mip6.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_queue.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_mode_beet.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_state.c
net/irda/irttp.c
net/key/af_key.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/driver-trace.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/pm.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wep.c
net/mac80211/wme.c
net/mac80211/wme.h
net/mac80211/work.c
net/mac80211/wpa.c
net/netfilter/core.c
net/netfilter/ipset/ip_set_getport.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/xt_AUDIT.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_TEE.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_socket.c
net/netlabel/netlabel_addrlist.c
net/netlabel/netlabel_addrlist.h
net/netlabel/netlabel_domainhash.c
net/netlabel/netlabel_domainhash.h
net/netlabel/netlabel_kapi.c
net/netlabel/netlabel_mgmt.c
net/netlabel/netlabel_unlabeled.c
net/netlink/genetlink.c
net/netrom/af_netrom.c
net/netrom/nr_route.c
net/nfc/core.c
net/nfc/nci/core.c
net/nfc/nci/data.c
net/nfc/nci/lib.c
net/nfc/nci/ntf.c
net/nfc/nci/rsp.c
net/nfc/netlink.c
net/nfc/nfc.h
net/nfc/rawsock.c
net/openvswitch/Kconfig [new file with mode: 0644]
net/openvswitch/Makefile [new file with mode: 0644]
net/openvswitch/actions.c [new file with mode: 0644]
net/openvswitch/datapath.c [new file with mode: 0644]
net/openvswitch/datapath.h [new file with mode: 0644]
net/openvswitch/dp_notify.c [new file with mode: 0644]
net/openvswitch/flow.c [new file with mode: 0644]
net/openvswitch/flow.h [new file with mode: 0644]
net/openvswitch/vport-internal_dev.c [new file with mode: 0644]
net/openvswitch/vport-internal_dev.h [new file with mode: 0644]
net/openvswitch/vport-netdev.c [new file with mode: 0644]
net/openvswitch/vport-netdev.h [new file with mode: 0644]
net/openvswitch/vport.c [new file with mode: 0644]
net/openvswitch/vport.h [new file with mode: 0644]
net/packet/af_packet.c
net/phonet/pep.c
net/rfkill/core.c
net/rfkill/rfkill-gpio.c
net/rfkill/rfkill-regulator.c
net/rxrpc/ar-key.c
net/sched/cls_flow.c
net/sched/sch_choke.c
net/sched/sch_generic.c
net/sched/sch_gred.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_teql.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/socket.c
net/sunrpc/addr.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcauth_unix.c
net/sunrpc/svcsock.c
net/wireless/Kconfig
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/mesh.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/util.c
net/wireless/wext-compat.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
security/lsm_audit.c
security/selinux/hooks.c
security/selinux/netnode.c

diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index cc0ebc5..6922b6c 100644 (file)
@@ -44,8 +44,9 @@ Features:
  - oom-killer disable knob and oom-notifier
  - Root cgroup has no limit controls.
 
- Kernel memory and Hugepages are not under control yet. We just manage
- pages on LRU. To add more controls, we have to take care of performance.
+ Hugepages are not under control yet. We just manage pages on LRU. To add more
+ controls, we have to take care of performance. Kernel memory support is a work
+ in progress, and the current version provides basic functionality.
 
 Brief summary of control files.
 
@@ -56,8 +57,11 @@ Brief summary of control files.
                                 (See 5.5 for details)
  memory.memsw.usage_in_bytes    # show current res_counter usage for memory+Swap
                                 (See 5.5 for details)
+ memory.kmem.usage_in_bytes     # show current res_counter usage for kmem only.
+                                (See 2.7 for details)
  memory.limit_in_bytes          # set/show limit of memory usage
  memory.memsw.limit_in_bytes    # set/show limit of memory+Swap usage
+ memory.kmem.limit_in_bytes     # if allowed, set/show limit of kernel memory
  memory.failcnt                         # show the number of memory usage hits limits
  memory.memsw.failcnt           # show the number of memory+Swap hits limits
  memory.max_usage_in_bytes      # show max memory usage recorded
@@ -72,6 +76,11 @@ Brief summary of control files.
  memory.oom_control             # set/show oom controls.
  memory.numa_stat               # show the number of memory usage per numa node
 
+ memory.independent_kmem_limit  # select whether or not kernel memory limits are
+                                  independent of user limits
+ memory.kmem.tcp.limit_in_bytes  # set/show hard limit for tcp buf memory
+ memory.kmem.tcp.usage_in_bytes  # show current tcp buf memory allocation
+
 1. History
 
 The memory controller has a long history. A request for comments for the memory
@@ -255,6 +264,39 @@ When oom event notifier is registered, event will be delivered.
   per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
   zone->lru_lock, it has no lock of its own.
 
+2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+
+With the Kernel memory extension, the Memory Controller is able to limit
+the amount of kernel memory used by the system. Kernel memory is fundamentally
+different from user memory, since it can't be swapped out, which makes it
+possible to DoS the system by consuming too much of this precious resource.
+
+Some kernel memory resources may be accounted and limited separately from the
+main "kmem" resource. For instance, a slab cache that is considered important
+enough to be limited separately may have its own knobs.
+
+Kernel memory limits are not imposed for the root cgroup. Usage for the root
+cgroup may or may not be accounted.
+
+Memory limits as specified by the standard Memory Controller may or may not
+take kernel memory into consideration. This is achieved through the file
+memory.independent_kmem_limit. A value different from 0 will allow kernel
+memory to be controlled separately.
+
+When kernel memory limits are not independent, the limit values set in
+memory.kmem files are ignored.
+
+Currently no soft limit is implemented for kernel memory. It is future work
+to trigger slab reclaim when those limits are reached.
+
+2.7.1 Current Kernel Memory resources accounted
+
+* sockets memory pressure: some socket protocols have memory pressure
+thresholds. The Memory Controller allows them to be controlled individually
+per cgroup, instead of globally.
+
+* tcp memory pressure: sockets memory pressure for the tcp protocol.
+
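+As an illustration (a minimal sketch; the mount point, the "foo" group name and
+the limit value are only examples), TCP buffer memory of a child group could be
+capped with:
+
+# mkdir /sys/fs/cgroup/memory/foo
+# echo 1 > /sys/fs/cgroup/memory/foo/memory.independent_kmem_limit
+# echo 16777216 > /sys/fs/cgroup/memory/foo/memory.kmem.tcp.limit_in_bytes
+# cat /sys/fs/cgroup/memory/foo/memory.kmem.tcp.usage_in_bytes
+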
 3. User Interface
 
 0. Configuration
diff --git a/Documentation/cgroups/net_prio.txt b/Documentation/cgroups/net_prio.txt
new file mode 100644 (file)
index 0000000..01b3226
--- /dev/null
@@ -0,0 +1,53 @@
+Network priority cgroup
+-------------------------
+
+The Network priority cgroup provides an interface to allow an administrator to
+dynamically set the priority of network traffic generated by various
+applications.
+
+Nominally, an application would set the priority of its traffic via the
+SO_PRIORITY socket option.  This, however, is not always possible because:
+
+1) The application may not have been coded to set this value.
+2) The priority of application traffic is often a site-specific administrative
+   decision rather than an application-defined one.
+
+This cgroup allows an administrator to assign a process to a group which defines
+the priority of egress traffic on a given interface. Network priority groups can
+be created by first mounting the cgroup filesystem.
+
+# mount -t cgroup -onet_prio none /sys/fs/cgroup/net_prio
+
+With the above step, the initial group acting as the parent accounting group
+becomes visible at '/sys/fs/cgroup/net_prio'.  This group includes all tasks in
+the system. '/sys/fs/cgroup/net_prio/tasks' lists the tasks in this cgroup.
+
+Each net_prio cgroup contains two files that are subsystem specific:
+
+net_prio.prioidx
+This file is read-only, and is simply informative.  It contains a unique integer
+value that the kernel uses as an internal representation of this cgroup.
+
+net_prio.ifpriomap
+This file contains a map of the priorities assigned to traffic originating from
+processes in this group and egressing the system on various interfaces. It
+contains a list of tuples in the form <ifname priority>.  Contents of this file
+can be modified by echoing a string into the file using the same tuple format.
+For example:
+
+echo "eth0 5" > /sys/fs/cgroups/net_prio/iscsi/net_prio.ifpriomap
+
+This command would cause any traffic originating from processes belonging to
+the iscsi net_prio cgroup and egressing on interface eth0 to have its priority
+set to the value 5. The parent accounting group also has a
+writeable 'net_prio.ifpriomap' file that can be used to set a system default
+priority.
+
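+As a sketch (the 'iscsi' group name and the pid are only examples), such a
+group is created and a process moved into it with the usual cgroup operations:
+
+# mkdir /sys/fs/cgroup/net_prio/iscsi
+# echo <pid> > /sys/fs/cgroup/net_prio/iscsi/tasks
+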
+Priorities are set immediately prior to queueing a frame to the device
+queueing discipline (qdisc) so priorities will be assigned prior to the hardware
+queue selection being made.
+
+One usage for the net_prio cgroup is with the mqprio qdisc, allowing application
+traffic to be steered to hardware/driver-based traffic classes. These mappings
+can then be managed by administrators or other networking protocols such as
+DCBX.
diff --git a/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
new file mode 100644 (file)
index 0000000..411727a
--- /dev/null
@@ -0,0 +1,15 @@
+* Calxeda Highbank 10Gb XGMAC Ethernet
+
+Required properties:
+- compatible : Should be "calxeda,hb-xgmac"
+- reg : Address and length of the register set for the device
+- interrupts : Should contain 3 xgmac interrupts. The 1st is the main interrupt,
+  the 2nd is the power management interrupt, and the 3rd is the low power state
+  interrupt.
+
+Example:
+
+ethernet@fff50000 {
+        compatible = "calxeda,hb-xgmac";
+        reg = <0xfff50000 0x1000>;
+        interrupts = <0 77 4  0 78 4  0 79 4>;
+};
diff --git a/Documentation/devicetree/bindings/net/can/cc770.txt b/Documentation/devicetree/bindings/net/can/cc770.txt
new file mode 100644 (file)
index 0000000..77027bf
--- /dev/null
@@ -0,0 +1,53 @@
+Memory mapped Bosch CC770 and Intel AN82527 CAN controller
+
+Note: The CC770 is a CAN controller from Bosch, which is 100%
+compatible with the old AN82527 from Intel, but with its "bugs" fixed.
+
+Required properties:
+
+- compatible : should be "bosch,cc770" for the CC770 and "intc,82527"
+       for the AN82527.
+
+- reg : should specify the chip select, address offset and size required
+       to map the registers of the controller. The size is usually 0x80.
+
+- interrupts : property with a value describing the interrupt source
+       (number and sensitivity) required for the controller.
+
+Optional properties:
+
+- bosch,external-clock-frequency : frequency of the external oscillator
+       clock in Hz. Note that the internal clock frequency used by the
+       controller is half of that value. If not specified, a default
+       value of 16000000 (16 MHz) is used.
+
+- bosch,clock-out-frequency : clock frequency in Hz on the CLKOUT pin.
+       If not specified or if the specified value is 0, the CLKOUT pin
+       will be disabled.
+
+- bosch,slew-rate : slew rate of the CLKOUT signal. If not specified,
+       a reasonable value will be calculated.
+
+- bosch,disconnect-rx0-input : see data sheet.
+
+- bosch,disconnect-rx1-input : see data sheet.
+
+- bosch,disconnect-tx1-output : see data sheet.
+
+- bosch,polarity-dominant : see data sheet.
+
+- bosch,divide-memory-clock : see data sheet.
+
+- bosch,iso-low-speed-mux : see data sheet.
+
+For further information, please refer to the CC770 or AN82527 data sheets.
+
+Examples:
+
+can@3,100 {
+       compatible = "bosch,cc770";
+       reg = <3 0x100 0x80>;
+       interrupts = <2 0>;
+       interrupt-parent = <&mpic>;
+       bosch,external-clock-frequency = <16000000>;
+};
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 3d84912..33f7327 100644 (file)
@@ -263,8 +263,7 @@ Who:        Ravikiran Thirumalai <kiran@scalex86.org>
 
 What:  Code that is now under CONFIG_WIRELESS_EXT_SYSFS
        (in net/core/net-sysfs.c)
-When:  After the only user (hal) has seen a release with the patches
-       for enough time, probably some time in 2010.
+When:  3.5
 Why:   Over 1K .text/.data size reduction, data is available in other
        ways (ioctls)
 Who:   Johannes Berg <johannes@sipsolutions.net>
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index bbce121..9ad9dde 100644 (file)
@@ -144,6 +144,8 @@ nfc.txt
        - The Linux Near Field Communication (NFC) subsystem.
 olympic.txt
        - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info.
+openvswitch.txt
+       - Open vSwitch developer documentation.
 operstates.txt
        - Overview of network interface operational states.
 packet_mmap.txt
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index c86d03f..221ad0c 100644 (file)
@@ -200,15 +200,16 @@ abled  during run time. Following log_levels are defined:
 
 0 - All  debug  output  disabled
 1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or tt entry added / changed / deleted
-3 - Enable all messages
+2 - Enable messages related to route added / changed / deleted
+4 - Enable messages related to translation table operations
+7 - Enable all messages
 
 The debug output can be changed at runtime  using  the  file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
 # echo 2 > /sys/class/net/bat0/mesh/log_level
 
-will enable debug messages for when routes or TTs change.
+will enable debug messages for when routes change.
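+
+Assuming the values listed above combine as a bitmask (as the "all messages"
+value of 7 suggests), e.g.
+
+# echo 6 > /sys/class/net/bat0/mesh/log_level
+
+would enable both route change and translation table messages.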
 
 
 BATCTL
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index f41ea24..1dc1c24 100644 (file)
@@ -78,3 +78,30 @@ in software. This is currently WIP.
 
 See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
 
+6LoWPAN Linux implementation
+============================
+
+The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+octets of actual MAC payload once security is turned on, on a wireless link
+with a link throughput of 250 kbps or less.  The 6LoWPAN adaptation format
+[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
+taking into account limited bandwidth, memory, or energy resources that are
+expected in applications such as wireless Sensor Networks.  [RFC4944] defines
+a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header
+to support the IPv6 minimum MTU requirement [RFC2460], and stateless header
+compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the
+relatively large IPv6 and UDP headers down to (in the best case) several bytes.
+
+In September 2011 an update to the standard, [RFC6282], was published.
+It deprecates HC1 and HC2 compression and defines the IPHC encoding format,
+which is used in this Linux implementation.
+
+All the code related to 6lowpan can be found in net/ieee802154/6lowpan.*
+
+To set up a 6lowpan interface you need (busybox release > 1.17.0):
+1. Add an IEEE802.15.4 interface and initialize its PAN id;
+2. Add a 6lowpan interface with a command like:
+   # ip link add link wpan0 name lowpan0 type lowpan
+3. Set the MAC address (if needed):
+   # ip link set lowpan0 address de:ad:be:ef:ca:fe:ba:be
+4. Bring up the 'lowpan0' interface (see the example below).
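+
+For step 4, the standard iproute2 command can be used (a minimal sketch; the
+interface name matches the example above):
+   # ip link set lowpan0 up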
diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c
index 65968fb..ac5debb 100644 (file)
@@ -539,12 +539,14 @@ static int if_getconfig(char *ifname)
                metric = 0;
        } else
                metric = ifr.ifr_metric;
+       printf("The result of SIOCGIFMETRIC is %d\n", metric);
 
        strcpy(ifr.ifr_name, ifname);
        if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0)
                mtu = 0;
        else
                mtu = ifr.ifr_mtu;
+       printf("The result of SIOCGIFMTU is %d\n", mtu);
 
        strcpy(ifr.ifr_name, ifname);
        if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 589f2da..ad3e80e 100644 (file)
@@ -31,6 +31,16 @@ neigh/default/gc_thresh3 - INTEGER
        when using large numbers of interfaces and when communicating
        with large numbers of directly-connected peers.
 
+neigh/default/unres_qlen_bytes - INTEGER
+       The maximum number of bytes which may be used by packets
+       queued for each unresolved address by other network layers.
+       (added in linux 3.3)
+
+neigh/default/unres_qlen - INTEGER
+       The maximum number of packets which may be queued for each
+       unresolved address by other network layers.
+       (deprecated in linux 3.3): use unres_qlen_bytes instead.
+
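+       As a sketch (the value is arbitrary; a corresponding ipv6 path exists
+       as well), the byte-based limit can be set with e.g.:
+       # sysctl -w net.ipv4.neigh.default.unres_qlen_bytes=65536
+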
 mtu_expires - INTEGER
        Time, in seconds, that cached PMTU information is kept.
 
@@ -165,6 +175,9 @@ tcp_congestion_control - STRING
        connections. The algorithm "reno" is always available, but
        additional choices may be available based on kernel configuration.
        Default is set as part of kernel configuration.
+       For passive connections, the listener congestion control choice
+       is inherited.
+       [see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ]
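+       As an illustration (the algorithm name is just an example), calling
+       setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "reno", 4) on a listening
+       socket makes connections returned by accept() start out with "reno".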
 
 tcp_cookie_size - INTEGER
        Default size of TCP Cookie Transactions (TCPCT) option, that may be
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
new file mode 100644 (file)
index 0000000..b8a048b
--- /dev/null
@@ -0,0 +1,195 @@
+Open vSwitch datapath developer documentation
+=============================================
+
+The Open vSwitch kernel module allows flexible userspace control over
+flow-level packet processing on selected network devices.  It can be
+used to implement a plain Ethernet switch, network device bonding,
+VLAN processing, network access control, flow-based network control,
+and so on.
+
+The kernel module implements multiple "datapaths" (analogous to
+bridges), each of which can have multiple "vports" (analogous to ports
+within a bridge).  Each datapath also has associated with it a "flow
+table" that userspace populates with "flows" that map from keys based
+on packet headers and metadata to sets of actions.  The most common
+action forwards the packet to another vport; other actions are also
+implemented.
+
+When a packet arrives on a vport, the kernel module processes it by
+extracting its flow key and looking it up in the flow table.  If there
+is a matching flow, it executes the associated actions.  If there is
+no match, it queues the packet to userspace for processing (as part of
+its processing, userspace will likely set up a flow to handle further
+packets of the same type entirely in-kernel).
+
+
+Flow key compatibility
+----------------------
+
+Network protocols evolve over time.  New protocols become important
+and existing protocols lose their prominence.  For the Open vSwitch
+kernel module to remain relevant, it must be possible for newer
+versions to parse additional protocols as part of the flow key.  It
+might even be desirable, someday, to drop support for parsing
+protocols that have become obsolete.  Therefore, the Netlink interface
+to Open vSwitch is designed to allow carefully written userspace
+applications to work with any version of the flow key, past or future.
+
+To support this forward and backward compatibility, whenever the
+kernel module passes a packet to userspace, it also passes along the
+flow key that it parsed from the packet.  Userspace then extracts its
+own notion of a flow key from the packet and compares it against the
+kernel-provided version:
+
+    - If userspace's notion of the flow key for the packet matches the
+      kernel's, then nothing special is necessary.
+
+    - If the kernel's flow key includes more fields than the userspace
+      version of the flow key, for example if the kernel decoded IPv6
+      headers but userspace stopped at the Ethernet type (because it
+      does not understand IPv6), then again nothing special is
+      necessary.  Userspace can still set up a flow in the usual way,
+      as long as it uses the kernel-provided flow key to do it.
+
+    - If the userspace flow key includes more fields than the
+      kernel's, for example if userspace decoded an IPv6 header but
+      the kernel stopped at the Ethernet type, then userspace can
+      forward the packet manually, without setting up a flow in the
+      kernel.  This case is bad for performance because every packet
+      that the kernel considers part of the flow must go to userspace,
+      but the forwarding behavior is correct.  (If userspace can
+      determine that the values of the extra fields would not affect
+      forwarding behavior, then it could set up a flow anyway.)
+
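+The decision a careful userspace application makes for each upcalled
+packet can therefore be sketched as follows (the key-set helpers and
+types are hypothetical, shown only to illustrate the three cases above):
+
+    /* Hypothetical sketch of the three compatibility cases. */
+    enum upcall_disposition { INSTALL_FLOW, INSTALL_FLOW_WITH_KERNEL_KEY,
+                              FORWARD_EACH_PACKET_IN_USERSPACE };
+
+    enum upcall_disposition decide(const struct key_set *kernel_key,
+                                   const struct key_set *user_key)
+    {
+            if (key_set_equal(user_key, kernel_key))
+                    return INSTALL_FLOW;                  /* first case */
+            if (key_set_is_subset(user_key, kernel_key))
+                    return INSTALL_FLOW_WITH_KERNEL_KEY;  /* second case */
+            return FORWARD_EACH_PACKET_IN_USERSPACE;      /* third case */
+    }
+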
+How flow keys evolve over time is important to making this work, so
+the following sections go into detail.
+
+
+Flow key format
+---------------
+
+A flow key is passed over a Netlink socket as a sequence of Netlink
+attributes.  Some attributes represent packet metadata, defined as any
+information about a packet that cannot be extracted from the packet
+itself, e.g. the vport on which the packet was received.  Most
+attributes, however, are extracted from headers within the packet,
+e.g. source and destination addresses from Ethernet, IP, or TCP
+headers.
+
+The <linux/openvswitch.h> header file defines the exact format of the
+flow key attributes.  For informal explanatory purposes here, we write
+them as comma-separated strings, with parentheses indicating arguments
+and nesting.  For example, the following could represent a flow key
+corresponding to a TCP packet that arrived on vport 1:
+
+    in_port(1), eth(src=e0:91:f5:21:d0:b2, dst=00:02:e3:0f:80:a4),
+    eth_type(0x0800), ipv4(src=172.16.0.20, dst=172.18.0.52, proto=6, tos=0,
+    frag=no), tcp(src=49163, dst=80)
+
+Often we ellipsize arguments not important to the discussion, e.g.:
+
+    in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)
+
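+For illustration, the TCP example above could be serialized with the
+kernel's generic netlink attribute helpers roughly as follows.  The
+attribute constants and key structures are the ones declared in
+<linux/openvswitch.h>, but the function itself is only a sketch, not
+the datapath's real code:
+
+    /* Sketch: emit the example flow key as a flat attribute sequence. */
+    static int put_example_flow_key(struct sk_buff *msg)
+    {
+            struct ovs_key_ethernet eth = { 0 };  /* src/dst MACs filled in */
+            struct ovs_key_ipv4 ipv4 = { 0 };     /* addresses, proto, tos, frag */
+            struct ovs_key_tcp tcp = { 0 };       /* source/destination ports */
+            __be16 ethertype = htons(ETH_P_IP);
+
+            if (nla_put_u32(msg, OVS_KEY_ATTR_IN_PORT, 1) ||
+                nla_put(msg, OVS_KEY_ATTR_ETHERNET, sizeof(eth), &eth) ||
+                nla_put(msg, OVS_KEY_ATTR_ETHERTYPE, sizeof(ethertype), &ethertype) ||
+                nla_put(msg, OVS_KEY_ATTR_IPV4, sizeof(ipv4), &ipv4) ||
+                nla_put(msg, OVS_KEY_ATTR_TCP, sizeof(tcp), &tcp))
+                    return -EMSGSIZE;
+            return 0;
+    }
+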
+
+Basic rule for evolving flow keys
+---------------------------------
+
+Some care is needed to really maintain forward and backward
+compatibility for applications that follow the rules listed under
+"Flow key compatibility" above.
+
+The basic rule is obvious:
+
+    ------------------------------------------------------------------
+    New network protocol support must only supplement existing flow
+    key attributes.  It must not change the meaning of already defined
+    flow key attributes.
+    ------------------------------------------------------------------
+
+This rule does have less-obvious consequences, so it is worth working
+through a few examples.  Suppose, for example, that the kernel module
+did not already implement VLAN parsing.  Instead, it just interpreted
+the 802.1Q TPID (0x8100) as the Ethertype then stopped parsing the
+packet.  The flow key for any packet with an 802.1Q header would look
+essentially like this, ignoring metadata:
+
+    eth(...), eth_type(0x8100)
+
+Naively, to add VLAN support, it makes sense to add a new "vlan" flow
+key attribute to contain the VLAN tag, then continue to decode the
+encapsulated headers beyond the VLAN tag using the existing field
+definitions.  With this change, a TCP packet in VLAN 10 would have a
+flow key much like this:
+
+    eth(...), vlan(vid=10, pcp=0), eth_type(0x0800), ip(proto=6, ...), tcp(...)
+
+But this change would negatively affect a userspace application that
+has not been updated to understand the new "vlan" flow key attribute.
+The application could, following the flow compatibility rules above,
+ignore the "vlan" attribute that it does not understand and therefore
+assume that the flow contained IP packets.  This is a bad assumption
+(the flow only contains IP packets if one parses and skips over the
+802.1Q header) and it could cause the application's behavior to change
+across kernel versions even though it follows the compatibility rules.
+
+The solution is to use a set of nested attributes.  This is, for
+example, why 802.1Q support uses nested attributes.  A TCP packet in
+VLAN 10 is actually expressed as:
+
+    eth(...), eth_type(0x8100), vlan(vid=10, pcp=0), encap(eth_type(0x0800),
+    ip(proto=6, ...), tcp(...))
+
+Notice how the "eth_type", "ip", and "tcp" flow key attributes are
+nested inside the "encap" attribute.  Thus, an application that does
+not understand the "vlan" key will not see any of those attributes
+and therefore will not misinterpret them.  (Also, the outer eth_type
+is still 0x8100, not changed to 0x0800.)
+
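+A consumer that does understand 802.1Q can descend into the nested
+attributes with the ordinary netlink attribute iterators, along these
+lines (a sketch only, with error handling omitted):
+
+    /* Sketch: walk a flow key, recursing into OVS_KEY_ATTR_ENCAP. */
+    static void walk_key(const struct nlattr *attrs, int len)
+    {
+            const struct nlattr *a;
+            int rem;
+
+            nla_for_each_attr(a, attrs, len, rem) {
+                    if (nla_type(a) == OVS_KEY_ATTR_ENCAP)
+                            /* headers parsed after the 802.1Q tag */
+                            walk_key(nla_data(a), nla_len(a));
+                    /* ... handle the other attribute types here ... */
+            }
+    }
+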
+Handling malformed packets
+--------------------------
+
+Don't drop packets in the kernel for malformed protocol headers, bad
+checksums, etc.  This would prevent userspace from implementing a
+simple Ethernet switch that forwards every packet.
+
+Instead, in such a case, include an attribute with "empty" content.
+It doesn't matter if the empty content could be valid protocol values,
+as long as those values are rarely seen in practice, because userspace
+can always have all packets with those values forwarded up to it and
+handle them individually.
+
+For example, consider a packet that contains an IP header that
+indicates protocol 6 for TCP, but which is truncated just after the IP
+header, so that the TCP header is missing.  The flow key for this
+packet would include a tcp attribute with all-zero src and dst, like
+this:
+
+    eth(...), eth_type(0x0800), ip(proto=6, ...), tcp(src=0, dst=0)
+
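+In other words, the key extraction code emits the tcp attribute
+unconditionally once it has seen proto 6, and only fills it in when the
+header is actually present, along these lines (the helpers here are
+hypothetical):
+
+    /* Sketch: always emit a tcp attribute for IP proto 6, zeroed when
+     * the TCP header is missing or truncated. */
+    struct ovs_key_tcp tcp = { .tcp_src = 0, .tcp_dst = 0 };
+
+    if (tcp_header_present(pkt))             /* hypothetical check */
+            parse_tcp_ports(pkt, &tcp);      /* hypothetical parse */
+    nla_put(msg, OVS_KEY_ATTR_TCP, sizeof(tcp), &tcp);
+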
+As another example, consider a packet with an Ethernet type of 0x8100,
+indicating that a VLAN TCI should follow, but which is truncated just
+after the Ethernet type.  The flow key for this packet would include
+an all-zero-bits vlan and an empty encap attribute, like this:
+
+    eth(...), eth_type(0x8100), vlan(0), encap()
+
+Unlike a TCP packet with source and destination ports 0, an
+all-zero-bits VLAN TCI is not that rare, so the CFI bit (aka
+VLAN_TAG_PRESENT inside the kernel) is ordinarily set in a vlan
+attribute expressly to allow this situation to be distinguished.
+Thus, the flow key in this second example unambiguously indicates a
+missing or malformed VLAN TCI.
+
+Other rules
+-----------
+
+The other rules for flow keys are much less subtle:
+
+    - Duplicate attributes are not allowed at a given nesting level.
+
+    - Ordering of attributes is not significant.
+
+    - When the kernel sends a given flow key to userspace, it always
+      composes it the same way.  This allows userspace to hash and
+      compare entire flow keys that it may not be able to fully
+      interpret.
diff --git a/Documentation/networking/team.txt b/Documentation/networking/team.txt
new file mode 100644 (file)
index 0000000..5a01368
--- /dev/null
@@ -0,0 +1,2 @@
+Team devices are driven from userspace via the libteam library, which is available here:
+       https://github.com/jpirko/libteam
index 4475602..209ad06 100644 (file)
@@ -4868,6 +4868,14 @@ S:       Maintained
 T:     git git://openrisc.net/~jonas/linux
 F:     arch/openrisc
 
+OPENVSWITCH
+M:     Jesse Gross <jesse@nicira.com>
+L:     dev@openvswitch.org
+W:     http://openvswitch.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+S:     Maintained
+F:     net/openvswitch/
+
 OPL4 DRIVER
 M:     Clemens Ladisch <clemens@ladisch.de>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -6510,6 +6518,13 @@ W:       http://tcp-lp-mod.sourceforge.net/
 S:     Maintained
 F:     net/ipv4/tcp_lp.c
 
+TEAM DRIVER
+M:     Jiri Pirko <jpirko@redhat.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/team/
+F:     include/linux/if_team.h
+
 TEGRA SUPPORT
 M:     Colin Cross <ccross@android.com>
 M:     Olof Johansson <olof@lixom.net>
index 06edfef..082355f 100644 (file)
@@ -69,6 +69,9 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
index 90ffd04..dec6f9a 100644 (file)
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
index c8d1fae..247b88c 100644 (file)
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* __ASM_AVR32_SOCKET_H */
index 1a4a619..e269264 100644 (file)
@@ -64,6 +64,9 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
 
 
index a6b2688..ce80fda 100644 (file)
@@ -62,5 +62,8 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
 
index 04c0f45..cf1daab 100644 (file)
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
index 51427ea..4b03664 100644 (file)
@@ -71,4 +71,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_IA64_SOCKET_H */
index 469787c..e8b8c5b 100644 (file)
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_M32R_SOCKET_H */
index 9bf49c8..d4708ce 100644 (file)
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
index 9de5190..ad5c0a7 100644 (file)
@@ -82,6 +82,9 @@ To add: #define SO_REUSEPORT 0x0200   /* Allow local address and port reuse.  */
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #ifdef __KERNEL__
 
 /** sock_type - Socket types
index 4e60c42..876356d 100644 (file)
@@ -62,4 +62,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
index 225b7d6..d28c51b 100644 (file)
@@ -61,6 +61,9 @@
 
 #define SO_RXQ_OVFL             0x4021
 
+#define SO_WIFI_STATUS         0x4022
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
index 9452c3c..d918752 100644 (file)
                ranges = <
                        0 0x0 0xfc000000 0x04000000     // NOR FLASH bank 1
                        1 0x0 0xf8000000 0x08000000     // NOR FLASH bank 0
-                       2 0x0 0xa3000000 0x00008000     // CAN (2 x i82527)
+                       2 0x0 0xa3000000 0x00008000     // CAN (2 x CC770)
                        3 0x0 0xa3010000 0x00008000     // NAND FLASH
 
                >;
                };
 
                /* Note: CAN support needs be enabled in U-Boot */
-               can0@2,0 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,0 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x0 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+                       bosch,clock-out-frequency = <16000000>;
                };
 
-               can1@2,100 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,100 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x100 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
                };
 
                /* Note: NAND support needs to be enabled in U-Boot */
index 619776f..988d887 100644 (file)
                ranges = <
                        0 0x0 0xfc000000 0x04000000     // NOR FLASH bank 1
                        1 0x0 0xf8000000 0x08000000     // NOR FLASH bank 0
-                       2 0x0 0xe3000000 0x00008000     // CAN (2 x i82527)
+                       2 0x0 0xe3000000 0x00008000     // CAN (2 x CC770)
                        3 0x0 0xe3010000 0x00008000     // NAND FLASH
 
                >;
                };
 
                /* Note: CAN support needs be enabled in U-Boot */
-               can0@2,0 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,0 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x0 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+                       bosch,clock-out-frequency = <16000000>;
                };
 
-               can1@2,100 {
-                       compatible = "intel,82527"; // Bosch CC770
+               can@2,100 {
+                       compatible = "bosch,cc770"; // Bosch CC770
                        reg = <2 0x100 0x100>;
                        interrupts = <4 1>;
                        interrupt-parent = <&mpic>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
                };
 
                /* Note: NAND support needs to be enabled in U-Boot */
index f6da7ec..c3dba25 100644 (file)
@@ -57,6 +57,7 @@
 
                ranges = <
                        0x0 0x0 0x40000000 0x800000
+                       0x3 0x0 0xc0000000 0x200
                >;
 
                flash@0,0 {
                        bank-width = <4>;
                        device-width = <2>;
                };
+
+               /* Note: CAN support needs be enabled in U-Boot */
+               can@3,0 {
+                       compatible = "intc,82527";
+                       reg = <3 0x0 0x80>;
+                       interrupts = <8 1>;
+                       interrupt-parent = <&PIC>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+                       bosch,clock-out-frequency = <16000000>;
+               };
+
+               can@3,100 {
+                       compatible = "intc,82527";
+                       reg = <3 0x100 0x80>;
+                       interrupts = <8 1>;
+                       interrupt-parent = <&PIC>;
+                       bosch,external-clock-frequency = <16000000>;
+                       bosch,disconnect-rx1-input;
+                       bosch,disconnect-tx1-output;
+                       bosch,iso-low-speed-mux;
+               };
        };
 
        soc@fff00000 {
index 866f760..2fc2af8 100644 (file)
@@ -69,4 +69,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_POWERPC_SOCKET_H */
index fdff1e9..67b5c1b 100644 (file)
@@ -70,4 +70,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _ASM_SOCKET_H */
index 9d3fefc..8af1b64 100644 (file)
@@ -58,6 +58,9 @@
 
 #define SO_RXQ_OVFL             0x0024
 
+#define SO_WIFI_STATUS         0x0025
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index cbdf2ff..bb06968 100644 (file)
@@ -73,4 +73,7 @@
 
 #define SO_RXQ_OVFL             40
 
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS                SO_WIFI_STATUS
+
 #endif /* _XTENSA_SOCKET_H */
index 3d0c2b0..9e373ba 100644 (file)
@@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev)
           if (ia_vcc == NULL)
           {
              atomic_inc(&vcc->stats->rx_err);
+             atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
-             atm_return(vcc, atm_guess_pdu2truesize(len));
              goto INCR_DLE;
            }
           // get real pkt length  pwang_test
@@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev)
              atomic_inc(&vcc->stats->rx_err);
              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
                                                             length, skb->len);)
+             atm_return(vcc, skb->truesize);
              dev_kfree_skb_any(skb);
-             atm_return(vcc, atm_guess_pdu2truesize(len));
              goto INCR_DLE;
           }
           skb_trim(skb, length);
index 1b51d8b..b0994c0 100644 (file)
@@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
        pr_debug("Switched to core: 0x%X\n", core->id.id);
 }
 
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
 {
+       switch (core->id.id) {
+       case BCMA_CORE_CHIPCOMMON:
+               return 3 * BCMA_CORE_SIZE;
+       case BCMA_CORE_PCIE:
+               return 2 * BCMA_CORE_SIZE;
+       }
+
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
+       return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+       offset += bcma_host_pci_provide_access_to_core(core);
        return ioread8(core->bus->mmio + offset);
 }
 
 static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        return ioread16(core->bus->mmio + offset);
 }
 
 static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        return ioread32(core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
                                 u8 value)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        iowrite8(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
                                 u16 value)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        iowrite16(value, core->bus->mmio + offset);
 }
 
 static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
                                 u32 value)
 {
-       if (core->bus->mapped_core != core)
-               bcma_host_pci_switch_core(core);
+       offset += bcma_host_pci_provide_access_to_core(core);
        iowrite32(value, core->bus->mmio + offset);
 }
 
index 106beb1..1622772 100644 (file)
@@ -30,6 +30,7 @@
 #include <net/bluetooth/bluetooth.h>
 
 #define VERSION "1.0"
+#define ATH3K_FIRMWARE "ath3k-1.fw"
 
 #define ATH3K_DNLOAD                           0x01
 #define ATH3K_GETSTATE                         0x05
@@ -400,9 +401,15 @@ static int ath3k_probe(struct usb_interface *intf,
                return 0;
        }
 
-       if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
-               BT_ERR("Error loading firmware");
-               return -EIO;
+       ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev);
+       if (ret < 0) {
+               if (ret == -ENOENT)
+                       BT_ERR("Firmware file \"%s\" not found",
+                                                       ATH3K_FIRMWARE);
+               else
+                       BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+                                                       ATH3K_FIRMWARE, ret);
+               return ret;
        }
 
        ret = ath3k_load_firmware(udev, firmware);
@@ -441,4 +448,4 @@ MODULE_AUTHOR("Atheros Communications");
 MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("ath3k-1.fw");
+MODULE_FIRMWARE(ATH3K_FIRMWARE);
index 61b5914..a936763 100644 (file)
@@ -751,9 +751,7 @@ static void bfusb_disconnect(struct usb_interface *intf)
 
        bfusb_close(hdev);
 
-       if (hci_unregister_dev(hdev) < 0)
-               BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+       hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 }
 
index aed1904..c6a0c61 100644 (file)
@@ -844,9 +844,7 @@ static int bluecard_close(bluecard_info_t *info)
        /* Turn FPGA off */
        outb(0x80, iobase + 0x30);
 
-       if (hci_unregister_dev(hdev) < 0)
-               BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+       hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 
        return 0;
index 4fc0194..0c97e5d 100644 (file)
@@ -636,9 +636,7 @@ static int bt3c_close(bt3c_info_t *info)
 
        bt3c_hci_close(hdev);
 
-       if (hci_unregister_dev(hdev) < 0)
-               BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+       hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 
        return 0;
index 526b618..200b3a2 100644 (file)
@@ -565,9 +565,7 @@ static int btuart_close(btuart_info_t *info)
 
        spin_unlock_irqrestore(&(info->lock), flags);
 
-       if (hci_unregister_dev(hdev) < 0)
-               BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+       hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 
        return 0;
index eabc437..ea5ad1c 100644 (file)
@@ -315,7 +315,8 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
 
        err = usb_submit_urb(urb, mem_flags);
        if (err < 0) {
-               BT_ERR("%s urb %p submission failed (%d)",
+               if (err != -EPERM && err != -ENODEV)
+                       BT_ERR("%s urb %p submission failed (%d)",
                                                hdev->name, urb, -err);
                usb_unanchor_urb(urb);
        }
@@ -400,7 +401,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
 
        err = usb_submit_urb(urb, mem_flags);
        if (err < 0) {
-               BT_ERR("%s urb %p submission failed (%d)",
+               if (err != -EPERM && err != -ENODEV)
+                       BT_ERR("%s urb %p submission failed (%d)",
                                                hdev->name, urb, -err);
                usb_unanchor_urb(urb);
        }
@@ -523,7 +525,8 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
 
        err = usb_submit_urb(urb, mem_flags);
        if (err < 0) {
-               BT_ERR("%s urb %p submission failed (%d)",
+               if (err != -EPERM && err != -ENODEV)
+                       BT_ERR("%s urb %p submission failed (%d)",
                                                hdev->name, urb, -err);
                usb_unanchor_urb(urb);
        }
@@ -727,6 +730,9 @@ static int btusb_send_frame(struct sk_buff *skb)
                usb_fill_bulk_urb(urb, data->udev, pipe,
                                skb->data, skb->len, btusb_tx_complete, skb);
 
+               if (skb->priority >= HCI_PRIO_MAX - 1)
+                       urb->transfer_flags  = URB_ISO_ASAP;
+
                hdev->stat.acl_tx++;
                break;
 
@@ -770,7 +776,9 @@ skip_waking:
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err < 0) {
-               BT_ERR("%s urb %p submission failed", hdev->name, urb);
+               if (err != -EPERM && err != -ENODEV)
+                       BT_ERR("%s urb %p submission failed (%d)",
+                                               hdev->name, urb, -err);
                kfree(urb->setup_packet);
                usb_unanchor_urb(urb);
        } else {
index 5e4c2de..969bb22 100644 (file)
@@ -551,9 +551,7 @@ static int dtl1_close(dtl1_info_t *info)
 
        spin_unlock_irqrestore(&(info->lock), flags);
 
-       if (hci_unregister_dev(hdev) < 0)
-               BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+       hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 
        return 0;
index 67c180c..2ed6ab1 100644 (file)
@@ -41,6 +41,8 @@
 
 #define VERSION "1.3"
 
+static bool amp;
+
 struct vhci_data {
        struct hci_dev *hdev;
 
@@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file)
        hdev->bus = HCI_VIRTUAL;
        hdev->driver_data = data;
 
+       if (amp)
+               hdev->dev_type = HCI_AMP;
+
        hdev->open     = vhci_open_dev;
        hdev->close    = vhci_close_dev;
        hdev->flush    = vhci_flush;
@@ -264,10 +269,7 @@ static int vhci_release(struct inode *inode, struct file *file)
        struct vhci_data *data = file->private_data;
        struct hci_dev *hdev = data->hdev;
 
-       if (hci_unregister_dev(hdev) < 0) {
-               BT_ERR("Can't unregister HCI device %s", hdev->name);
-       }
-
+       hci_unregister_dev(hdev);
        hci_free_dev(hdev);
 
        file->private_data = NULL;
@@ -306,6 +308,9 @@ static void __exit vhci_exit(void)
 module_init(vhci_init);
 module_exit(vhci_exit);
 
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
index eb0e2cc..73d4531 100644 (file)
@@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
 {
        dev->addr_len           = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-       dev->features           = NETIF_F_NO_CSUM;
+       dev->features           = NETIF_F_HW_CSUM;
        dev->needed_tailroom    = 2; /* FCS */
        dev->mtu                = 127;
        dev->tx_queue_len       = 10;
index e9cf51b..1612cfd 100644 (file)
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
        mutex_unlock(&lock);
 }
 
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+       struct neighbour *n;
+       int ret;
+
+       rcu_read_lock();
+       n = dst_get_neighbour_noref(dst);
+       if (!n || !(n->nud_state & NUD_VALID)) {
+               if (n)
+                       neigh_event_send(n, NULL);
+               ret = -ENODATA;
+       } else {
+               ret = rdma_copy_addr(addr, dst->dev, n->ha);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static int addr4_resolve(struct sockaddr_in *src_in,
                         struct sockaddr_in *dst_in,
                         struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
        __be32 src_ip = src_in->sin_addr.s_addr;
        __be32 dst_ip = dst_in->sin_addr.s_addr;
        struct rtable *rt;
-       struct neighbour *neigh;
        struct flowi4 fl4;
        int ret;
 
@@ -214,20 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
                goto put;
        }
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
-       if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               rcu_read_lock();
-               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-               rcu_read_unlock();
-               ret = -ENODATA;
-               if (neigh)
-                       goto release;
-               goto put;
-       }
-
-       ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
-       neigh_release(neigh);
+       ret = dst_fetch_ha(&rt->dst, addr);
 put:
        ip_rt_put(rt);
 out:
@@ -240,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                         struct rdma_dev_addr *addr)
 {
        struct flowi6 fl6;
-       struct neighbour *neigh;
        struct dst_entry *dst;
        int ret;
 
        memset(&fl6, 0, sizeof fl6);
-       ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
-       ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+       fl6.daddr = dst_in->sin6_addr;
+       fl6.saddr = src_in->sin6_addr;
        fl6.flowi6_oif = addr->bound_dev_if;
 
        dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -260,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                        goto put;
 
                src_in->sin6_family = AF_INET6;
-               ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+               src_in->sin6_addr = fl6.saddr;
        }
 
        if (dst->dev->flags & IFF_LOOPBACK) {
@@ -276,16 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
                goto put;
        }
 
-       rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
-       if (!neigh || !(neigh->nud_state & NUD_VALID)) {
-               if (neigh)
-                       neigh_event_send(neigh, NULL);
-               ret = -ENODATA;
-       } else {
-               ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
-       }
-       rcu_read_unlock();
+       ret = dst_fetch_ha(dst, addr);
 put:
        dst_release(dst);
        return ret;
index 75ff821..09e66cc 100644 (file)
@@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
        if (cma_zero_addr(src)) {
                dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
                if ((src->sa_family = dst->sa_family) == AF_INET) {
-                       ((struct sockaddr_in *) src)->sin_addr.s_addr =
-                               ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+                       ((struct sockaddr_in *)src)->sin_addr =
+                               ((struct sockaddr_in *)dst)->sin_addr;
                } else {
-                       ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
-                                      &((struct sockaddr_in6 *) dst)->sin6_addr);
+                       ((struct sockaddr_in6 *)src)->sin6_addr =
+                               ((struct sockaddr_in6 *)dst)->sin6_addr;
                }
        }
 
index c88b12b..740dcc0 100644 (file)
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        struct iwch_ep *child_ep, *parent_ep = ctx;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int hwtid = GET_TID(req);
-       struct neighbour *neigh;
        struct dst_entry *dst;
        struct l2t_entry *l2t;
        struct rtable *rt;
@@ -1375,10 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                goto reject;
        }
        dst = &rt->dst;
-       rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
-       l2t = t3_l2t_get(tdev, neigh, neigh->dev);
-       rcu_read_unlock();
+       l2t = t3_l2t_get(tdev, dst, NULL);
        if (!l2t) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -1889,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
        struct iwch_dev *h = to_iwch_dev(cm_id->device);
-       struct neighbour *neigh;
        struct iwch_ep *ep;
        struct rtable *rt;
        int err = 0;
@@ -1947,13 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                goto fail3;
        }
        ep->dst = &rt->dst;
-
-       rcu_read_lock();
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
-       rcu_read_unlock();
+       ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
        if (!ep->l2t) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                err = -ENOMEM;
index 0747004..0668bb3 100644 (file)
@@ -1556,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
        return;
 }
 
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+                    struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+       struct neighbour *n;
+       int err, step;
+
+       rcu_read_lock();
+       n = dst_get_neighbour_noref(dst);
+       err = -ENODEV;
+       if (!n)
+               goto out;
+       err = -ENOMEM;
+       if (n->dev->flags & IFF_LOOPBACK) {
+               struct net_device *pdev;
+
+               pdev = ip_dev_find(&init_net, peer_ip);
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, pdev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = pdev->mtu;
+               ep->tx_chan = cxgb4_port_chan(pdev);
+               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(pdev) * step;
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->ctrlq_idx = cxgb4_port_idx(pdev);
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(pdev) * step];
+               dev_put(pdev);
+       } else {
+               ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+                                       n, n->dev, 0);
+               if (!ep->l2t)
+                       goto out;
+               ep->mtu = dst_mtu(ep->dst);
+               ep->tx_chan = cxgb4_port_chan(n->dev);
+               ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+               step = cdev->rdev.lldi.ntxq /
+                       cdev->rdev.lldi.nchan;
+               ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+               ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+               step = cdev->rdev.lldi.nrxq /
+                       cdev->rdev.lldi.nchan;
+               ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+                       cxgb4_port_idx(n->dev) * step];
+
+               if (clear_mpa_v1) {
+                       ep->retry_with_mpa_v1 = 0;
+                       ep->tried_with_mpa_v1 = 0;
+               }
+       }
+       err = 0;
+out:
+       rcu_read_unlock();
+
+       return err;
+}
+
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct c4iw_ep *child_ep, *parent_ep;
@@ -1563,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int hwtid = GET_TID(req);
-       struct neighbour *neigh;
        struct dst_entry *dst;
-       struct l2t_entry *l2t;
        struct rtable *rt;
        __be32 local_ip, peer_ip;
        __be16 local_port, peer_port;
-       struct net_device *pdev;
-       u32 tx_chan, smac_idx;
-       u16 rss_qid;
-       u32 mtu;
-       int step;
-       int txq_idx, ctrlq_idx;
+       int err;
 
        parent_ep = lookup_stid(t, stid);
        PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1596,49 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                goto reject;
        }
        dst = &rt->dst;
-       rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               pdev = ip_dev_find(&init_net, peer_ip);
-               BUG_ON(!pdev);
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
-               mtu = pdev->mtu;
-               tx_chan = cxgb4_port_chan(pdev);
-               smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(pdev) * step;
-               ctrlq_idx = cxgb4_port_idx(pdev);
-               step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-               rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
-               mtu = dst_mtu(dst);
-               tx_chan = cxgb4_port_chan(neigh->dev);
-               smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
-               txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
-               rss_qid = dev->rdev.lldi.rxq_ids[
-                         cxgb4_port_idx(neigh->dev) * step];
-       }
-       rcu_read_unlock();
-       if (!l2t) {
-               printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+       child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+       if (!child_ep) {
+               printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
                       __func__);
                dst_release(dst);
                goto reject;
        }
 
-       child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
-       if (!child_ep) {
-               printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+       err = import_ep(child_ep, peer_ip, dst, dev, false);
+       if (err) {
+               printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
-               cxgb4_l2t_release(l2t);
                dst_release(dst);
+               kfree(child_ep);
                goto reject;
        }
+
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
@@ -1651,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
-       child_ep->l2t = l2t;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
-       child_ep->tx_chan = tx_chan;
-       child_ep->smac_idx = smac_idx;
-       child_ep->rss_qid = rss_qid;
-       child_ep->mtu = mtu;
-       child_ep->txq_idx = txq_idx;
-       child_ep->ctrlq_idx = ctrlq_idx;
 
        PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
-            tx_chan, smac_idx, rss_qid);
+            child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
 
        init_timer(&child_ep->timer);
        cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1792,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
 
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
-       int err = 0;
        struct rtable *rt;
-       struct net_device *pdev;
-       struct neighbour *neigh;
-       int step;
+       int err = 0;
 
        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
        init_timer(&ep->timer);
@@ -1824,47 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
        }
        ep->dst = &rt->dst;
 
-       rcu_read_lock();
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                                  ep->com.cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, pdev, 0);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = ep->com.dev->rdev.lldi.nrxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, neigh->dev, 0);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = ep->com.dev->rdev.lldi.nrxq /
-                       ep->com.dev->rdev.lldi.nchan;
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                       cxgb4_port_idx(neigh->dev) * step];
-       }
-       rcu_read_unlock();
-       if (!ep->l2t) {
+       err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+                       ep->dst, ep->com.dev, false);
+       if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
 
@@ -2240,13 +2222,10 @@ err:
 
 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 {
-       int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_ep *ep;
        struct rtable *rt;
-       struct net_device *pdev;
-       struct neighbour *neigh;
-       int step;
+       int err = 0;
 
        if ((conn_param->ord > c4iw_max_read_depth) ||
            (conn_param->ird > c4iw_max_read_depth)) {
@@ -2307,49 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        ep->dst = &rt->dst;
 
-       rcu_read_lock();
-       neigh = dst_get_neighbour(ep->dst);
-
-       /* get a l2t entry */
-       if (neigh->dev->flags & IFF_LOOPBACK) {
-               PDBG("%s LOOPBACK\n", __func__);
-               pdev = ip_dev_find(&init_net,
-                                  cm_id->remote_addr.sin_addr.s_addr);
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, pdev, 0);
-               ep->mtu = pdev->mtu;
-               ep->tx_chan = cxgb4_port_chan(pdev);
-               ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(pdev) * step;
-               step = ep->com.dev->rdev.lldi.nrxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->ctrlq_idx = cxgb4_port_idx(pdev);
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(pdev) * step];
-               dev_put(pdev);
-       } else {
-               ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
-                                       neigh, neigh->dev, 0);
-               ep->mtu = dst_mtu(ep->dst);
-               ep->tx_chan = cxgb4_port_chan(neigh->dev);
-               ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
-               step = ep->com.dev->rdev.lldi.ntxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
-               ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
-               step = ep->com.dev->rdev.lldi.nrxq /
-                      ep->com.dev->rdev.lldi.nchan;
-               ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
-                             cxgb4_port_idx(neigh->dev) * step];
-               ep->retry_with_mpa_v1 = 0;
-               ep->tried_with_mpa_v1 = 0;
-       }
-       rcu_read_unlock();
-       if (!ep->l2t) {
+       err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+                       ep->dst, ep->com.dev, true);
+       if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-               err = -ENOMEM;
                goto fail4;
        }
 
index f36da99..95c94d8 100644 (file)
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 
        err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
                           in_modifier, op_modifier,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
 
        if (!err)
                memcpy(response_mad, outmailbox->buf, 256);
@@ -330,7 +331,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                return IB_MAD_RESULT_FAILURE;
 
        err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
-                          MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_WRAPPED);
        if (err)
                err = IB_MAD_RESULT_FAILURE;
        else {
index 77f3dbc..b8279de 100644 (file)
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 {
        struct mlx4_dev *dev = to_mdev(device)->dev;
 
-       return dev->caps.port_mask & (1 << (port_num - 1)) ?
+       return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 }
 
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
        memset(mailbox->buf, 0, 256);
        memcpy(mailbox->buf, props->node_desc, 64);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
-                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+                MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
        }
 
        err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
        memcpy(gids, gw->gids, sizeof gw->gids);
 
        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+                      1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
        if (err)
                printk(KERN_WARNING "set port command failed\n");
        else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        printk_once(KERN_INFO "%s", mlx4_ib_version);
 
+       if (mlx4_is_mfunc(dev)) {
+               printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+               return NULL;
+       }
+
        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;
 
index 0a52d72..b1e6cae 100644 (file)
@@ -1348,7 +1348,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        else
                netdev = nesvnic->netdev;
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+       rcu_read_lock();
+       neigh = dst_get_neighbour_noref(&rt->dst);
        if (neigh) {
                if (neigh->nud_state & NUD_VALID) {
                        nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,7 +1360,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
                                            neigh->ha, ETH_ALEN)) {
                                        /* Mac address same as in nes_arp_table */
-                                       neigh_release(neigh);
                                        ip_rt_put(rt);
                                        return rc;
                                }
@@ -1373,15 +1373,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                             dst_ip, NES_ARP_ADD);
                        rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
                                           NES_ARP_RESOLVE);
+               } else {
+                       neigh_event_send(neigh, NULL);
                }
-               neigh_release(neigh);
-       }
-
-       if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
-               rcu_read_lock();
-               neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-               rcu_read_unlock();
        }
+       rcu_read_unlock();
        ip_rt_put(rt);
        return rc;
 }
index c00d2f3..4b3fa71 100644 (file)
@@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
        .set_pauseparam = nes_netdev_set_pauseparam,
 };
 
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
 {
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        u32 u32temp;
@@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
        spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 }
 
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
        struct nes_device *nesdev = nesvnic->nesdev;
index 83695b4..3514ca0 100644 (file)
@@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev)
        return 0;
 }
 
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -556,15 +556,13 @@ static int path_rec_start(struct net_device *dev,
 }
 
 /* called with rcu_read_lock */
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;
-       struct neighbour *n;
        unsigned long flags;
 
-       n = dst_get_neighbour(skb_dst(skb));
        neigh = ipoib_neigh_alloc(n, skb->dev);
        if (!neigh) {
                ++dev->stats.tx_dropped;
@@ -638,16 +636,13 @@ err_drop:
 }
 
 /* called with rcu_read_lock */
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
-       struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *n;
 
        /* Look up path record for unicasts */
-       n = dst_get_neighbour(dst);
        if (n->ha[4] != 0xff) {
-               neigh_add_path(skb, dev);
+               neigh_add_path(skb, n, dev);
                return;
        }
 
@@ -723,12 +718,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
 
        rcu_read_lock();
-       if (likely(skb_dst(skb)))
-               n = dst_get_neighbour(skb_dst(skb));
-
+       if (likely(skb_dst(skb))) {
+               n = dst_get_neighbour_noref(skb_dst(skb));
+               if (!n) {
+                       ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+                       goto unlock;
+               }
+       }
        if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
-                       ipoib_path_lookup(skb, dev);
+                       ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }
 
@@ -751,7 +751,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        list_del(&neigh->list);
                        ipoib_neigh_free(dev, neigh);
                        spin_unlock_irqrestore(&priv->lock, flags);
-                       ipoib_path_lookup(skb, dev);
+                       ipoib_path_lookup(skb, n, dev);
                        goto unlock;
                }
 
@@ -841,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
        dst = skb_dst(skb);
        n = NULL;
        if (dst)
-               n = dst_get_neighbour_raw(dst);
+               n = dst_get_neighbour_noref_raw(dst);
        if ((!dst || !n) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
@@ -1222,6 +1222,8 @@ static struct net_device *ipoib_add_port(const char *format,
        priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
        priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
 
+       priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
index 873bff9..f7ff9dd 100644 (file)
@@ -269,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
 
                skb->dev = dev;
                if (dst)
-                       n = dst_get_neighbour_raw(dst);
+                       n = dst_get_neighbour_noref_raw(dst);
                if (!dst || !n) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -728,7 +728,7 @@ out:
 
                rcu_read_lock();
                if (dst)
-                       n = dst_get_neighbour(dst);
+                       n = dst_get_neighbour_noref(dst);
                if (n && !*to_ipoib_neigh(n)) {
                        struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
                                                                      skb->dev);
index 04231cb..1793ba1 100644 (file)
@@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 {
        isdn_if *iif;
 
-       pr_info("ISDN4Linux interface\n");
-
        iif = kmalloc(sizeof *iif, GFP_KERNEL);
        if (!iif) {
                pr_err("out of memory\n");
@@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs)
  */
 void gigaset_isdn_regdrv(void)
 {
+       pr_info("ISDN4Linux interface\n");
        /* nothing to do */
 }
 
index 0dc30ff..595d731 100644 (file)
@@ -381,6 +381,11 @@ error:
        return PTR_ERR(vqs[i]);
 }
 
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+       return "";
+}
+
 /* The ops structure which hooks everything together. */
 static struct virtio_config_ops lguest_config_ops = {
        .get_features = lg_get_features,
@@ -392,6 +397,7 @@ static struct virtio_config_ops lguest_config_ops = {
        .reset = lg_reset,
        .find_vqs = lg_find_vqs,
        .del_vqs = lg_del_vqs,
+       .bus_name = lg_bus_name,
 };
 
 /*
index 7b33de9..0ff4b02 100644 (file)
@@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
        eeprom->reg_data_out = 0;
        eeprom->reg_data_clock = 0;
        eeprom->reg_chip_select = 1;
+       eeprom->drive_data = 1;
        eeprom->register_write(eeprom);
 
        /*
@@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
         */
        eeprom->reg_data_in = 0;
        eeprom->reg_data_out = 0;
+       eeprom->drive_data = 1;
 
        /*
         * Start writing all bits.
@@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
         */
        eeprom->reg_data_in = 0;
        eeprom->reg_data_out = 0;
+       eeprom->drive_data = 0;
 
        /*
         * Start reading all bits.
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
 }
 EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
 
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+       u16 command;
+
+       /* start the command */
+       eeprom_93cx6_startup(eeprom);
+
+       /* create command to enable/disable */
+
+       command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+       command <<= (eeprom->width - 2);
+
+       eeprom_93cx6_write_bits(eeprom, command,
+                               PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+       eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write the @data to the specified @addr in the EEPROM and
+ * wait for the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we delay between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+       int timeout = 100;
+       u16 command;
+
+       /* start the command */
+       eeprom_93cx6_startup(eeprom);
+
+       command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+       command |= addr;
+
+       /* send write command */
+       eeprom_93cx6_write_bits(eeprom, command,
+                               PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+       /* send data */
+       eeprom_93cx6_write_bits(eeprom, data, 16);
+
+       /* get ready to check for busy */
+       eeprom->drive_data = 0;
+       eeprom->reg_chip_select = 1;
+       eeprom->register_write(eeprom);
+
+       /* wait at least 250ns for DO to become the busy signal */
+       usleep_range(1000, 2000);
+
+       /* wait for DO to go high to signify finish */
+
+       while (true) {
+               eeprom->register_read(eeprom);
+
+               if (eeprom->reg_data_out)
+                       break;
+
+               usleep_range(1000, 2000);
+
+               if (--timeout <= 0) {
+                       printk(KERN_ERR "%s: timeout\n", __func__);
+                       break;
+               }
+       }
+
+       eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
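
Editor's note: taken together, the two new exports give 93cx6 users a complete write path: unlock, write, lock again. A minimal usage sketch follows, assuming a struct eeprom_93cx6 the caller has already initialised (width, register_read and register_write hooks set up); the function name example_update_word is hypothetical.

#include <linux/types.h>
#include <linux/eeprom_93cx6.h>

/* Update one word in the 93cx6 EEPROM using the new helpers. */
static void example_update_word(struct eeprom_93cx6 *eeprom, u8 addr, u16 value)
{
	eeprom_93cx6_wren(eeprom, true);	 /* lift write protection */
	eeprom_93cx6_write(eeprom, addr, value); /* write word, wait for DO */
	eeprom_93cx6_wren(eeprom, false);	 /* write-protect again */
}
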
index 42f0673..3fac67a 100644 (file)
@@ -576,7 +576,7 @@ xpnet_init(void)
         * report an error if the data is not retrievable and the
         * packet will be dropped.
         */
-       xpnet_device->features = NETIF_F_NO_CSUM;
+       xpnet_device->features = NETIF_F_HW_CSUM;
 
        result = register_netdev(xpnet_device);
        if (result != 0) {
index 654a5e9..9845afb 100644 (file)
@@ -125,6 +125,8 @@ config IFB
          'ifb1' etc.
          Look at the iproute2 documentation directory for usage etc
 
+source "drivers/net/team/Kconfig"
+
 config MACVLAN
        tristate "MAC-VLAN support (EXPERIMENTAL)"
        depends on EXPERIMENTAL
@@ -241,6 +243,8 @@ source "drivers/atm/Kconfig"
 
 source "drivers/net/caif/Kconfig"
 
+source "drivers/net/dsa/Kconfig"
+
 source "drivers/net/ethernet/Kconfig"
 
 source "drivers/net/fddi/Kconfig"
index fa877cd..1988881 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
 obj-$(CONFIG_PHYLIB) += phy/
 obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
 obj-$(CONFIG_ETHERNET) += ethernet/
 obj-$(CONFIG_FDDI) += fddi/
 obj-$(CONFIG_HIPPI) += hippi/
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644 (file)
index 027a0ee..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
-       struct inet6_dev *idev;
-
-       if (!dev)
-               return;
-
-       idev = in6_dev_get(dev);
-       if (!idev)
-               return;
-
-       read_lock_bh(&idev->lock);
-       if (!list_empty(&idev->addr_list)) {
-               struct inet6_ifaddr *ifa
-                       = list_first_entry(&idev->addr_list,
-                                          struct inet6_ifaddr, if_list);
-               ipv6_addr_copy(addr, &ifa->addr);
-       } else
-               ipv6_addr_set(addr, 0, 0, 0, 0);
-
-       read_unlock_bh(&idev->lock);
-
-       in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
-                        struct in6_addr *daddr,
-                        int router,
-                        unsigned short vlan_id)
-{
-       struct in6_addr mcaddr;
-       struct icmp6hdr icmp6h = {
-               .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
-       };
-       struct sk_buff *skb;
-
-       icmp6h.icmp6_router = router;
-       icmp6h.icmp6_solicited = 0;
-       icmp6h.icmp6_override = 1;
-
-       addrconf_addr_solict_mult(daddr, &mcaddr);
-
-       pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
-                slave_dev->name, &mcaddr, daddr);
-
-       skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
-                             ND_OPT_TARGET_LL_ADDR);
-
-       if (!skb) {
-               pr_err("NA packet allocation failed\n");
-               return;
-       }
-
-       if (vlan_id) {
-               /* The Ethernet header is not present yet, so it is
-                * too early to insert a VLAN tag.  Force use of an
-                * out-of-line tag here and let dev_hard_start_xmit()
-                * insert it if the slave hardware can't.
-                */
-               skb = __vlan_hwaccel_put_tag(skb, vlan_id);
-               if (!skb) {
-                       pr_err("failed to insert VLAN tag\n");
-                       return;
-               }
-       }
-
-       ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master.  This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
-       struct slave *slave = bond->curr_active_slave;
-       struct vlan_entry *vlan;
-       struct inet6_dev *idev;
-       int is_router;
-
-       pr_debug("%s: bond %s slave %s\n", bond->dev->name,
-                __func__, slave ? slave->dev->name : "NULL");
-
-       if (!slave || !bond->send_unsol_na ||
-           test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
-               return;
-
-       bond->send_unsol_na--;
-
-       idev = in6_dev_get(bond->dev);
-       if (!idev)
-               return;
-
-       is_router = !!idev->cnf.forwarding;
-
-       in6_dev_put(idev);
-
-       if (!ipv6_addr_any(&bond->master_ipv6))
-               bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-               if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
-                       bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
-                                    vlan->vlan_id);
-               }
-       }
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
-                               unsigned long event,
-                               void *ptr)
-{
-       struct inet6_ifaddr *ifa = ptr;
-       struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
-       struct bonding *bond;
-       struct vlan_entry *vlan;
-       struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
-       list_for_each_entry(bond, &bn->dev_list, bond_list) {
-               if (bond->dev == event_dev) {
-                       switch (event) {
-                       case NETDEV_UP:
-                               if (ipv6_addr_any(&bond->master_ipv6))
-                                       ipv6_addr_copy(&bond->master_ipv6,
-                                                      &ifa->addr);
-                               return NOTIFY_OK;
-                       case NETDEV_DOWN:
-                               if (ipv6_addr_equal(&bond->master_ipv6,
-                                                   &ifa->addr))
-                                       bond_glean_dev_ipv6(bond->dev,
-                                                           &bond->master_ipv6);
-                               return NOTIFY_OK;
-                       default:
-                               return NOTIFY_DONE;
-                       }
-               }
-
-               list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
-                       rcu_read_lock();
-                       vlan_dev = __vlan_find_dev_deep(bond->dev,
-                                                       vlan->vlan_id);
-                       rcu_read_unlock();
-                       if (vlan_dev == event_dev) {
-                               switch (event) {
-                               case NETDEV_UP:
-                                       if (ipv6_addr_any(&vlan->vlan_ipv6))
-                                               ipv6_addr_copy(&vlan->vlan_ipv6,
-                                                              &ifa->addr);
-                                       return NOTIFY_OK;
-                               case NETDEV_DOWN:
-                                       if (ipv6_addr_equal(&vlan->vlan_ipv6,
-                                                           &ifa->addr))
-                                               bond_glean_dev_ipv6(vlan_dev,
-                                                                   &vlan->vlan_ipv6);
-                                       return NOTIFY_OK;
-                               default:
-                                       return NOTIFY_DONE;
-                               }
-                       }
-               }
-       }
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
-       .notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
-       register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
-       unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
index 7f87568..0c0dacb 100644 (file)
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being added
  */
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *stop_at;
        int i, res;
 
        bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-                   slave_ops->ndo_vlan_rx_add_vid) {
-                       slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
-               }
+               res = vlan_vid_add(slave->dev, vid);
+               if (res)
+                       goto unwind;
        }
 
        res = bond_add_vlan(bond, vid);
        if (res) {
                pr_err("%s: Error: Failed to add vlan id %d\n",
                       bond_dev->name, vid);
+               return res;
        }
+
+       return 0;
+
+unwind:
+       /* unwind from head to the slave that failed */
+       stop_at = slave;
+       bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+               vlan_vid_del(slave->dev, vid);
+
+       return res;
 }
 
 /**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
  * @bond_dev: bonding net device that got called
  * @vid: vlan id being removed
  */
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
        int i, res;
 
-       bond_for_each_slave(bond, slave, i) {
-               struct net_device *slave_dev = slave->dev;
-               const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-               if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
-                   slave_ops->ndo_vlan_rx_kill_vid) {
-                       slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
-               }
-       }
+       bond_for_each_slave(bond, slave, i)
+               vlan_vid_del(slave->dev, vid);
 
        res = bond_del_vlan(bond, vid);
        if (res) {
                pr_err("%s: Error: Failed to remove vlan id %d\n",
                       bond_dev->name, vid);
+               return res;
        }
+
+       return 0;
 }
 
 static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
 {
        struct vlan_entry *vlan;
-       const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
-       if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-           !(slave_ops->ndo_vlan_rx_add_vid))
-               return;
+       int res;
 
-       list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
-               slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+       list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+               res = vlan_vid_add(slave_dev, vlan->vlan_id);
+               if (res)
+                       pr_warning("%s: Failed to add vlan id %d to device %s\n",
+                                  bond->dev->name, vlan->vlan_id,
+                                  slave_dev->name);
+       }
 }
 
 static void bond_del_vlans_from_slave(struct bonding *bond,
                                      struct net_device *slave_dev)
 {
-       const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct vlan_entry *vlan;
 
-       if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
-           !(slave_ops->ndo_vlan_rx_kill_vid))
-               return;
-
        list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
                if (!vlan->vlan_id)
                        continue;
-               slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+               vlan_vid_del(slave_dev, vlan->vlan_id);
        }
 }
 
@@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev,
        return 0;
 }
 
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct slave *slave;
        struct bonding *bond = netdev_priv(dev);
-       u32 mask;
+       netdev_features_t mask;
        int i;
 
        read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond)
 {
        struct slave *slave;
        struct net_device *bond_dev = bond->dev;
-       u32 vlan_features = BOND_VLAN_FEATURES;
+       netdev_features_t vlan_features = BOND_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
        int i;
 
@@ -1897,7 +1897,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *oldcurrent;
        struct sockaddr addr;
-       u32 old_features = bond_dev->features;
+       netdev_features_t old_features = bond_dev->features;
 
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -4339,7 +4339,7 @@ static void bond_setup(struct net_device *bond_dev)
                                NETIF_F_HW_VLAN_RX |
                                NETIF_F_HW_VLAN_FILTER;
 
-       bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+       bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
        bond_dev->features |= bond_dev->hw_features;
 }
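Editor's note: the bonding changes above replace per-slave ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid calls with vlan_vid_add()/vlan_vid_del(), which centralise the per-device VLAN filter bookkeeping, and they add an unwind path so a partial failure leaves no slave with a stray filter entry. A condensed sketch of that add-then-unwind idiom over a plain array (the helper add_vid_all and its arguments are hypothetical):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static int add_vid_all(struct net_device **devs, int count, unsigned short vid)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = vlan_vid_add(devs[i], vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* undo only the ids already added */
		vlan_vid_del(devs[i], vid);
	return err;
}
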
 
index 0733525..0a4fc62 100644 (file)
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
        dev_dbg(&cfhsi->ndev->dev, "%s.\n",
                __func__);
 
-
-       ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
-       if (ret) {
-               dev_warn(&cfhsi->ndev->dev,
-                       "%s: can't wake up HSI interface: %d.\n",
-                       __func__, ret);
-               return ret;
-       }
-
        do {
                ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
                                &fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
                }
        } while (1);
 
-       cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
        return ret;
 }
 
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 
                /* Create HSI frame. */
                len = cfhsi_tx_frm(desc, cfhsi);
-               BUG_ON(!len);
+               WARN_ON(!len);
 
                /* Set up new transfer. */
                res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
index 23406e6..9341a2d 100644 (file)
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
                skb_pull(skb, tty_wr);
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
-                       BUG_ON(tmp != skb);
+                       WARN_ON(tmp != skb);
                        if (in_interrupt())
                                dev_kfree_skb_irq(skb);
                        else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
 
        ser = tty->disc_data;
        BUG_ON(ser == NULL);
-       BUG_ON(ser->tty != tty);
+       WARN_ON(ser->tty != tty);
        handle_tx(ser);
 }
 
index d4b26fb..5b20413 100644 (file)
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
                if ((avail_emptybuff > HIGH_WATERMARK) &&
                                        (!pshm_drv->tx_empty_available)) {
                        pshm_drv->tx_empty_available = 1;
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                                                CAIF_FLOW_ON);
 
-                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
                        /* Schedule the work queue. if required */
                        if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        list_entry(pshm_drv->rx_full_list.next, struct buf_list,
                                        list);
                list_del_init(&pbuf->list);
+               spin_unlock_irqrestore(&pshm_drv->lock, flags);
 
                /* Retrieve pointer to start of the packet descriptor area. */
                pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        /* Get a suitable CAIF packet and copy in data. */
                        skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
                                                        frm_pck_len + 1);
-                       BUG_ON(skb == NULL);
+
+                       if (skb == NULL) {
+                               pr_info("OOM: Try next frame in descriptor\n");
+                               break;
+                       }
 
                        p = skb_put(skb, frm_pck_len);
                        memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
                        pck_desc++;
                }
 
+               spin_lock_irqsave(&pshm_drv->lock, flags);
                list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
 
                spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
 
                if (skb == NULL)
                        goto send_msg;
-
                /* Check the available no. of buffers in the empty list */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                                        pshm_drv->tx_empty_available) {
                        /* Update blocking condition. */
                        pshm_drv->tx_empty_available = 0;
+                       spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                        CAIF_FLOW_OFF);
+                       spin_lock_irqsave(&pshm_drv->lock, flags);
                }
                /*
                 * We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                        }
 
                        skb = skb_dequeue(&pshm_drv->sk_qhead);
+                       if (skb == NULL)
+                               break;
                        /* Copy in CAIF frame. */
                        skb_copy_bits(skb, 0, pbuf->desc_vptr +
                                        pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
                                                                        frmlen;
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_irq(skb);
 
                        /* Fill in the shared memory packet descriptor area. */
                        pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
 static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
 {
        struct shmdrv_layer *pshm_drv;
-       unsigned long flags = 0;
 
        pshm_drv = netdev_priv(shm_netdev);
 
-       spin_lock_irqsave(&pshm_drv->lock, flags);
-
        skb_queue_tail(&pshm_drv->sk_qhead, skb);
 
-       spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
        /* Schedule Tx work queue. for deferred processing of skbs*/
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
                                                (NR_TX_BUF * TX_BUF_SZ);
 
+       spin_lock_init(&pshm_drv->lock);
        INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
 
                if (pshm_dev->shm_loopback)
-                       tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+                       tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
                else
                        tx_buf->desc_vptr =
                                        ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
                rx_buf->len = RX_BUF_SZ;
 
                if (pshm_dev->shm_loopback)
-                       rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+                       rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
                else
                        rx_buf->desc_vptr =
                                        ioremap(rx_buf->phy_addr, RX_BUF_SZ);
index 05e791f..761057b 100644 (file)
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
                        "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
-                          cfspi->xfer.va_tx,
+                          cfspi->xfer.va_tx[0],
                           (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
 
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
        netif_stop_queue(dev);
        return 0;
 }
-static const struct net_device_ops cfspi_ops = {
-       .ndo_open = cfspi_open,
-       .ndo_stop = cfspi_close,
-       .ndo_start_xmit = cfspi_xmit
-};
 
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
 {
+       int res = 0;
        struct cfspi *cfspi = netdev_priv(dev);
-       dev->features = 0;
-       dev->netdev_ops = &cfspi_ops;
-       dev->type = ARPHRD_CAIF;
-       dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-       dev->tx_queue_len = 0;
-       dev->mtu = SPI_MAX_PAYLOAD_SIZE;
-       dev->destructor = free_netdev;
-       skb_queue_head_init(&cfspi->qhead);
-       skb_queue_head_init(&cfspi->chead);
-       cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
-       cfspi->cfdev.use_frag = false;
-       cfspi->cfdev.use_stx = false;
-       cfspi->cfdev.use_fcs = false;
-       cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
-       struct cfspi *cfspi = NULL;
-       struct net_device *ndev;
-       struct cfspi_dev *dev;
-       int res;
-       dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
-       ndev = alloc_netdev(sizeof(struct cfspi),
-                       "cfspi%d", cfspi_setup);
-       if (!ndev)
-               return -ENOMEM;
-
-       cfspi = netdev_priv(ndev);
-       netif_stop_queue(ndev);
-       cfspi->ndev = ndev;
-       cfspi->pdev = pdev;
 
        /* Set flow info. */
        cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
                cfspi->slave_talked = false;
        }
 
-       /* Assign the SPI device. */
-       cfspi->dev = dev;
-       /* Assign the device ifc to this SPI interface. */
-       dev->ifc = &cfspi->ifc;
-
        /* Allocate DMA buffers. */
-       cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
-       if (!cfspi->xfer.va_tx) {
+       cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+       if (!cfspi->xfer.va_tx[0]) {
                res = -ENODEV;
-               goto err_dma_alloc_tx;
+               goto err_dma_alloc_tx_0;
        }
 
        cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
        /* Schedule the work queue. */
        queue_work(cfspi->wq, &cfspi->work);
 
+       return 0;
+
+ err_create_wq:
+       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+       dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+       return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+       struct cfspi *cfspi = netdev_priv(dev);
+
+       /* Remove from list. */
+       spin_lock(&cfspi_list_lock);
+       list_del(&cfspi->list);
+       spin_unlock(&cfspi_list_lock);
+
+       cfspi->ndev = NULL;
+       /* Free DMA buffers. */
+       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+       dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+       set_bit(SPI_TERMINATE, &cfspi->state);
+       wake_up_interruptible(&cfspi->wait);
+       destroy_workqueue(cfspi->wq);
+       /* Destroy debugfs directory and files. */
+       dev_debugfs_rem(cfspi);
+       return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+       .ndo_open = cfspi_open,
+       .ndo_stop = cfspi_close,
+       .ndo_init = cfspi_init,
+       .ndo_uninit = cfspi_uninit,
+       .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+       struct cfspi *cfspi = netdev_priv(dev);
+       dev->features = 0;
+       dev->netdev_ops = &cfspi_ops;
+       dev->type = ARPHRD_CAIF;
+       dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+       dev->tx_queue_len = 0;
+       dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+       dev->destructor = free_netdev;
+       skb_queue_head_init(&cfspi->qhead);
+       skb_queue_head_init(&cfspi->chead);
+       cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+       cfspi->cfdev.use_frag = false;
+       cfspi->cfdev.use_stx = false;
+       cfspi->cfdev.use_fcs = false;
+       cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+       struct cfspi *cfspi = NULL;
+       struct net_device *ndev;
+       struct cfspi_dev *dev;
+       int res;
+       dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+       ndev = alloc_netdev(sizeof(struct cfspi),
+                       "cfspi%d", cfspi_setup);
+       if (!dev)
+               return -ENODEV;
+
+       cfspi = netdev_priv(ndev);
+       netif_stop_queue(ndev);
+       cfspi->ndev = ndev;
+       cfspi->pdev = pdev;
+
+       /* Assign the SPI device. */
+       cfspi->dev = dev;
+       /* Assign the device ifc to this SPI interface. */
+       dev->ifc = &cfspi->ifc;
+
        /* Register network device. */
        res = register_netdev(ndev);
        if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
        return res;
 
  err_net_reg:
-       dev_debugfs_rem(cfspi);
-       set_bit(SPI_TERMINATE, &cfspi->state);
-       wake_up_interruptible(&cfspi->wait);
-       destroy_workqueue(cfspi->wq);
- err_create_wq:
-       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
-       dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
        free_netdev(ndev);
 
        return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
 
 int cfspi_spi_remove(struct platform_device *pdev)
 {
-       struct list_head *list_node;
-       struct list_head *n;
-       struct cfspi *cfspi = NULL;
-       struct cfspi_dev *dev;
-
-       dev = (struct cfspi_dev *)pdev->dev.platform_data;
-       spin_lock(&cfspi_list_lock);
-       list_for_each_safe(list_node, n, &cfspi_list) {
-               cfspi = list_entry(list_node, struct cfspi, list);
-               /* Find the corresponding device. */
-               if (cfspi->dev == dev) {
-                       /* Remove from list. */
-                       list_del(list_node);
-                       /* Free DMA buffers. */
-                       dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
-                       dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
-                       set_bit(SPI_TERMINATE, &cfspi->state);
-                       wake_up_interruptible(&cfspi->wait);
-                       destroy_workqueue(cfspi->wq);
-                       /* Destroy debugfs directory and files. */
-                       dev_debugfs_rem(cfspi);
-                       unregister_netdev(cfspi->ndev);
-                       spin_unlock(&cfspi_list_lock);
-                       return 0;
-               }
-       }
-       spin_unlock(&cfspi_list_lock);
-       return -ENODEV;
+       /* Everything is done in cfspi_uninit(). */
+       return 0;
 }
 
 static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
 
        list_for_each_safe(list_node, n, &cfspi_list) {
                cfspi = list_entry(list_node, struct cfspi, list);
-               platform_device_unregister(cfspi->pdev);
+               unregister_netdev(cfspi->ndev);
        }
 
        /* Destroy sysfs files. */
index f6c98fb..ab45758 100644 (file)
@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
 
 source "drivers/net/can/c_can/Kconfig"
 
+source "drivers/net/can/cc770/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
 source "drivers/net/can/softing/Kconfig"
index 24ebfe8..938be37 100644 (file)
@@ -14,6 +14,7 @@ obj-y                         += softing/
 obj-$(CONFIG_CAN_SJA1000)      += sja1000/
 obj-$(CONFIG_CAN_MSCAN)                += mscan/
 obj-$(CONFIG_CAN_C_CAN)                += c_can/
+obj-$(CONFIG_CAN_CC770)                += cc770/
 obj-$(CONFIG_CAN_AT91)         += at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)      += ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)      += mcp251x.o
index 044ea06..6ea905c 100644 (file)
@@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = {
        .id_table = at91_can_id_table,
 };
 
-static int __init at91_can_module_init(void)
-{
-       return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
-       platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
 
 MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
 MODULE_LICENSE("GPL v2");
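Editor's note: this hunk and the two that follow (bfin_can, c_can_platform) drop identical registration boilerplate in favour of module_platform_driver(). As a rough sketch of what the macro stands in for here (not the literal preprocessor output):

static int __init at91_can_driver_init(void)
{
	return platform_driver_register(&at91_can_driver);
}
module_init(at91_can_driver_init);

static void __exit at91_can_driver_exit(void)
{
	platform_driver_unregister(&at91_can_driver);
}
module_exit(at91_can_driver_exit);
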
index a1c5abc..349e0fa 100644 (file)
@@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = {
        },
 };
 
-static int __init bfin_can_init(void)
-{
-       return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
-       platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
 
 MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
 MODULE_LICENSE("GPL");
index 0b5c6f8..5e1a5ff 100644 (file)
@@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = {
        .remove = __devexit_p(c_can_plat_remove),
 };
 
-static int __init c_can_plat_init(void)
-{
-       return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
-       platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
 
 MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644 (file)
index 0000000..22c07a8
--- /dev/null
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+       tristate "Bosch CC770 and Intel AN82527 devices"
+       depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+       tristate "ISA Bus based legacy CC770 driver"
+       ---help---
+         This driver adds legacy support for CC770 and AN82527 chips
+         connected to the ISA bus using I/O port, memory mapped or
+         indirect access.
+
+config CAN_CC770_PLATFORM
+       tristate "Generic Platform Bus based CC770 driver"
+       ---help---
+         This driver adds support for the CC770 and AN82527 chips
+         connected to the "platform bus" (the Linux abstraction for
+         devices attached directly to the processor).
+
+endif
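
Editor's note: for illustration only (a hypothetical configuration fragment, not part of the patch), enabling the new driver and both bus bindings as modules could look like the fragment below. The cc770 core module added in the next file additionally accepts the msgobj15_eff and i82527_compat parameters, e.g. "modprobe cc770 msgobj15_eff=1".

CONFIG_CAN=m
CONFIG_CAN_DEV=m
CONFIG_CAN_CC770=m
CONFIG_CAN_CC770_ISA=m
CONFIG_CAN_CC770_PLATFORM=m
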
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644 (file)
index 0000000..9fb8321
--- /dev/null
@@ -0,0 +1,9 @@
+#
+#  Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644 (file)
index 0000000..7668967
--- /dev/null
@@ -0,0 +1,881 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses additional feature 3 above on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames and
+ * therefore it is set to 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load, which makes it a good fit for
+ * this use case. The frame type (EFF/SFF) for message object 15 can be
+ * defined via the kernel module parameter "msgobj15_eff". If not equal
+ * to 0, it will receive 29-bit EFF frames, otherwise 11-bit SFF frames.
+ */
+static int msgobj15_eff;
+module_param(msgobj15_eff, int, S_IRUGO);
+MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
+                "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
+                "without using additional functions");
+
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structure below allow them to be configured and assigned to the
+ * real message objects.
+ */
+static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
+       [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
+       [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
+       [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
+       [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
+                             CC770_OBJ_FLAG_EFF,
+       [CC770_OBJ_TX] = 0,
+};
+
+static struct can_bittiming_const cc770_bittiming_const = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 1,
+       .tseg1_max = 16,
+       .tseg2_min = 1,
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 64,
+       .brp_inc = 1,
+};
+
+static inline int intid2obj(unsigned int intid)
+{
+       if (intid == 2)
+               return 0;
+       else
+               return MSGOBJ_LAST + 2 - intid;
+}
+
+static void enable_all_objs(const struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       u8 msgcfg;
+       unsigned char obj_flags;
+       unsigned int o, mo;
+
+       for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+               obj_flags = priv->obj_flags[o];
+               mo = obj2msgobj(o);
+
+               if (obj_flags & CC770_OBJ_FLAG_RX) {
+                       /*
+                        * We don't need extra objects for RTR and EFF if
+                        * the additional CC770 functions are enabled.
+                        */
+                       if (priv->control_normal_mode & CTRL_EAF) {
+                               if (o > 0)
+                                       continue;
+                               netdev_dbg(dev, "Message object %d for "
+                                          "RX data, RTR, SFF and EFF\n", mo);
+                       } else {
+                               netdev_dbg(dev,
+                                          "Message object %d for RX %s %s\n",
+                                          mo, obj_flags & CC770_OBJ_FLAG_RTR ?
+                                          "RTR" : "data",
+                                          obj_flags & CC770_OBJ_FLAG_EFF ?
+                                          "EFF" : "SFF");
+                       }
+
+                       if (obj_flags & CC770_OBJ_FLAG_EFF)
+                               msgcfg = MSGCFG_XTD;
+                       else
+                               msgcfg = 0;
+                       if (obj_flags & CC770_OBJ_FLAG_RTR)
+                               msgcfg |= MSGCFG_DIR;
+
+                       cc770_write_reg(priv, msgobj[mo].config, msgcfg);
+                       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                                       MSGVAL_SET | TXIE_RES |
+                                       RXIE_SET | INTPND_RES);
+
+                       if (obj_flags & CC770_OBJ_FLAG_RTR)
+                               cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                               NEWDAT_RES | CPUUPD_SET |
+                                               TXRQST_RES | RMTPND_RES);
+                       else
+                               cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                               NEWDAT_RES | MSGLST_RES |
+                                               TXRQST_RES | RMTPND_RES);
+               } else {
+                       netdev_dbg(dev, "Message object %d for "
+                                  "TX data, RTR, SFF and EFF\n", mo);
+
+                       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                       RMTPND_RES | TXRQST_RES |
+                                       CPUUPD_RES | NEWDAT_RES);
+                       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                                       MSGVAL_RES | TXIE_RES |
+                                       RXIE_RES | INTPND_RES);
+               }
+       }
+}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+       int o, mo;
+
+       for (o = 0; o <  ARRAY_SIZE(priv->obj_flags); o++) {
+               mo = obj2msgobj(o);
+
+               if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+                       if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+                               continue;
+
+                       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                       NEWDAT_RES | MSGLST_RES |
+                                       TXRQST_RES | RMTPND_RES);
+                       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                                       MSGVAL_RES | TXIE_RES |
+                                       RXIE_RES | INTPND_RES);
+               } else {
+                       /* Clear message object for send */
+                       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                       RMTPND_RES | TXRQST_RES |
+                                       CPUUPD_RES | NEWDAT_RES);
+                       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                                       MSGVAL_RES | TXIE_RES |
+                                       RXIE_RES | INTPND_RES);
+               }
+       }
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+
+       /* Enable configuration and put chip in bus-off, disable interrupts */
+       cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+
+       priv->can.state = CAN_STATE_STOPPED;
+
+       /* Clear interrupts */
+       cc770_read_reg(priv, interrupt);
+
+       /* Clear status register */
+       cc770_write_reg(priv, status, 0);
+
+       /* Disable all used message objects */
+       disable_all_objs(priv);
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+
+       /* Clear interrupts */
+       cc770_read_reg(priv, interrupt);
+
+       /* Clear status register and pre-set last error code */
+       cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+       /* Enable all used message objects */
+       enable_all_objs(dev);
+
+       /*
+        * Clear bus-off, interrupts only for errors,
+        * not for status change
+        */
+       cc770_write_reg(priv, control, priv->control_normal_mode);
+
+       priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void chipset_init(struct cc770_priv *priv)
+{
+       int mo, id, data;
+
+       /* Enable configuration and put chip in bus-off, disable interrupts */
+       cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));
+
+       /* Set CLKOUT divider and slew rates */
+       cc770_write_reg(priv, clkout, priv->clkout);
+
+       /* Configure CPU interface / CLKOUT enable */
+       cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+       /* Set bus configuration  */
+       cc770_write_reg(priv, bus_config, priv->bus_config);
+
+       /* Clear interrupts */
+       cc770_read_reg(priv, interrupt);
+
+       /* Clear status register */
+       cc770_write_reg(priv, status, 0);
+
+       /* Clear and invalidate message objects */
+       for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
+               cc770_write_reg(priv, msgobj[mo].ctrl0,
+                               INTPND_UNC | RXIE_RES |
+                               TXIE_RES | MSGVAL_RES);
+               cc770_write_reg(priv, msgobj[mo].ctrl0,
+                               INTPND_RES | RXIE_RES |
+                               TXIE_RES | MSGVAL_RES);
+               cc770_write_reg(priv, msgobj[mo].ctrl1,
+                               NEWDAT_RES | MSGLST_RES |
+                               TXRQST_RES | RMTPND_RES);
+               for (data = 0; data < 8; data++)
+                       cc770_write_reg(priv, msgobj[mo].data[data], 0);
+               for (id = 0; id < 4; id++)
+                       cc770_write_reg(priv, msgobj[mo].id[id], 0);
+               cc770_write_reg(priv, msgobj[mo].config, 0);
+       }
+
+       /* Set all global ID masks to "don't care" */
+       cc770_write_reg(priv, global_mask_std[0], 0);
+       cc770_write_reg(priv, global_mask_std[1], 0);
+       cc770_write_reg(priv, global_mask_ext[0], 0);
+       cc770_write_reg(priv, global_mask_ext[1], 0);
+       cc770_write_reg(priv, global_mask_ext[2], 0);
+       cc770_write_reg(priv, global_mask_ext[3], 0);
+
+}
+
+static int cc770_probe_chip(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+
+       /* Enable configuration, put chip in bus-off, disable ints */
+       cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
+       /* Configure cpu interface / CLKOUT disable */
+       cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+       /*
+        * Check if hardware reset is still active or maybe there
+        * is no chip in this address space
+        */
+       if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
+               netdev_info(dev, "probing @0x%p failed (reset)\n",
+                           priv->reg_base);
+               return -ENODEV;
+       }
+
+       /* Write and read back test pattern (some arbitrary values) */
+       cc770_write_reg(priv, msgobj[1].data[1], 0x25);
+       cc770_write_reg(priv, msgobj[2].data[3], 0x52);
+       cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
+       if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
+           (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
+           (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
+               netdev_info(dev, "probing @0x%p failed (pattern)\n",
+                           priv->reg_base);
+               return -ENODEV;
+       }
+
+       /* Check if this chip is a CC770 supporting additional functions */
+       if (cc770_read_reg(priv, control) & CTRL_EAF)
+               priv->control_normal_mode |= CTRL_EAF;
+
+       return 0;
+}
+
+static void cc770_start(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+
+       /* enter reset mode */
+       if (priv->can.state != CAN_STATE_STOPPED)
+               set_reset_mode(dev);
+
+       /* leave reset mode */
+       set_normal_mode(dev);
+}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+       switch (mode) {
+       case CAN_MODE_START:
+               cc770_start(dev);
+               netif_wake_queue(dev);
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int cc770_set_bittiming(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct can_bittiming *bt = &priv->can.bittiming;
+       u8 btr0, btr1;
+
+       btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+       btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+               (((bt->phase_seg2 - 1) & 0x7) << 4);
+       if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+               btr1 |= 0x80;
+
+       netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+       cc770_write_reg(priv, bit_timing_0, btr0);
+       cc770_write_reg(priv, bit_timing_1, btr1);
+
+       return 0;
+}
+
+static int cc770_get_berr_counter(const struct net_device *dev,
+                                 struct can_berr_counter *bec)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+
+       bec->txerr = cc770_read_reg(priv, tx_error_counter);
+       bec->rxerr = cc770_read_reg(priv, rx_error_counter);
+
+       return 0;
+}
+
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf = (struct can_frame *)skb->data;
+       unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+       u8 dlc, rtr;
+       u32 id;
+       int i;
+
+       if (can_dropped_invalid_skb(dev, skb))
+               return NETDEV_TX_OK;
+
+       if ((cc770_read_reg(priv,
+                           msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+               netdev_err(dev, "TX register is still occupied!\n");
+               return NETDEV_TX_BUSY;
+       }
+
+       netif_stop_queue(dev);
+
+       dlc = cf->can_dlc;
+       id = cf->can_id;
+       if (cf->can_id & CAN_RTR_FLAG)
+               rtr = 0;
+       else
+               rtr = MSGCFG_DIR;
+       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                       RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                       MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+       if (id & CAN_EFF_FLAG) {
+               id &= CAN_EFF_MASK;
+               cc770_write_reg(priv, msgobj[mo].config,
+                               (dlc << 4) | rtr | MSGCFG_XTD);
+               cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
+               cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
+               cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
+               cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
+       } else {
+               id &= CAN_SFF_MASK;
+               cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
+               cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
+               cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
+       }
+
+       for (i = 0; i < dlc; i++)
+               cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                       RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+
+       stats->tx_bytes += dlc;
+
+       can_put_echo_skb(skb, dev, 0);
+
+       /*
+        * HM: We had some cases of repeated IRQs, so make sure the
+        * INT is acknowledged. It is already done further up, but
+        * doing it again fixed the issue.
+        */
+       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                       MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+       return NETDEV_TX_OK;
+}
+
+static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u8 config;
+       u32 id;
+       int i;
+
+       skb = alloc_can_skb(dev, &cf);
+       if (!skb)
+               return;
+
+       config = cc770_read_reg(priv, msgobj[mo].config);
+
+       if (ctrl1 & RMTPND_SET) {
+               /*
+                * Unfortunately, the chip does not store the real message
+                * identifier of the received remote transmission request
+                * frame. Therefore we set it to 0.
+                */
+               cf->can_id = CAN_RTR_FLAG;
+               if (config & MSGCFG_XTD)
+                       cf->can_id |= CAN_EFF_FLAG;
+               cf->can_dlc = 0;
+       } else {
+               if (config & MSGCFG_XTD) {
+                       id = cc770_read_reg(priv, msgobj[mo].id[3]);
+                       id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
+                       id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
+                       id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
+                       id >>= 3;
+                       id |= CAN_EFF_FLAG;
+               } else {
+                       id = cc770_read_reg(priv, msgobj[mo].id[1]);
+                       id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
+                       id >>= 5;
+               }
+
+               cf->can_id = id;
+               cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
+               for (i = 0; i < cf->can_dlc; i++)
+                       cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
+       }
+       netif_rx(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+}
+
+static int cc770_err(struct net_device *dev, u8 status)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct can_frame *cf;
+       struct sk_buff *skb;
+       u8 lec;
+
+       netdev_dbg(dev, "status interrupt (%#x)\n", status);
+
+       skb = alloc_can_err_skb(dev, &cf);
+       if (!skb)
+               return -ENOMEM;
+
+       /* Use extended functions of the CC770 */
+       if (priv->control_normal_mode & CTRL_EAF) {
+               cf->data[6] = cc770_read_reg(priv, tx_error_counter);
+               cf->data[7] = cc770_read_reg(priv, rx_error_counter);
+       }
+
+       if (status & STAT_BOFF) {
+               /* Disable interrupts */
+               cc770_write_reg(priv, control, CTRL_INI);
+               cf->can_id |= CAN_ERR_BUSOFF;
+               priv->can.state = CAN_STATE_BUS_OFF;
+               can_bus_off(dev);
+       } else if (status & STAT_WARN) {
+               cf->can_id |= CAN_ERR_CRTL;
+               /* Only the CC770 shows error passive */
+               if (cf->data[7] > 127) {
+                       cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
+                               CAN_ERR_CRTL_TX_PASSIVE;
+                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
+                       priv->can.can_stats.error_passive++;
+               } else {
+                       cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
+                               CAN_ERR_CRTL_TX_WARNING;
+                       priv->can.state = CAN_STATE_ERROR_WARNING;
+                       priv->can.can_stats.error_warning++;
+               }
+       } else {
+               /* Back to error active */
+               cf->can_id |= CAN_ERR_PROT;
+               cf->data[2] = CAN_ERR_PROT_ACTIVE;
+               priv->can.state = CAN_STATE_ERROR_ACTIVE;
+       }
+
+       lec = status & STAT_LEC_MASK;
+       if (lec < 7 && lec > 0) {
+               if (lec == STAT_LEC_ACK) {
+                       cf->can_id |= CAN_ERR_ACK;
+               } else {
+                       cf->can_id |= CAN_ERR_PROT;
+                       switch (lec) {
+                       case STAT_LEC_STUFF:
+                               cf->data[2] |= CAN_ERR_PROT_STUFF;
+                               break;
+                       case STAT_LEC_FORM:
+                               cf->data[2] |= CAN_ERR_PROT_FORM;
+                               break;
+                       case STAT_LEC_BIT1:
+                               cf->data[2] |= CAN_ERR_PROT_BIT1;
+                               break;
+                       case STAT_LEC_BIT0:
+                               cf->data[2] |= CAN_ERR_PROT_BIT0;
+                               break;
+                       case STAT_LEC_CRC:
+                               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+                               break;
+                       }
+               }
+       }
+
+       netif_rx(skb);
+
+       stats->rx_packets++;
+       stats->rx_bytes += cf->can_dlc;
+
+       return 0;
+}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       u8 status;
+
+       status = cc770_read_reg(priv, status);
+       /* Reset the status register including RXOK and TXOK */
+       cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+       if (status & (STAT_WARN | STAT_BOFF) ||
+           (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+               cc770_err(dev, status);
+               return status & STAT_BOFF;
+       }
+
+       return 0;
+}
+
+static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       unsigned int mo = obj2msgobj(o);
+       u8 ctrl1;
+       int n = CC770_MAX_MSG;
+
+       while (n--) {
+               ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+               if (!(ctrl1 & NEWDAT_SET))  {
+                       /* Check for RTR if additional functions are enabled */
+                       if (priv->control_normal_mode & CTRL_EAF) {
+                               if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
+                                     INTPND_SET))
+                                       break;
+                       } else {
+                               break;
+                       }
+               }
+
+               if (ctrl1 & MSGLST_SET) {
+                       stats->rx_over_errors++;
+                       stats->rx_errors++;
+               }
+               if (mo < MSGOBJ_LAST)
+                       cc770_write_reg(priv, msgobj[mo].ctrl1,
+                                       NEWDAT_RES | MSGLST_RES |
+                                       TXRQST_UNC | RMTPND_UNC);
+               cc770_rx(dev, mo, ctrl1);
+
+               cc770_write_reg(priv, msgobj[mo].ctrl0,
+                               MSGVAL_SET | TXIE_RES |
+                               RXIE_SET | INTPND_RES);
+               cc770_write_reg(priv, msgobj[mo].ctrl1,
+                               NEWDAT_RES | MSGLST_RES |
+                               TXRQST_RES | RMTPND_RES);
+       }
+}
+
+static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       unsigned int mo = obj2msgobj(o);
+       u8 ctrl0, ctrl1;
+       int n = CC770_MAX_MSG;
+
+       while (n--) {
+               ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
+               if (!(ctrl0 & INTPND_SET))
+                       break;
+
+               ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+               cc770_rx(dev, mo, ctrl1);
+
+               cc770_write_reg(priv, msgobj[mo].ctrl0,
+                               MSGVAL_SET | TXIE_RES |
+                               RXIE_SET | INTPND_RES);
+               cc770_write_reg(priv, msgobj[mo].ctrl1,
+                               NEWDAT_RES | CPUUPD_SET |
+                               TXRQST_RES | RMTPND_RES);
+       }
+}
+
+static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       unsigned int mo = obj2msgobj(o);
+
+       /* Nothing more to send, switch off interrupts */
+       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                       MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+       /*
+        * We had some cases of repeated IRQ so make sure the
+        * INT is acknowledged
+        */
+       cc770_write_reg(priv, msgobj[mo].ctrl0,
+                       MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+       stats->tx_packets++;
+       can_get_echo_skb(dev, 0);
+       netif_wake_queue(dev);
+}
+
+irqreturn_t cc770_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct cc770_priv *priv = netdev_priv(dev);
+       u8 intid;
+       int o, n = 0;
+
+       /* Shared interrupts and IRQ off? */
+       if (priv->can.state == CAN_STATE_STOPPED)
+               return IRQ_NONE;
+
+       if (priv->pre_irq)
+               priv->pre_irq(priv);
+
+       while (n < CC770_MAX_IRQ) {
+               /* Read the highest pending interrupt request */
+               intid = cc770_read_reg(priv, interrupt);
+               if (!intid)
+                       break;
+               n++;
+
+               if (intid == 1) {
+                       /* Exit in case of bus-off */
+                       if (cc770_status_interrupt(dev))
+                               break;
+               } else {
+                       o = intid2obj(intid);
+
+                       if (o >= CC770_OBJ_MAX) {
+                               netdev_err(dev, "Unexpected interrupt id %d\n",
+                                          intid);
+                               continue;
+                       }
+
+                       if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
+                               cc770_rtr_interrupt(dev, o);
+                       else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
+                               cc770_rx_interrupt(dev, o);
+                       else
+                               cc770_tx_interrupt(dev, o);
+               }
+       }
+
+       if (priv->post_irq)
+               priv->post_irq(priv);
+
+       if (n >= CC770_MAX_IRQ)
+               netdev_dbg(dev, "%d messages handled in ISR\n", n);
+
+       return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cc770_open(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       int err;
+
+       /* set chip into reset mode */
+       set_reset_mode(dev);
+
+       /* common open */
+       err = open_candev(dev);
+       if (err)
+               return err;
+
+       err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+                         dev->name, dev);
+       if (err) {
+               close_candev(dev);
+               return -EAGAIN;
+       }
+
+       /* init and start chip */
+       cc770_start(dev);
+
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+static int cc770_close(struct net_device *dev)
+{
+       netif_stop_queue(dev);
+       set_reset_mode(dev);
+
+       free_irq(dev->irq, dev);
+       close_candev(dev);
+
+       return 0;
+}
+
+struct net_device *alloc_cc770dev(int sizeof_priv)
+{
+       struct net_device *dev;
+       struct cc770_priv *priv;
+
+       dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
+                          CC770_ECHO_SKB_MAX);
+       if (!dev)
+               return NULL;
+
+       priv = netdev_priv(dev);
+
+       priv->dev = dev;
+       priv->can.bittiming_const = &cc770_bittiming_const;
+       priv->can.do_set_bittiming = cc770_set_bittiming;
+       priv->can.do_set_mode = cc770_set_mode;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+       memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+       if (sizeof_priv)
+               priv->priv = (void *)priv + sizeof(struct cc770_priv);
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
+void free_cc770dev(struct net_device *dev)
+{
+       free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_cc770dev);
+
+static const struct net_device_ops cc770_netdev_ops = {
+       .ndo_open = cc770_open,
+       .ndo_stop = cc770_close,
+       .ndo_start_xmit = cc770_start_xmit,
+};
+
+int register_cc770dev(struct net_device *dev)
+{
+       struct cc770_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = cc770_probe_chip(dev);
+       if (err)
+               return err;
+
+       dev->netdev_ops = &cc770_netdev_ops;
+
+       dev->flags |= IFF_ECHO; /* we support local echo */
+
+       /* Should we use additional functions? */
+       if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
+               priv->can.do_get_berr_counter = cc770_get_berr_counter;
+               priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
+               netdev_dbg(dev, "i82527 mode with additional functions\n");
+       } else {
+               priv->control_normal_mode = CTRL_IE | CTRL_EIE;
+               netdev_dbg(dev, "strict i82527 compatibility mode\n");
+       }
+
+       chipset_init(priv);
+       set_reset_mode(dev);
+
+       return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_cc770dev);
+
+void unregister_cc770dev(struct net_device *dev)
+{
+       set_reset_mode(dev);
+       unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
+static __init int cc770_init(void)
+{
+       if (msgobj15_eff) {
+               cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
+               cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
+       }
+
+       pr_info("CAN netdevice driver\n");
+
+       return 0;
+}
+module_init(cc770_init);
+
+static __exit void cc770_exit(void)
+{
+       pr_info("driver removed\n");
+}
+module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644 (file)
index 0000000..a1739db
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+       u8 ctrl0;
+       u8 ctrl1;
+       u8 id[4];
+       u8 config;
+       u8 data[8];
+       u8 dontuse;             /* padding */
+} __packed;
+
+struct cc770_regs {
+       union {
+               struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+               struct {
+                       u8 control;             /* Control Register */
+                       u8 status;              /* Status Register */
+                       u8 cpu_interface;       /* CPU Interface Register */
+                       u8 dontuse1;
+                       u8 high_speed_read[2];  /* High Speed Read */
+                       u8 global_mask_std[2];  /* Standard Global Mask */
+                       u8 global_mask_ext[4];  /* Extended Global Mask */
+                       u8 msg15_mask[4];       /* Message 15 Mask */
+                       u8 dontuse2[15];
+                       u8 clkout;              /* Clock Out Register */
+                       u8 dontuse3[15];
+                       u8 bus_config;          /* Bus Configuration Register */
+                       u8 dontuse4[15];
+                       u8 bit_timing_0;        /* Bit Timing Register byte 0 */
+                       u8 dontuse5[15];
+                       u8 bit_timing_1;        /* Bit Timing Register byte 1 */
+                       u8 dontuse6[15];
+                       u8 interrupt;           /* Interrupt Register */
+                       u8 dontuse7[15];
+                       u8 rx_error_counter;    /* Receive Error Counter */
+                       u8 dontuse8[15];
+                       u8 tx_error_counter;    /* Transmit Error Counter */
+                       u8 dontuse9[31];
+                       u8 p1_conf;
+                       u8 dontuse10[15];
+                       u8 p2_conf;
+                       u8 dontuse11[15];
+                       u8 p1_in;
+                       u8 dontuse12[15];
+                       u8 p2_in;
+                       u8 dontuse13[15];
+                       u8 p1_out;
+                       u8 dontuse14[15];
+                       u8 p2_out;
+                       u8 dontuse15[15];
+                       u8 serial_reset_addr;
+               };
+       };
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI       0x01    /* Initialization */
+#define CTRL_IE                0x02    /* Interrupt Enable */
+#define CTRL_SIE       0x04    /* Status Interrupt Enable */
+#define CTRL_EIE       0x08    /* Error Interrupt Enable */
+#define CTRL_EAF       0x20    /* Enable additional functions */
+#define CTRL_CCE       0x40    /* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF 0x01    /* Stuff error */
+#define STAT_LEC_FORM  0x02    /* Form error */
+#define STAT_LEC_ACK   0x03    /* Acknowledgement error */
+#define STAT_LEC_BIT1  0x04    /* Bit1 error */
+#define STAT_LEC_BIT0  0x05    /* Bit0 error */
+#define STAT_LEC_CRC   0x06    /* CRC error */
+#define STAT_LEC_MASK  0x07    /* Last Error Code mask */
+#define STAT_TXOK      0x08    /* Transmit Message Successfully */
+#define STAT_RXOK      0x10    /* Receive Message Successfully */
+#define STAT_WAKE      0x20    /* Wake Up Status */
+#define STAT_WARN      0x40    /* Warning Status */
+#define STAT_BOFF      0x80    /* Bus Off Status */
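+
+/*
+ * Note on the LEC field (sketch of how this driver uses it): the ISR
+ * writes STAT_LEC_MASK back to the status register, so a LEC value
+ * between 1 and 6 read later indicates a new bus error; see
+ * cc770_status_interrupt() and cc770_err().
+ */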
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
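+/*
+ * The following flags are encoded in two bits each (as the paired
+ * *_RES/*_SET/*_UNC values suggest): writing the RES pattern clears a
+ * flag, SET sets it, and UNC leaves it unchanged, so individual flags
+ * can be updated without reading the register first.
+ */
+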
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES     0x01    /* No Interrupt pending */
+#define INTPND_SET     0x02    /* Interrupt pending */
+#define INTPND_UNC     0x03
+#define RXIE_RES       0x04    /* Receive Interrupt Disable */
+#define RXIE_SET       0x08    /* Receive Interrupt Enable */
+#define RXIE_UNC       0x0c
+#define TXIE_RES       0x10    /* Transmit Interrupt Disable */
+#define TXIE_SET       0x20    /* Transmit Interrupt Enable */
+#define TXIE_UNC       0x30
+#define MSGVAL_RES     0x40    /* Message Invalid */
+#define MSGVAL_SET     0x80    /* Message Valid */
+#define MSGVAL_UNC     0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES     0x01    /* No New Data */
+#define NEWDAT_SET     0x02    /* New Data */
+#define NEWDAT_UNC     0x03
+#define MSGLST_RES     0x04    /* No Message Lost */
+#define MSGLST_SET     0x08    /* Message Lost */
+#define MSGLST_UNC     0x0c
+#define CPUUPD_RES     0x04    /* No CPU Updating */
+#define CPUUPD_SET     0x08    /* CPU Updating */
+#define CPUUPD_UNC     0x0c
+#define TXRQST_RES     0x10    /* No Transmission Request */
+#define TXRQST_SET     0x20    /* Transmission Request */
+#define TXRQST_UNC     0x30
+#define RMTPND_RES     0x40    /* No Remote Request Pending */
+#define RMTPND_SET     0x80    /* Remote Request Pending */
+#define RMTPND_UNC     0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD     0x04    /* Extended Identifier */
+#define MSGCFG_DIR     0x08    /* Direction is Transmit */
+
+#define MSGOBJ_FIRST   1
+#define MSGOBJ_LAST    15
+
+#define CC770_IO_SIZE  0x100
+#define CC770_MAX_IRQ  20      /* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG  4       /* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX     1
+
+#define cc770_read_reg(priv, member)                                   \
+       priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value)                           \
+       priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
+
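+/*
+ * Usage sketch: the 'member' argument names a field of struct
+ * cc770_regs and offsetof() turns it into the register offset passed
+ * to the board-specific read_reg/write_reg callbacks, e.g.:
+ *
+ *   u8 status = cc770_read_reg(priv, status);
+ *   cc770_write_reg(priv, control, CTRL_INI);
+ */
+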
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX      0x01
+#define CC770_OBJ_FLAG_RTR     0x02
+#define CC770_OBJ_FLAG_EFF     0x04
+
+enum {
+       CC770_OBJ_RX0 = 0,      /* for receiving normal messages */
+       CC770_OBJ_RX1,          /* for receiving normal messages */
+       CC770_OBJ_RX_RTR0,      /* for receiving remote transmission requests */
+       CC770_OBJ_RX_RTR1,      /* for receiving remote transmission requests */
+       CC770_OBJ_TX,           /* for sending messages */
+       CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o)  (MSGOBJ_LAST - (o)) /* message object 11..15 */
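+/*
+ * With the enumeration above this maps CC770_OBJ_RX0 (0) to message
+ * object 15, down to CC770_OBJ_TX (4) mapped to message object 11.
+ */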
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+       struct can_priv can;    /* must be the first member */
+       struct sk_buff *echo_skb;
+
+       /* the lower layer is responsible for appropriate locking */
+       u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+       void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+       void (*pre_irq)(const struct cc770_priv *priv);
+       void (*post_irq)(const struct cc770_priv *priv);
+
+       void *priv;             /* for board-specific data */
+       struct net_device *dev;
+
+       void __iomem *reg_base;  /* ioremap'ed address to registers */
+       unsigned long irq_flags; /* for request_irq() */
+
+       unsigned char obj_flags[CC770_OBJ_MAX];
+       u8 control_normal_mode; /* Control register for normal mode */
+       u8 cpu_interface;       /* CPU interface register */
+       u8 clkout;              /* Clock out register */
+       u8 bus_config;          /* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644 (file)
index 0000000..4be5fe2
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC/104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ *   insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ *   insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
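+ *
+ * With indirect access, the register number is written to the base
+ * port and the data is transferred via base port + 1, so only two
+ * I/O ports are occupied per device (see CC770_IOSIZE_INDIRECT).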
+ *
+ * Furthermore, the following module parameters can be defined:
+ *
+ *   clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ *   cir: CPU interface register (default=0x40 [DSC])
+ *   bcr: Bus configuration register (default=0x40 [CBY])
+ *   cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ *   insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT    16000000        /* 16 MHz */
+#define COR_DEFAULT    0x00
+#define BCR_DEFAULT    BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+                "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE          0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+       return readb(priv->reg_base + reg);
+}
+
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+                                     int reg, u8 val)
+{
+       writeb(val, priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+       return inb((unsigned long)priv->reg_base + reg);
+}
+
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+                                      int reg, u8 val)
+{
+       outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+                                            int reg)
+{
+       unsigned long base = (unsigned long)priv->reg_base;
+
+       outb(reg, base);
+       return inb(base + 1);
+}
+
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+                                               int reg, u8 val)
+{
+       unsigned long base = (unsigned long)priv->reg_base;
+
+       outb(reg, base);
+       outb(val, base + 1);
+}
+
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+       struct net_device *dev;
+       struct cc770_priv *priv;
+       void __iomem *base = NULL;
+       int iosize = CC770_IOSIZE;
+       int idx = pdev->id;
+       int err;
+       u32 clktmp;
+
+       dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+               idx, port[idx], mem[idx], irq[idx]);
+       if (mem[idx]) {
+               if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+                       err = -EBUSY;
+                       goto exit;
+               }
+               base = ioremap_nocache(mem[idx], iosize);
+               if (!base) {
+                       err = -ENOMEM;
+                       goto exit_release;
+               }
+       } else {
+               if (indirect[idx] > 0 ||
+                   (indirect[idx] == -1 && indirect[0] > 0))
+                       iosize = CC770_IOSIZE_INDIRECT;
+               if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+                       err = -EBUSY;
+                       goto exit;
+               }
+       }
+
+       dev = alloc_cc770dev(0);
+       if (!dev) {
+               err = -ENOMEM;
+               goto exit_unmap;
+       }
+       priv = netdev_priv(dev);
+
+       dev->irq = irq[idx];
+       priv->irq_flags = IRQF_SHARED;
+       if (mem[idx]) {
+               priv->reg_base = base;
+               dev->base_addr = mem[idx];
+               priv->read_reg = cc770_isa_mem_read_reg;
+               priv->write_reg = cc770_isa_mem_write_reg;
+       } else {
+               priv->reg_base = (void __iomem *)port[idx];
+               dev->base_addr = port[idx];
+
+               if (iosize == CC770_IOSIZE_INDIRECT) {
+                       priv->read_reg = cc770_isa_port_read_reg_indirect;
+                       priv->write_reg = cc770_isa_port_write_reg_indirect;
+               } else {
+                       priv->read_reg = cc770_isa_port_read_reg;
+                       priv->write_reg = cc770_isa_port_write_reg;
+               }
+       }
+
+       if (clk[idx])
+               clktmp = clk[idx];
+       else if (clk[0])
+               clktmp = clk[0];
+       else
+               clktmp = CLK_DEFAULT;
+       priv->can.clock.freq = clktmp;
+
+       if (cir[idx] != 0xff) {
+               priv->cpu_interface = cir[idx];
+       } else if (cir[0] != 0xff) {
+               priv->cpu_interface = cir[0];
+       } else {
+               /* The system clock may not exceed 10 MHz */
+               if (clktmp > 10000000) {
+                       priv->cpu_interface |= CPUIF_DSC;
+                       clktmp /= 2;
+               }
+               /* The memory clock may not exceed 8 MHz */
+               if (clktmp > 8000000)
+                       priv->cpu_interface |= CPUIF_DMC;
+       }
+
+       if (priv->cpu_interface & CPUIF_DSC)
+               priv->can.clock.freq /= 2;
+
+       if (bcr[idx] != 0xff)
+               priv->bus_config = bcr[idx];
+       else if (bcr[0] != 0xff)
+               priv->bus_config = bcr[0];
+       else
+               priv->bus_config = BCR_DEFAULT;
+
+       if (cor[idx] != 0xff)
+               priv->clkout = cor[idx];
+       else if (cor[0] != 0xff)
+               priv->clkout = cor[0];
+       else
+               priv->clkout = COR_DEFAULT;
+
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = register_cc770dev(dev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "couldn't register device (err=%d)\n", err);
+               goto exit_unmap;
+       }
+
+       dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+                priv->reg_base, dev->irq);
+       return 0;
+
+ exit_unmap:
+       if (mem[idx])
+               iounmap(base);
+ exit_release:
+       if (mem[idx])
+               release_mem_region(mem[idx], iosize);
+       else
+               release_region(port[idx], iosize);
+ exit:
+       return err;
+}
+
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct cc770_priv *priv = netdev_priv(dev);
+       int idx = pdev->id;
+
+       unregister_cc770dev(dev);
+       dev_set_drvdata(&pdev->dev, NULL);
+
+       if (mem[idx]) {
+               iounmap(priv->reg_base);
+               release_mem_region(mem[idx], CC770_IOSIZE);
+       } else {
+               if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+                       release_region(port[idx], CC770_IOSIZE_INDIRECT);
+               else
+                       release_region(port[idx], CC770_IOSIZE);
+       }
+       free_cc770dev(dev);
+
+       return 0;
+}
+
+static struct platform_driver cc770_isa_driver = {
+       .probe = cc770_isa_probe,
+       .remove = __devexit_p(cc770_isa_remove),
+       .driver = {
+               .name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+       },
+};
+
+static int __init cc770_isa_init(void)
+{
+       int idx, err;
+
+       for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+               if ((port[idx] || mem[idx]) && irq[idx]) {
+                       cc770_isa_devs[idx] =
+                               platform_device_alloc(KBUILD_MODNAME, idx);
+                       if (!cc770_isa_devs[idx]) {
+                               err = -ENOMEM;
+                               goto exit_free_devices;
+                       }
+                       err = platform_device_add(cc770_isa_devs[idx]);
+                       if (err) {
+                               platform_device_put(cc770_isa_devs[idx]);
+                               goto exit_free_devices;
+                       }
+                       pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+                                "irq=%d\n",
+                                idx, port[idx], mem[idx], irq[idx]);
+               } else if (idx == 0 || port[idx] || mem[idx]) {
+                       pr_err("insufficient parameters supplied\n");
+                       err = -EINVAL;
+                       goto exit_free_devices;
+               }
+       }
+
+       err = platform_driver_register(&cc770_isa_driver);
+       if (err)
+               goto exit_free_devices;
+
+       pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+       return 0;
+
+exit_free_devices:
+       while (--idx >= 0) {
+               if (cc770_isa_devs[idx])
+                       platform_device_unregister(cc770_isa_devs[idx]);
+       }
+
+       return err;
+}
+module_init(cc770_isa_init);
+
+static void __exit cc770_isa_exit(void)
+{
+       int idx;
+
+       platform_driver_unregister(&cc770_isa_driver);
+       for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+               if (cc770_isa_devs[idx])
+                       platform_device_unregister(cc770_isa_devs[idx]);
+       }
+}
+module_exit(cc770_isa_exit);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644 (file)
index 0000000..53115ee
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data are used you should have similar definitions
+ * in your board-specific code:
+ *
+ *   static struct cc770_platform_data myboard_cc770_pdata = {
+ *           .osc_freq = 16000000,
+ *           .cir = 0x41,
+ *           .cor = 0x20,
+ *           .bcr = 0x40,
+ *   };
+ *
+ * Please see include/linux/can/platform/cc770.h for description of
+ * above fields.
+ *
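+ * A board file would then typically bind this data to a platform
+ * device matching this driver's name (illustrative sketch, device and
+ * resource names are made up):
+ *
+ *   static struct platform_device myboard_cc770_device = {
+ *           .name = "cc770_platform",
+ *           .dev.platform_data = &myboard_cc770_pdata,
+ *           .resource = myboard_cc770_resources, /* MEM + IRQ */
+ *           .num_resources = ARRAY_SIZE(myboard_cc770_resources),
+ *   };
+ *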
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ *   can@3,100 {
+ *           compatible = "bosch,cc770";
+ *           reg = <3 0x100 0x80>;
+ *           interrupts = <2 0>;
+ *           interrupt-parent = <&mpic>;
+ *           bosch,external-clock-frequency = <16000000>;
+ *   };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK  16000000
+
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+       return ioread8(priv->reg_base + reg);
+}
+
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+                                    u8 val)
+{
+       iowrite8(val, priv->reg_base + reg);
+}
+
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+                                           struct cc770_priv *priv)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const u32 *prop;
+       int prop_size;
+       u32 clkext;
+
+       prop = of_get_property(np, "bosch,external-clock-frequency",
+                              &prop_size);
+       if (prop && (prop_size == sizeof(u32)))
+               clkext = *prop;
+       else
+               clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+       priv->can.clock.freq = clkext;
+
+       /* The system clock may not exceed 10 MHz */
+       if (priv->can.clock.freq > 10000000) {
+               priv->cpu_interface |= CPUIF_DSC;
+               priv->can.clock.freq /= 2;
+       }
+
+       /* The memory clock may not exceed 8 MHz */
+       if (priv->can.clock.freq > 8000000)
+               priv->cpu_interface |= CPUIF_DMC;
+
+       if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+               priv->cpu_interface |= CPUIF_DMC;
+       if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+               priv->cpu_interface |= CPUIF_MUX;
+
+       if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+               priv->bus_config |= BUSCFG_CBY;
+       if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+               priv->bus_config |= BUSCFG_DR0;
+       if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+               priv->bus_config |= BUSCFG_DR1;
+       if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+               priv->bus_config |= BUSCFG_DT1;
+       if (of_get_property(np, "bosch,polarity-dominant", NULL))
+               priv->bus_config |= BUSCFG_POL;
+
+       prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+       if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+               u32 cdv = clkext / *prop;
+               int slew;
+
+               if (cdv > 0 && cdv < 16) {
+                       priv->cpu_interface |= CPUIF_CEN;
+                       priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+                       prop = of_get_property(np, "bosch,slew-rate",
+                                              &prop_size);
+                       if (prop && (prop_size == sizeof(u32))) {
+                               slew = *prop;
+                       } else {
+                               /* Determine default slew rate */
+                               slew = (CLKOUT_SL_MASK >>
+                                       CLKOUT_SL_SHIFT) -
+                                       ((cdv * clkext - 1) / 8000000);
+                               if (slew < 0)
+                                       slew = 0;
+                       }
+                       priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+                               CLKOUT_SL_MASK;
+               } else {
+                       dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+               }
+       }
+
+       return 0;
+}
+
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+                                            struct cc770_priv *priv)
+{
+
+       struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+       priv->can.clock.freq = pdata->osc_freq;
+       /* Note: priv->cpu_interface is only set from pdata->cir below */
+       if (pdata->cir & CPUIF_DSC)
+               priv->can.clock.freq /= 2;
+       priv->clkout = pdata->cor;
+       priv->bus_config = pdata->bcr;
+       priv->cpu_interface = pdata->cir;
+
+       return 0;
+}
+
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+       struct net_device *dev;
+       struct cc770_priv *priv;
+       struct resource *mem;
+       resource_size_t mem_size;
+       void __iomem *base;
+       int err, irq;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       irq = platform_get_irq(pdev, 0);
+       if (!mem || irq <= 0)
+               return -ENODEV;
+
+       mem_size = resource_size(mem);
+       if (!request_mem_region(mem->start, mem_size, pdev->name))
+               return -EBUSY;
+
+       base = ioremap(mem->start, mem_size);
+       if (!base) {
+               err = -ENOMEM;
+               goto exit_release_mem;
+       }
+
+       dev = alloc_cc770dev(0);
+       if (!dev) {
+               err = -ENOMEM;
+               goto exit_unmap_mem;
+       }
+
+       dev->irq = irq;
+       priv = netdev_priv(dev);
+       priv->read_reg = cc770_platform_read_reg;
+       priv->write_reg = cc770_platform_write_reg;
+       priv->irq_flags = IRQF_SHARED;
+       priv->reg_base = base;
+
+       if (pdev->dev.of_node)
+               err = cc770_get_of_node_data(pdev, priv);
+       else if (pdev->dev.platform_data)
+               err = cc770_get_platform_data(pdev, priv);
+       else
+               err = -ENODEV;
+       if (err)
+               goto exit_free_cc770;
+
+       dev_dbg(&pdev->dev,
+                "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+                "bus_config=0x%02x clkout=0x%02x\n",
+                priv->reg_base, dev->irq, priv->can.clock.freq,
+                priv->cpu_interface, priv->bus_config, priv->clkout);
+
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = register_cc770dev(dev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "couldn't register CC770 device (err=%d)\n", err);
+               goto exit_free_cc770;
+       }
+
+       return 0;
+
+exit_free_cc770:
+       free_cc770dev(dev);
+exit_unmap_mem:
+       iounmap(base);
+exit_release_mem:
+       release_mem_region(mem->start, mem_size);
+
+       return err;
+}
+
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
+       struct cc770_priv *priv = netdev_priv(dev);
+       struct resource *mem;
+
+       unregister_cc770dev(dev);
+       iounmap(priv->reg_base);
+       free_cc770dev(dev);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(mem->start, resource_size(mem));
+
+       return 0;
+}
+
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+       {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+       {.compatible = "intc,82527"},  /* AN82527 from Intel CP */
+       {},
+};
+
+static struct platform_driver cc770_platform_driver = {
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = cc770_platform_table,
+       },
+       .probe = cc770_platform_probe,
+       .remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
index 25695bd..120f1ab 100644 (file)
@@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev)
 
        /* New-style flags. */
        dev->flags = IFF_NOARP;
-       dev->features = NETIF_F_NO_CSUM;
+       dev->features = NETIF_F_HW_CSUM;
 }
 
 struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
index e023379..165a4c7 100644 (file)
@@ -1060,20 +1060,7 @@ static struct platform_driver flexcan_driver = {
        .remove = __devexit_p(flexcan_remove),
 };
 
-static int __init flexcan_init(void)
-{
-       pr_info("%s netdevice driver\n", DRV_NAME);
-       return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
-       platform_driver_unregister(&flexcan_driver);
-       pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
 
 MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
              "Marc Kleine-Budde <kernel@pengutronix.de>");
index 32778d5..08c893c 100644 (file)
@@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = {
        .remove         = __devexit_p(ican3_remove),
 };
 
-static int __init ican3_init(void)
-{
-       return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
-       platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
 
 MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
 MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
index 5fedc33..5caa572 100644 (file)
@@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = {
 #endif
 };
 
-static int __init mpc5xxx_can_init(void)
-{
-       return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
-       platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
 
 MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
 MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
index ec4a311..1c82dd8 100644 (file)
@@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev)
 
        priv->open_time = jiffies;
 
-       clrbits8(&regs->canctl1, MSCAN_LISTEN);
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               setbits8(&regs->canctl1, MSCAN_LISTEN);
+       else
+               clrbits8(&regs->canctl1, MSCAN_LISTEN);
 
        ret = mscan_start(dev);
        if (ret)
@@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void)
        priv->can.bittiming_const = &mscan_bittiming_const;
        priv->can.do_set_bittiming = mscan_do_set_bittiming;
        priv->can.do_set_mode = mscan_do_set_mode;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_LISTENONLY;
 
        for (i = 0; i < TX_QUEUE_SIZE; i++) {
                priv->tx_queue[i].id = i;
index fe9e64d..36e9d59 100644 (file)
@@ -6,7 +6,6 @@ if CAN_SJA1000
 
 config CAN_SJA1000_ISA
        tristate "ISA Bus based legacy SJA1000 driver"
-       depends on ISA
        ---help---
          This driver adds legacy support for SJA1000 chips connected to
          the ISA bus using I/O port, memory mapped or indirect access.
index 496223e..90c5c2d 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/delay.h>
@@ -44,9 +44,9 @@ static unsigned long port[MAXDEV];
 static unsigned long mem[MAXDEV];
 static int __devinitdata irq[MAXDEV];
 static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
 
 module_param_array(port, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number");
 module_param_array(mem, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(mem, "I/O memory address");
 
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
 MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
 
 module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register "
 #define SJA1000_IOSIZE          0x20
 #define SJA1000_IOSIZE_INDIRECT 0x02
 
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
 static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
 {
        return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
        outb(val, base + 1);
 }
 
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
-       if (port[idx] || mem[idx]) {
-               if (irq[idx])
-                       return 1;
-       } else if (idx)
-               return 0;
-
-       dev_err(pdev, "insufficient parameters supplied\n");
-       return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
 {
        struct net_device *dev;
        struct sja1000_priv *priv;
        void __iomem *base = NULL;
        int iosize = SJA1000_IOSIZE;
+       int idx = pdev->id;
        int err;
 
+       dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+               idx, port[idx], mem[idx], irq[idx]);
+
        if (mem[idx]) {
                if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
                        err = -EBUSY;
@@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
        else
                priv->can.clock.freq = CLK_DEFAULT / 2;
 
-       if (ocr[idx] != -1)
-               priv->ocr = ocr[idx] & 0xff;
-       else if (ocr[0] != -1)
-               priv->ocr = ocr[0] & 0xff;
+       if (ocr[idx] != 0xff)
+               priv->ocr = ocr[idx];
+       else if (ocr[0] != 0xff)
+               priv->ocr = ocr[0];
        else
                priv->ocr = OCR_DEFAULT;
 
-       if (cdr[idx] != -1)
-               priv->cdr = cdr[idx] & 0xff;
-       else if (cdr[0] != -1)
-               priv->cdr = cdr[0] & 0xff;
+       if (cdr[idx] != 0xff)
+               priv->cdr = cdr[idx];
+       else if (cdr[0] != 0xff)
+               priv->cdr = cdr[0];
        else
                priv->cdr = CDR_DEFAULT;
 
-       dev_set_drvdata(pdev, dev);
-       SET_NETDEV_DEV(dev, pdev);
+       dev_set_drvdata(&pdev->dev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
 
        err = register_sja1000dev(dev);
        if (err) {
-               dev_err(pdev, "registering %s failed (err=%d)\n",
+               dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
                        DRV_NAME, err);
                goto exit_unmap;
        }
 
-       dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+       dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
                 DRV_NAME, priv->reg_base, dev->irq);
        return 0;
 
@@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
        return err;
 }
 
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
 {
-       struct net_device *dev = dev_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(&pdev->dev);
        struct sja1000_priv *priv = netdev_priv(dev);
+       int idx = pdev->id;
 
        unregister_sja1000dev(dev);
-       dev_set_drvdata(pdev, NULL);
+       dev_set_drvdata(&pdev->dev, NULL);
 
        if (mem[idx]) {
                iounmap(priv->reg_base);
@@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
        return 0;
 }
 
-static struct isa_driver sja1000_isa_driver = {
-       .match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
        .probe = sja1000_isa_probe,
        .remove = __devexit_p(sja1000_isa_remove),
        .driver = {
                .name = DRV_NAME,
+               .owner = THIS_MODULE,
        },
 };
 
 static int __init sja1000_isa_init(void)
 {
-       int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+       int idx, err;
+
+       for (idx = 0; idx < MAXDEV; idx++) {
+               if ((port[idx] || mem[idx]) && irq[idx]) {
+                       sja1000_isa_devs[idx] =
+                               platform_device_alloc(DRV_NAME, idx);
+                       if (!sja1000_isa_devs[idx]) {
+                               err = -ENOMEM;
+                               goto exit_free_devices;
+                       }
+                       err = platform_device_add(sja1000_isa_devs[idx]);
+                       if (err) {
+                               platform_device_put(sja1000_isa_devs[idx]);
+                               goto exit_free_devices;
+                       }
+                       pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+                                "irq=%d\n",
+                                DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+               } else if (idx == 0 || port[idx] || mem[idx]) {
+                               pr_err("%s: insufficient parameters supplied\n",
+                                      DRV_NAME);
+                               err = -EINVAL;
+                               goto exit_free_devices;
+               }
+       }
+
+       err = platform_driver_register(&sja1000_isa_driver);
+       if (err)
+               goto exit_free_devices;
+
+       pr_info("Legacy %s driver for max. %d devices registered\n",
+               DRV_NAME, MAXDEV);
+
+       return 0;
+
+exit_free_devices:
+       while (--idx >= 0) {
+               if (sja1000_isa_devs[idx])
+                       platform_device_unregister(sja1000_isa_devs[idx]);
+       }
 
-       if (!err)
-               printk(KERN_INFO
-                      "Legacy %s driver for max. %d devices registered\n",
-                      DRV_NAME, MAXDEV);
        return err;
 }
 
 static void __exit sja1000_isa_exit(void)
 {
-       isa_unregister_driver(&sja1000_isa_driver);
+       int idx;
+
+       platform_driver_unregister(&sja1000_isa_driver);
+       for (idx = 0; idx < MAXDEV; idx++) {
+               if (sja1000_isa_devs[idx])
+                       platform_device_unregister(sja1000_isa_devs[idx]);
+       }
 }
 
 module_init(sja1000_isa_init);
index c3dd9d0..f2683eb 100644 (file)
@@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = {
        .remove = __devexit_p(sja1000_ofp_remove),
 };
 
-static int __init sja1000_ofp_init(void)
-{
-       return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
-       return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
index d9fadc4..4f50145 100644 (file)
@@ -185,15 +185,4 @@ static struct platform_driver sp_driver = {
        },
 };
 
-static int __init sp_init(void)
-{
-       return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
-       platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
index a979b00..3f1ebcc 100644 (file)
@@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev)
 
        /* New-style flags. */
        dev->flags              = IFF_NOARP;
-       dev->features           = NETIF_F_NO_CSUM;
+       dev->features           = NETIF_F_HW_CSUM;
 }
 
 /******************************************
index 09a8b86..a7c77c7 100644 (file)
@@ -874,21 +874,9 @@ static struct platform_driver softing_driver = {
        .remove = __devexit_p(softing_pdev_remove),
 };
 
-MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
-       return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
-       platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
+module_platform_driver(softing_driver);
 
+MODULE_ALIAS("platform:softing");
 MODULE_DESCRIPTION("Softing DPRAM CAN driver");
 MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
 MODULE_LICENSE("GPL v2");
index 2adc294..df809e3 100644 (file)
@@ -1037,20 +1037,7 @@ static struct platform_driver ti_hecc_driver = {
        .resume = ti_hecc_resume,
 };
 
-static int __init ti_hecc_init_driver(void)
-{
-       printk(KERN_INFO DRV_DESC "\n");
-       return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
-       printk(KERN_INFO DRV_DESC " unloaded\n");
-       platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
 
 MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644 (file)
index 0000000..dd151d5
--- /dev/null
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+       depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+       tristate
+       default n
+
+config NET_DSA_MV88E6060
+       tristate "Marvell 88E6060 ethernet switch chip support"
+       select NET_DSA_TAG_TRAILER
+       ---help---
+         This enables support for the Marvell 88E6060 ethernet switch
+         chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+       bool
+       default n
+
+config NET_DSA_MV88E6131
+       tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+       select NET_DSA_MV88E6XXX
+       select NET_DSA_MV88E6XXX_NEED_PPU
+       select NET_DSA_TAG_DSA
+       ---help---
+         This enables support for the Marvell 88E6085/6095/6095F/6131
+         ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+       tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+       select NET_DSA_MV88E6XXX
+       select NET_DSA_TAG_EDSA
+       ---help---
+         This enables support for the Marvell 88E6123/6161/6165
+         ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644 (file)
index 0000000..f3bda05
--- /dev/null
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
similarity index 96%
rename from net/dsa/mv88e6060.c
rename to drivers/net/dsa/mv88e6060.c
index 8f4ff5a..7fc4e81 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 
 #define REG_PORT(p)            (8 + (p))
 #define REG_GLOBAL             0x0f
@@ -286,3 +286,8 @@ static void __exit mv88e6060_cleanup(void)
        unregister_switch_driver(&mv88e6060_switch_driver);
 }
 module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
similarity index 96%
rename from net/dsa/mv88e6123_61_65.c
rename to drivers/net/dsa/mv88e6123_61_65.c
index 52faaa2..c0a458f 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
@@ -419,7 +419,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
        return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
 }
 
-static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
        .tag_protocol           = cpu_to_be16(ETH_P_EDSA),
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6123_61_65_probe,
@@ -433,15 +433,6 @@ static struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
        .get_sset_count         = mv88e6123_61_65_get_sset_count,
 };
 
-static int __init mv88e6123_61_65_init(void)
-{
-       register_switch_driver(&mv88e6123_61_65_switch_driver);
-       return 0;
-}
-module_init(mv88e6123_61_65_init);
-
-static void __exit mv88e6123_61_65_cleanup(void)
-{
-       unregister_switch_driver(&mv88e6123_61_65_switch_driver);
-}
-module_exit(mv88e6123_61_65_cleanup);
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
similarity index 96%
rename from net/dsa/mv88e6131.c
rename to drivers/net/dsa/mv88e6131.c
index 9bd1061..e0eb682 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 /*
@@ -415,7 +415,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds)
        return ARRAY_SIZE(mv88e6131_hw_stats);
 }
 
-static struct dsa_switch_driver mv88e6131_switch_driver = {
+struct dsa_switch_driver mv88e6131_switch_driver = {
        .tag_protocol           = cpu_to_be16(ETH_P_DSA),
        .priv_size              = sizeof(struct mv88e6xxx_priv_state),
        .probe                  = mv88e6131_probe,
@@ -429,15 +429,7 @@ static struct dsa_switch_driver mv88e6131_switch_driver = {
        .get_sset_count         = mv88e6131_get_sset_count,
 };
 
-static int __init mv88e6131_init(void)
-{
-       register_switch_driver(&mv88e6131_switch_driver);
-       return 0;
-}
-module_init(mv88e6131_init);
-
-static void __exit mv88e6131_cleanup(void)
-{
-       unregister_switch_driver(&mv88e6131_switch_driver);
-}
-module_exit(mv88e6131_cleanup);
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
similarity index 93%
rename from net/dsa/mv88e6xxx.c
rename to drivers/net/dsa/mv88e6xxx.c
index efe661a..5467c04 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include "dsa_priv.h"
+#include <net/dsa.h>
 #include "mv88e6xxx.h"
 
 /*
@@ -520,3 +520,30 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
 
        mutex_unlock(&ps->stats_mutex);
 }
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+       register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+       register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+       return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+       unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+       unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
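
Registration of the two sub-drivers now lives in the combined mv88e6xxx module above. The IS_ENABLED() checks matter because the sub-driver options are tristate: a plain #ifdef CONFIG_NET_DSA_MV88E6131 only matches =y (for =m the preprocessor defines CONFIG_NET_DSA_MV88E6131_MODULE instead), while IS_ENABLED() covers both cases. Sketch of the pattern:

#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)	/* true for =y and for =m */
	register_switch_driver(&mv88e6131_switch_driver);
#endif
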
similarity index 95%
rename from net/dsa/mv88e6xxx.h
rename to drivers/net/dsa/mv88e6xxx.h
index 61156ca..fc2cd7b 100644 (file)
@@ -71,6 +71,9 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
                                 int nr_stats, struct mv88e6xxx_hw_stat *stats,
                                 int port, uint64_t *data);
 
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
 #define REG_READ(addr, reg)                                            \
        ({                                                              \
                int __ret;                                              \
index a7c5e88..087648e 100644 (file)
@@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
-       dev->features   |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+       dev->features   |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
        random_ether_addr(dev->dev_addr);
 }
 
index 972f80e..da410f0 100644 (file)
@@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       snprintf(info->bus_info, sizeof(info->bus_info),
+               "PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
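
The same hardening recurs in most of the get_drvinfo hunks that follow: the ethtool_drvinfo members are fixed-size character arrays, so unbounded strcpy()/sprintf() calls are replaced by strlcpy()/snprintf() bounded with sizeof() of the destination. A generic sketch of the resulting shape (the names are illustrative):

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "PCMCIA 0x%lx", dev->base_addr);
}
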
index b42c06b..8153a3e 100644 (file)
@@ -2929,15 +2929,17 @@ static void vortex_get_drvinfo(struct net_device *dev,
 {
        struct vortex_private *vp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        if (VORTEX_PCI(vp)) {
-               strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+               strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+                       sizeof(info->bus_info));
        } else {
                if (VORTEX_EISA(vp))
-                       strcpy(info->bus_info, dev_name(vp->gendev));
+                       strlcpy(info->bus_info, dev_name(vp->gendev),
+                               sizeof(info->bus_info));
                else
-                       sprintf(info->bus_info, "EISA 0x%lx %d",
-                                       dev->base_addr, dev->irq);
+                       snprintf(info->bus_info, sizeof(info->bus_info),
+                               "EISA 0x%lx %d", dev->base_addr, dev->irq);
        }
 }
 
index 20ea075..6d6bc75 100644 (file)
@@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 
        smp_rmb();
        if(tp->card_state == Sleeping) {
-               strcpy(info->fw_version, "Sleep image");
+               strlcpy(info->fw_version, "Sleep image",
+                       sizeof(info->fw_version));
        } else {
                INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
                if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
-                       strcpy(info->fw_version, "Unknown runtime");
+                       strlcpy(info->fw_version, "Unknown runtime",
+                               sizeof(info->fw_version));
                } else {
                        u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
-                       snprintf(info->fw_version, 32, "%02x.%03x.%03x",
-                                sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
-                                sleep_ver & 0xfff);
+                       snprintf(info->fw_version, sizeof(info->fw_version),
+                               "%02x.%03x.%03x", sleep_ver >> 24,
+                               (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
                }
        }
 
-       strcpy(info->driver, KBUILD_MODNAME);
-       strcpy(info->bus_info, pci_name(pci_dev));
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static int
index 58a12e4..ef325ff 100644 (file)
@@ -14,8 +14,6 @@
 
 #define TX_PAGES 12    /* Two Tx slots */
 
-#define ETHER_ADDR_LEN 6
-
 /* The 8390 specific per-packet-header format. */
 struct e8390_pkt_hdr {
   unsigned char status; /* status */
index 5477373..3ad5d2f 100644 (file)
@@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
     i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
     if (i) return i;
 
-    for(i = 0; i < ETHER_ADDR_LEN; i++)
+    for (i = 0; i < ETH_ALEN; i++)
        dev->dev_addr[i] = SA_prom[i];
 
     printk(" %pM\n", dev->dev_addr);
index e9f8432..9e8ba4f 100644 (file)
@@ -735,15 +735,14 @@ static int ax_init_dev(struct net_device *dev)
        if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
                ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
                        ei_local->mem + E8390_CMD); /* 0x61 */
-               for (i = 0; i < ETHER_ADDR_LEN; i++)
+               for (i = 0; i < ETH_ALEN; i++)
                        dev->dev_addr[i] =
                                ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
        }
 
        if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
            ax->plat->mac_addr)
-               memcpy(dev->dev_addr, ax->plat->mac_addr,
-                      ETHER_ADDR_LEN);
+               memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
 
        ax_reset_8390(dev);
 
@@ -991,18 +990,7 @@ static struct platform_driver axdrv = {
        .resume         = ax_resume,
 };
 
-static int __init axdrv_init(void)
-{
-       return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
-       platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
 
 MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
index 7a09575..6428f9e 100644 (file)
@@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
                goto out;
        }
 
-       for (i = 0; i < ETHER_ADDR_LEN ; i++)
+       for (i = 0; i < ETH_ALEN ; i++)
                dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
 
 /*     Check the Racal vendor ID as well. */
index eeac843..d42938b 100644 (file)
@@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
        /* Retrieve and checksum the station address. */
        outw(MAC_Page, ioaddr + HP_PAGING);
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++) {
+       for(i = 0; i < ETH_ALEN; i++) {
                unsigned char inval = inb(ioaddr + 8 + i);
                dev->dev_addr[i] = inval;
                checksum += inval;
index 18564d4..113f1e0 100644 (file)
@@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
 
        printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for(i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = inb(ioaddr + i);
 
        printk(" %pM", dev->dev_addr);
index 3dac937..5370c88 100644 (file)
@@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
     if (!dev)
        return -ENOMEM;
 
-    for(j = 0; j < ETHER_ADDR_LEN; j++)
+    for (j = 0; j < ETH_ALEN; j++)
        dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
 
     /* We must set the 8390 for word mode. */
index f9888d2..69490ae 100644 (file)
@@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
                || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
                || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
                printk("lne390.c: card not found");
-               for(i = 0; i < ETHER_ADDR_LEN; i++)
+               for (i = 0; i < ETH_ALEN; i++)
                        printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
                printk(" (invalid prefix).\n");
                return -ENODEV;
        }
 #endif
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
        printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
               0xa+revision, ioaddr/0x1000, dev->dev_addr);
index cd36a6a..9b9c77d 100644 (file)
@@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
 
        dev->base_addr = ioaddr;
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = SA_prom[i];
        printk(" %pM\n", dev->dev_addr);
 
index 1063093..f92ea2a 100644 (file)
@@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
 #ifdef CONFIG_PLAT_MAPPI
        outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
                ioaddr + E8390_CMD); /* 0x61 */
-       for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+       for (i = 0; i < ETH_ALEN; i++) {
                dev->dev_addr[i] = SA_prom[i]
                        = inb_p(ioaddr + EN1_PHYS_SHIFT(i));
        }
 #else
-       for(i = 0; i < ETHER_ADDR_LEN; i++) {
+       for (i = 0; i < ETH_ALEN; i++) {
                dev->dev_addr[i] = SA_prom[i];
        }
 #endif
index 70cdc69..922b320 100644 (file)
@@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
 
        dev->base_addr = base_addr;
 
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = SA_prom[i];
 
        printk(" %pM\n", dev->dev_addr);
index 3992342..3fab04a 100644 (file)
@@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
        struct ei_device *ei = netdev_priv(dev);
        struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops ne2k_pci_ethtool_ops = {
index 243ed2a..2a3e805 100644 (file)
@@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device)
 #endif
 
        port_index = inb(ioaddr + NE3210_CFG2) >> 6;
-       for(i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
        printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
                edev->slot, ifmap[port_index], dev->dev_addr);
index d85f0a8..3b90375 100644 (file)
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
 #ifdef CONFIG_SH_STANDARD_BIOS
   sh_bios_get_node_addr (stnic_eadr);
 #endif
-  for (i = 0; i < ETHER_ADDR_LEN; i++)
+  for (i = 0; i < ETH_ALEN; i++)
     dev->dev_addr[i] = stnic_eadr[i];
 
   /* Set the base address to point to the NIC, not the "real" base! */
index 3aa9fe9..bcd2732 100644 (file)
@@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
        if (i)
                return i;
 
-       for (i = 0; i < ETHER_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = SA_prom[i];
 
        pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
index 597f4d4..3474a61 100644 (file)
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig"
 source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
index be5dde0..cd6d69a 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ATMEL) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
index 6d9f691..cb4f38a 100644 (file)
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
 
 
 #ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct netdev_private *np = netdev_priv(dev);
 
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
+
+       return 0;
 }
 
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct netdev_private *np = netdev_priv(dev);
 
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, np->active_vlans);
        set_rx_mode(dev);
        spin_unlock(&np->lock);
+
+       return 0;
 }
 #endif /* VLAN_SUPPORT */
 
@@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct netdev_private *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
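
In the two VLAN hunks above, the rx_add_vid/rx_kill_vid callbacks gain an int return value: the corresponding net_device_ops hooks now let a driver report failure back to the stack. Nothing can fail here, so the conversion simply returns 0. Sketch of the new shape (example_* is a placeholder):

static int example_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	/* ... program the VLAN filter exactly as before ... */
	return 0;	/* new: success or failure is reported to the caller */
}
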
index 442fefa..c885aa9 100644 (file)
@@ -1623,18 +1623,7 @@ static struct platform_driver greth_of_driver = {
        .remove = __devexit_p(greth_of_remove),
 };
 
-static int __init greth_init(void)
-{
-       return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
-       platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
 
 MODULE_AUTHOR("Aeroflex Gaisler AB.");
 MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
index a9745f4..33e0a8c 100644 (file)
@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
        writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
 
        /* Setting the MAC address to the device */
-       for(i = 0; i < ETH_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                writeb( dev->dev_addr[i], mmio + PADR + i );
 
        /* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        struct pci_dev *pci_dev = lp->pci_dev;
-       strcpy (info->driver, MODULE_NAME);
-       strcpy (info->version, MODULE_VERS);
-       sprintf(info->fw_version,"%u",chip_version);
-       strcpy (info->bus_info, pci_name(pci_dev));
+       strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+       snprintf(info->fw_version, sizeof(info->fw_version),
+               "%u", chip_version);
+       strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
 }
 
 static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        spin_lock_irq(&lp->lock);
        /* Setting the MAC address to the device */
-       for(i = 0; i < ETH_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                writeb( dev->dev_addr[i], lp->mmio + PADR + i );
 
        spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
        }
 
        /* Initializing MAC address */
-       for(i = 0; i < ETH_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = readb(lp->mmio + PADR + i);
 
        /* Setting user defined parametrs */
index 2ff2e7a..5bbb53a 100644 (file)
@@ -586,7 +586,6 @@ typedef enum {
 
 #define PKT_BUFF_SZ                    1536
 #define MIN_PKT_LEN                    60
-#define ETH_ADDR_LEN                   6
 
 #define  AMD8111E_TX_TIMEOUT           (3 * HZ)/* 3 sec */
 #define SOFT_TIMER_FREQ                0xBEBC  /* 0.5 sec */
index 4865ff1..cc9262b 100644 (file)
@@ -1339,18 +1339,7 @@ static struct platform_driver au1000_eth_driver = {
                .owner  = THIS_MODULE,
        },
 };
-MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
-       return platform_driver_register(&au1000_eth_driver);
-}
 
-static void __exit au1000_exit_module(void)
-{
-       platform_driver_unregister(&au1000_eth_driver);
-}
+module_platform_driver(au1000_eth_driver);
 
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
+MODULE_ALIAS("platform:au1000-eth");
index 3accd5d..6be0dd6 100644 (file)
@@ -160,8 +160,6 @@ Include Files
 Defines
 ---------------------------------------------------------------------------- */
 
-#define ETHER_ADDR_LEN                 ETH_ALEN
-                                       /* 6 bytes in an Ethernet Address */
 #define MACE_LADRF_LEN                 8
                                        /* 8 bytes in Logical Address Filter */
 
@@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
        }
   }
   /* Set PADR register */
-  for (i = 0; i < ETHER_ADDR_LEN; i++)
+  for (i = 0; i < ETH_ALEN; i++)
     mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
 
   /* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link)
 
   /* Read the ethernet address from the CIS. */
   len = pcmcia_get_tuple(link, 0x80, &buf);
-  if (!buf || len < ETHER_ADDR_LEN) {
+  if (!buf || len < ETH_ALEN) {
          kfree(buf);
          goto failed;
   }
-  memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+  memcpy(dev->dev_addr, buf, ETH_ALEN);
   kfree(buf);
 
   /* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       snprintf(info->bus_info, sizeof(info->bus_info),
+               "PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@ Output
 static void set_multicast_list(struct net_device *dev)
 {
   mace_private *lp = netdev_priv(dev);
-  int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+  int adr[ETH_ALEN] = {0}; /* Ethernet address */
   struct netdev_hw_addr *ha;
 
 #ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev)
     /* Calculate multicast logical address filter */
     memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
     netdev_for_each_mc_addr(ha, dev) {
-      memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+      memcpy(adr, ha->addr, ETH_ALEN);
       BuildLAF(lp->multicast_ladrf, adr);
     }
   }
index f92bc6e..20e6dab 100644 (file)
@@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
 {
        struct pcnet32_private *lp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (lp->pci_dev)
-               strcpy(info->bus_info, pci_name(lp->pci_dev));
+               strlcpy(info->bus_info, pci_name(lp->pci_dev),
+                       sizeof(info->bus_info));
        else
-               sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+               snprintf(info->bus_info, sizeof(info->bus_info),
+                       "VLB 0x%lx", dev->base_addr);
 }
 
 static u32 pcnet32_get_link(struct net_device *dev)
index 8fda457..7ea16d3 100644 (file)
@@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = {
        .remove         = __devexit_p(sunlance_sbus_remove),
 };
 
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
-       return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
-       platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
index 7be884d..0a9326a 100644 (file)
@@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->driver,  atl1c_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, atl1c_driver_version,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
index 02c7ed8..b859124 100644 (file)
@@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev)
        }
 }
 
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
        }
 }
 
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atl1c_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
                roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
 }
 
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atl1c_vlan_mode(netdev, features);
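
The atl1c hunks above, and the atl1e/atl2/atlx/bnx2 ones below, all follow from the same interface change: feature masks are carried in netdev_features_t (a 64-bit type) rather than u32, so the fix_features/set_features callbacks and any local "changed" mask must switch type to avoid truncating the upper feature bits. A condensed sketch (example_set_features() is a placeholder):

static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX) {
		/* reprogram VLAN tag stripping to match 'features' */
	}
	return 0;
}
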
index 6269438..6e61f9f 100644 (file)
@@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
 {
        struct atl1e_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver,  atl1e_driver_name, 32);
-       strncpy(drvinfo->version, atl1e_driver_version, 32);
-       strncpy(drvinfo->fw_version, "L1e", 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  atl1e_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, atl1e_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = atl1e_get_regs_len(netdev);
index 95483bc..c915c08 100644 (file)
@@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev)
        }
 }
 
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
        }
 }
 
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atl1e_adapter *adapter = netdev_priv(netdev);
        u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
        return 0;
 }
 
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atl1e_vlan_mode(netdev, features);
index 33a4e35..9bd2049 100644 (file)
@@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
        strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
                sizeof(drvinfo->version));
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
        drvinfo->eedump_len = ATL1_EEDUMP_LEN;
index 1feae59..071f4c8 100644 (file)
@@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
     synchronize_irq(adapter->pdev->irq);
 }
 
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl)
        }
 }
 
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atl2_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;
@@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter)
        atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
 }
 
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev,
 {
        struct atl2_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver,  atl2_driver_name, 32);
-       strncpy(drvinfo->version, atl2_driver_version, 32);
-       strncpy(drvinfo->fw_version, "L2", 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  atl2_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, atl2_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = atl2_get_regs_len(netdev);
index aabcf4b..8ff7411 100644 (file)
@@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work)
        spin_unlock_irqrestore(&adapter->lock, flags);
 }
 
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
 {
        if (features & NETIF_F_HW_VLAN_RX) {
                /* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl)
        }
 }
 
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct atlx_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
@@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
        atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
 }
 
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                atlx_vlan_mode(netdev, features);
index 965c723..787e175 100644 (file)
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
 
        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
-               u32 new_adv_reg = 0;
-               u32 new_adv1000_reg = 0;
+               u32 new_adv = 0;
+               u32 new_adv1000 = 0;
 
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;
 
-               if (bp->advertising & ADVERTISED_10baseT_Half)
-                       new_adv_reg |= ADVERTISE_10HALF;
-               if (bp->advertising & ADVERTISED_10baseT_Full)
-                       new_adv_reg |= ADVERTISE_10FULL;
-               if (bp->advertising & ADVERTISED_100baseT_Half)
-                       new_adv_reg |= ADVERTISE_100HALF;
-               if (bp->advertising & ADVERTISED_100baseT_Full)
-                       new_adv_reg |= ADVERTISE_100FULL;
-               if (bp->advertising & ADVERTISED_1000baseT_Full)
-                       new_adv1000_reg |= ADVERTISE_1000FULL;
+               new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+               new_adv |= ADVERTISE_CSMA;
+               new_adv |= bnx2_phy_get_pause_adv(bp);
 
-               new_adv_reg |= ADVERTISE_CSMA;
+               new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
 
-               new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
-               if ((adv1000_reg != new_adv1000_reg) ||
-                       (adv_reg != new_adv_reg) ||
+               if ((adv1000_reg != new_adv1000) ||
+                       (adv_reg != new_adv) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {
 
-                       bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
-                       bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+                       bnx2_write_phy(bp, bp->mii_adv, new_adv);
+                       bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
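
ethtool_adv_to_mii_adv_t() and ethtool_adv_to_mii_ctrl1000_t() are helpers from <linux/mii.h> that translate the ethtool ADVERTISED_* mask into MII advertisement register bits, replacing the hand-rolled if chain deleted above. Roughly:

u32 adv     = ethtool_adv_to_mii_adv_t(ADVERTISED_10baseT_Full |
				       ADVERTISED_100baseT_Full);
	/* adv == ADVERTISE_10FULL | ADVERTISE_100FULL */
u32 adv1000 = ethtool_adv_to_mii_ctrl1000_t(ADVERTISED_1000baseT_Full);
	/* adv1000 == ADVERTISE_1000FULL */
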
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
-       struct sk_buff *skb;
+       u8 *data;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
-       unsigned long align;
 
-       skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
-       if (skb == NULL) {
+       data = kmalloc(bp->rx_buf_size, gfp);
+       if (!data)
                return -ENOMEM;
-       }
 
-       if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
-               skb_reserve(skb, BNX2_RX_ALIGN - align);
-
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+       mapping = dma_map_single(&bp->pdev->dev,
+                                get_l2_fhdr(data),
+                                bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-               dev_kfree_skb(skb);
+               kfree(data);
                return -EIO;
        }
 
-       rx_buf->skb = skb;
-       rx_buf->desc = (struct l2_fhdr *) skb->data;
+       rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);
 
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
+       unsigned int tx_bytes = 0;
        struct netdev_queue *txq;
 
        index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
                sw_cons = NEXT_TX_BD(sw_cons);
 
+               tx_bytes += skb->len;
                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }
 
+       netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
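
The netdev_tx_completed_queue() call above, together with netdev_tx_sent_queue() in bnx2_start_xmit() and netdev_tx_reset_queue() in bnx2_free_tx_skbs() further down, wires the driver up to byte queue limits: the stack tracks bytes in flight per TX queue and can cap how much data sits in the ring. A condensed sketch of the three hooks (txq, skb, tx_pkt and tx_bytes as used in the surrounding code):

/* transmit path (ndo_start_xmit) */
netdev_tx_sent_queue(txq, skb->len);

/* TX completion path, after reclaiming tx_pkt packets / tx_bytes bytes */
netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);

/* when a ring is flushed or torn down */
netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
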
 
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
-                 struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+                  u8 *data, u16 cons, u16 prod)
 {
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
-       prod_rx_buf->skb = skb;
-       prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+       prod_rx_buf->data = data;
 
        if (cons == prod)
                return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
 {
        int err;
        u16 prod = ring_idx & 0xffff;
+       struct sk_buff *skb;
 
-       err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+       err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
-               bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+               bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
-               return err;
+               return NULL;
        }
 
-       skb_reserve(skb, BNX2_RX_OFFSET);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
-
+       skb = build_skb(data);
+       if (!skb) {
+               kfree(data);
+               goto error;
+       }
+       skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
        if (hdr_len == 0) {
                skb_put(skb, len);
-               return 0;
+               return skb;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                        skb_frag_size_sub(frag, tail);
                                        skb->data_len -= tail;
                                }
-                               return 0;
+                               return skb;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];
 
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
-                               return err;
+                               return NULL;
                        }
 
                        dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
-       return 0;
+       return skb;
 }
 
 static inline u16
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
+               u8 *data;
 
                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);
 
                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
-               skb = rx_buf->skb;
-               prefetchw(skb);
-
-               next_rx_buf =
-                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
-               prefetch(next_rx_buf->desc);
+               data = rx_buf->data;
+               rx_buf->data = NULL;
 
-               rx_buf->skb = NULL;
+               rx_hdr = get_l2_fhdr(data);
+               prefetch(rx_hdr);
 
                dma_addr = dma_unmap_addr(rx_buf, mapping);
 
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);
 
-               rx_hdr = rx_buf->desc;
+               next_rx_buf =
+                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+               prefetch(get_l2_fhdr(next_rx_buf->data));
+
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;
 
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {
 
-                       bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+                       bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                len -= 4;
 
                if (len <= bp->rx_copy_thresh) {
-                       struct sk_buff *new_skb;
-
-                       new_skb = netdev_alloc_skb(bp->dev, len + 6);
-                       if (new_skb == NULL) {
-                               bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+                       skb = netdev_alloc_skb(bp->dev, len + 6);
+                       if (skb == NULL) {
+                               bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }
 
                        /* aligned copy */
-                       skb_copy_from_linear_data_offset(skb,
-                                                        BNX2_RX_OFFSET - 6,
-                                     new_skb->data, len + 6);
-                       skb_reserve(new_skb, 6);
-                       skb_put(new_skb, len);
+                       memcpy(skb->data,
+                              (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+                              len + 6);
+                       skb_reserve(skb, 6);
+                       skb_put(skb, len);
 
-                       bnx2_reuse_rx_skb(bp, rxr, skb,
+                       bnx2_reuse_rx_data(bp, rxr, data,
                                sw_ring_cons, sw_ring_prod);
 
-                       skb = new_skb;
-               } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
-                          dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
-                       goto next_rx;
-
+               } else {
+                       skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+                                         (sw_ring_cons << 16) | sw_ring_prod);
+                       if (!skb)
+                               goto next_rx;
+               }
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+               if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_num, i, bp->rx_ring_size);
                        break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
        rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
 
        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
-               sizeof(struct skb_shared_info);
+               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
        bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
        }
 
        bp->rx_buf_use_size = rx_size;
-       /* hw alignment */
-       bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+       /* hw alignment + build_skb() overhead */
+       bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+               NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        }
                        dev_kfree_skb(skb);
                }
+               netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
        }
 }
 
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_ring_idx; j++) {
                        struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
-                       struct sk_buff *skb = rx_buf->skb;
+                       u8 *data = rx_buf->data;
 
-                       if (skb == NULL)
+                       if (data == NULL)
                                continue;
 
                        dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);
 
-                       rx_buf->skb = NULL;
+                       rx_buf->data = NULL;
 
-                       dev_kfree_skb(skb);
+                       kfree(data);
                }
                for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
                        bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 {
        unsigned int pkt_size, num_pkts, i;
-       struct sk_buff *skb, *rx_skb;
+       struct sk_buff *skb;
+       u8 *data;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        }
 
        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
-       rx_skb = rx_buf->skb;
+       data = rx_buf->data;
 
-       rx_hdr = rx_buf->desc;
-       skb_reserve(rx_skb, BNX2_RX_OFFSET);
+       rx_hdr = get_l2_fhdr(data);
+       data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
 
        dma_sync_single_for_cpu(&bp->pdev->dev,
                dma_unmap_addr(rx_buf, mapping),
-               bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+               bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        }
 
        for (i = 14; i < pkt_size; i++) {
-               if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+               if (*(data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;
 
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       strcpy(info->bus_info, pci_name(bp->pdev));
-       strcpy(info->fw_version, bp->fw_version);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+       strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
 }
 
 #define BNX2_REGDUMP_LEN               (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
        return 0;
 }
 
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
 }
 
 static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
index 99d31a7..1db2d51 100644 (file)
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
 #define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
 #define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
 
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer holds a pointer to kmalloc()'ed data only,
+ * and skbs are built only after the hardware has filled the frame.
+ */
 struct sw_bd {
-       struct sk_buff          *skb;
-       struct l2_fhdr          *desc;
+       u8                      *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
+/* It's faster to compute this from data than to store it in sw_bd
+ * (fewer cache misses).
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+       return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
 struct sw_pg {
        struct page             *page;
        DEFINE_DMA_UNMAP_ADDR(mapping);
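
get_l2_fhdr() above encodes the buffer layout the converted driver relies on: a plain kmalloc() area with NET_SKB_PAD of headroom, the aligned l2_fhdr plus frame in the middle, and room at the tail for the skb_shared_info that build_skb() places there instead of allocating a full skb up front. A sketch of the sizing, mirroring bnx2_set_rx_ring_size() in the bnx2.c hunks above:

/* part the hardware may DMA into */
rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
bp->rx_buf_use_size = rx_size;

/* full kmalloc() size: alignment slack + headroom + shared_info tail */
bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
		  NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
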
index aec7212..8c73d34 100644 (file)
@@ -23,8 +23,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.70.30-0"
-#define DRV_MODULE_RELDATE      "2011/10/25"
+#define DRV_MODULE_VERSION      "1.70.35-0"
+#define DRV_MODULE_RELDATE      "2011/11/10"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
 #define FCOE_TXQ_IDX(bp)       (MAX_ETH_TXQ_IDX(bp))
 
 /* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer holds a pointer to kmalloc()'ed data only,
+ * and skbs are built only after the hardware has filled the frame.
+ */
 struct sw_rx_bd {
-       struct sk_buff  *skb;
+       u8              *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
@@ -411,8 +416,7 @@ union db_prod {
 
 
 /* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN                        ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
-                                        BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN                        (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK           (RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)         (((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
@@ -425,8 +429,8 @@ union host_hc_status_block {
 
 struct bnx2x_agg_info {
        /*
-        * First aggregation buffer is an skb, the following - are pages.
-        * We will preallocate the skbs for each aggregation when
+        * The first aggregation buffer is a data buffer; the following are pages.
+        * We will preallocate the data buffer for each aggregation when
         * we open the interface and will replace the BD at the consumer
         * with this one when we receive the TPA_START CQE in order to
         * keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
        u16                     parsing_flags;
        u16                     vlan_tag;
        u16                     len_on_bd;
+       u32                     rxhash;
 };
 
 #define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
        __le16                  fp_hc_idx;
 
        u8                      index;          /* number in fp array */
+       u8                      rx_queue;       /* index for skb_record */
        u8                      cl_id;          /* eth client id */
        u8                      cl_qzone_id;
        u8                      fw_sb_id;       /* status block number in FW */
@@ -881,6 +887,8 @@ struct bnx2x_common {
 #define CHIP_PORT_MODE_NONE                    0x2
 #define CHIP_MODE(bp)                  (bp->common.chip_port_mode)
 #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+       u32                     boot_mode;
 };
 
 /* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@ struct bnx2x_slowpath {
 
        u32                             wb_comp;
        u32                             wb_data[4];
+
+       union drv_info_to_mcp           drv_info_to_mcp;
 };
 
 #define bnx2x_sp(bp, var)              (&bp->slowpath->var)
@@ -1122,18 +1132,21 @@ enum {
 enum {
        BNX2X_PORT_QUERY_IDX,
        BNX2X_PF_QUERY_IDX,
+       BNX2X_FCOE_QUERY_IDX,
        BNX2X_FIRST_QUEUE_QUERY_IDX,
 };
 
 struct bnx2x_fw_stats_req {
        struct stats_query_header hdr;
-       struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+       struct stats_query_entry query[FP_SB_MAX_E1x+
+               BNX2X_FIRST_QUEUE_QUERY_IDX];
 };
 
 struct bnx2x_fw_stats_data {
        struct stats_counter    storm_counters;
        struct per_port_stats   port;
        struct per_pf_stats     pf;
+       struct fcoe_statistics_params   fcoe;
        struct per_queue_stats  queue_stats[1];
 };
 
@@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data {
 enum {
        BNX2X_SP_RTNL_SETUP_TC,
        BNX2X_SP_RTNL_TX_TIMEOUT,
+       BNX2X_SP_RTNL_FAN_FAILURE,
 };
 
 
@@ -1186,10 +1200,20 @@ struct bnx2x {
 #define ETH_MAX_JUMBO_PACKET_SIZE      9600
 
        /* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT           ((L1_CACHE_SHIFT < 8) ? \
-                                        L1_CACHE_SHIFT : 8)
-       /* FW use 2 Cache lines Alignment for start packet and size  */
-#define BNX2X_FW_RX_ALIGN              (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT           min(8, L1_CACHE_SHIFT)
+
+       /* FW uses 2 cache lines of alignment for the packet start and size.
+        *
+        * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+        * at the end of skb->data, to avoid wasting a full cache line.
+        * This reduces memory use (skb->truesize).
+        */
+#define BNX2X_FW_RX_ALIGN_START        (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END                                  \
+       max(1UL << BNX2X_RX_ALIGN_SHIFT,                        \
+           SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
 #define BNX2X_PXP_DRAM_ALIGN           (BNX2X_RX_ALIGN_SHIFT - 5)
 
        struct host_sp_status_block *def_status_blk;
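The comment above is the key to the new buffer sizing: the kmalloc()'d area must leave headroom at the front and enough tail room for struct skb_shared_info, since build_skb() reuses the same allocation for both. A back-of-the-envelope model with assumed sizes (cache line, shared-info size, Ethernet overhead are illustrative):

    #include <stdio.h>

    #define CACHE_LINE          64UL
    #define SHARED_INFO_SIZE    320UL       /* assumed sizeof(struct skb_shared_info) */
    #define ALIGN_UP(x, a)      (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long start = CACHE_LINE;   /* models BNX2X_FW_RX_ALIGN_START */
            unsigned long aligned_si = ALIGN_UP(SHARED_INFO_SIZE, CACHE_LINE);
            /* models BNX2X_FW_RX_ALIGN_END: the larger of one aligned unit
             * and the tail needed for the shared info */
            unsigned long end = aligned_si > CACHE_LINE ? aligned_si : CACHE_LINE;
            unsigned long mtu = 1500, overhead = 18 + 8;   /* assumed ETH + pad */

            printf("rx_buf_size = %lu + %lu + %lu + %lu = %lu\n",
                   start, overhead, mtu, end, start + overhead + mtu + end);
            return 0;
    }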
@@ -1249,6 +1273,7 @@ struct bnx2x {
 #define NO_ISCSI_OOO_FLAG              (1 << 13)
 #define NO_ISCSI_FLAG                  (1 << 14)
 #define NO_FCOE_FLAG                   (1 << 15)
+#define BC_SUPPORTS_PFC_STATS          (1 << 17)
 
 #define NO_ISCSI(bp)           ((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)       ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
                              AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
 
-#define RSS_FLAGS(bp) \
-               (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
-                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
-                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
-                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
-                (bp->multi_mode << \
-                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK                     0x7f
 
 
@@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_VPD_LEN                  128
 #define VENDOR_ID_LEN                  4
 
+int bnx2x_close(struct net_device *dev);
+
 /* Congestion management fairness mode */
 #define CMNG_FNS_NONE          0
 #define CMNG_FNS_MINMAX                1
@@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = {
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev);
 void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+       ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+       (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
 #endif /* bnx2x.h */
index 580b44e..64f5cf5 100644 (file)
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
  * @to:                destination FP index
  *
  * Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy'ing the entire
+ * source onto the target.
  */
 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 {
        struct bnx2x_fastpath *from_fp = &bp->fp[from];
        struct bnx2x_fastpath *to_fp = &bp->fp[to];
-       struct napi_struct orig_napi = to_fp->napi;
+
+       /* Copy the NAPI object as it has been already initialized */
+       from_fp->napi = to_fp->napi;
+
        /* Move bnx2x_fastpath contents */
        memcpy(to_fp, from_fp, sizeof(*to_fp));
        to_fp->index = to;
-
-       /* Restore the NAPI object as it has been already initialized */
-       to_fp->napi = orig_napi;
 }
 
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
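The rewritten bnx2x_move_fp() relies on a small trick: copy the destination's live napi member back into the source first, so the following whole-struct memcpy() carries it along unchanged. A generic standalone demo of the same idea, with made-up field names:

    #include <stdio.h>
    #include <string.h>

    struct fastpath {
            int index;
            int napi_state;         /* stands in for the initialized napi struct */
            char payload[64];
    };

    static void move_fp(struct fastpath *from, struct fastpath *to, int new_index)
    {
            from->napi_state = to->napi_state;  /* preserve destination's napi */
            memcpy(to, from, sizeof(*to));      /* then move everything else */
            to->index = new_index;
    }

    int main(void)
    {
            struct fastpath a = { .index = 5, .napi_state = 0 };
            struct fastpath b = { .index = 0, .napi_state = 42 };  /* "already initialized" */

            move_fp(&a, &b, 0);
            printf("index=%d napi_state=%d\n", b.index, b.napi_state);  /* 0 42 */
            return 0;
    }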
@@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
  * return idx of last bd freed
  */
 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
-                            u16 idx)
+                            u16 idx, unsigned int *pkts_compl,
+                            unsigned int *bytes_compl)
 {
        struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 
        /* release skb */
        WARN_ON(!skb);
+       if (skb) {
+               (*pkts_compl)++;
+               (*bytes_compl) += skb->len;
+       }
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;
@@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 {
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
@@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
                                      " pkt_cons %u\n",
                   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 
-               bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+               bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+                   &pkts_compl, &bytes_compl);
+
                sw_cons++;
        }
 
+       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
        txdata->tx_pkt_cons = sw_cons;
        txdata->tx_bd_cons = bd_cons;
 
@@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
           fp->last_max_sge, fp->rx_sge_prod);
 }
 
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+                           const struct eth_fast_path_rx_cqe *cqe)
+{
+       /* Set Toeplitz hash from CQE */
+       if ((bp->dev->features & NETIF_F_RXHASH) &&
+           (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+               return le32_to_cpu(cqe->rss_hash_result);
+       return 0;
+}
+
 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-                           struct sk_buff *skb, u16 cons, u16 prod,
+                           u16 cons, u16 prod,
                            struct eth_fast_path_rx_cqe *cqe)
 {
        struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
        if (tpa_info->tpa_state != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 
-       /* Try to map an empty skb from the aggregation info  */
+       /* Try to map an empty data buffer from the aggregation info  */
        mapping = dma_map_single(&bp->pdev->dev,
-                                first_buf->skb->data,
+                                first_buf->data + NET_SKB_PAD,
                                 fp->rx_buf_size, DMA_FROM_DEVICE);
        /*
         *  ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                /* Move the BD from the consumer to the producer */
-               bnx2x_reuse_rx_skb(fp, cons, prod);
+               bnx2x_reuse_rx_data(fp, cons, prod);
                tpa_info->tpa_state = BNX2X_TPA_ERROR;
                return;
        }
 
-       /* move empty skb from pool to prod */
-       prod_rx_buf->skb = first_buf->skb;
+       /* move empty data from pool to prod */
+       prod_rx_buf->data = first_buf->data;
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-       /* point prod_bd to new skb */
+       /* point prod_bd to new data */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 
@@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
        tpa_info->tpa_state = BNX2X_TPA_START;
        tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
        tpa_info->placement_offset = cqe->placement_offset;
+       tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
 
 #ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 {
        struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
        struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
-       u8 pad = tpa_info->placement_offset;
+       u32 pad = tpa_info->placement_offset;
        u16 len = tpa_info->len_on_bd;
-       struct sk_buff *skb = rx_buf->skb;
+       struct sk_buff *skb = NULL;
+       u8 *data = rx_buf->data;
        /* alloc new skb */
-       struct sk_buff *new_skb;
+       u8 *new_data;
        u8 old_tpa_state = tpa_info->tpa_state;
 
        tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,18 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        if (old_tpa_state == BNX2X_TPA_ERROR)
                goto drop;
 
-       /* Try to allocate the new skb */
-       new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+       /* Try to allocate the new data */
+       new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
 
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                         fp->rx_buf_size, DMA_FROM_DEVICE);
+       if (likely(new_data))
+               skb = build_skb(data);
 
-       if (likely(new_skb)) {
-               prefetch(skb);
-               prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+       if (likely(skb)) {
 
 #ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > fp->rx_buf_size) {
@@ -507,8 +534,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 #endif
 
-               skb_reserve(skb, pad);
+               skb_reserve(skb, pad + NET_SKB_PAD);
                skb_put(skb, len);
+               skb->rxhash = tpa_info->rxhash;
 
                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,8 +552,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 
 
-               /* put new skb in bin */
-               rx_buf->skb = new_skb;
+               /* put new data in bin */
+               rx_buf->data = new_data;
 
                return;
        }
@@ -537,19 +565,6 @@ drop:
        fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
-                                       struct sk_buff *skb)
-{
-       /* Set Toeplitz hash from CQE */
-       if ((bp->dev->features & NETIF_F_RXHASH) &&
-           (cqe->fast_path_cqe.status_flags &
-            ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
-               skb->rxhash =
-               le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -592,6 +607,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                u8 cqe_fp_flags;
                enum eth_rx_cqe_type cqe_fp_type;
                u16 len, pad;
+               u8 *data;
 
 #ifdef BNX2X_STOP_ON_ERROR
                if (unlikely(bp->panic))
@@ -602,13 +618,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);
 
-               /* Prefetch the page containing the BD descriptor
-                  at producer's index. It will be needed when new skb is
-                  allocated */
-               prefetch((void *)(PAGE_ALIGN((unsigned long)
-                                            (&fp->rx_desc_ring[bd_prod])) -
-                                 PAGE_SIZE + 1));
-
                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp = &cqe->fast_path_cqe;
                cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +633,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;
+               }
+               rx_buf = &fp->rx_buf_ring[bd_cons];
+               data = rx_buf->data;
 
-               /* this is an rx packet */
-               } else {
-                       rx_buf = &fp->rx_buf_ring[bd_cons];
-                       skb = rx_buf->skb;
-                       prefetch(skb);
-
-                       if (!CQE_TYPE_FAST(cqe_fp_type)) {
+               if (!CQE_TYPE_FAST(cqe_fp_type)) {
 #ifdef BNX2X_STOP_ON_ERROR
-                               /* sanity check */
-                               if (fp->disable_tpa &&
-                                   (CQE_TYPE_START(cqe_fp_type) ||
-                                    CQE_TYPE_STOP(cqe_fp_type)))
-                                       BNX2X_ERR("START/STOP packet while "
-                                                 "disable_tpa type %x\n",
-                                                 CQE_TYPE(cqe_fp_type));
+                       /* sanity check */
+                       if (fp->disable_tpa &&
+                           (CQE_TYPE_START(cqe_fp_type) ||
+                            CQE_TYPE_STOP(cqe_fp_type)))
+                               BNX2X_ERR("START/STOP packet while "
+                                         "disable_tpa type %x\n",
+                                         CQE_TYPE(cqe_fp_type));
 #endif
 
-                               if (CQE_TYPE_START(cqe_fp_type)) {
-                                       u16 queue = cqe_fp->queue_index;
-                                       DP(NETIF_MSG_RX_STATUS,
-                                          "calling tpa_start on queue %d\n",
-                                          queue);
+                       if (CQE_TYPE_START(cqe_fp_type)) {
+                               u16 queue = cqe_fp->queue_index;
+                               DP(NETIF_MSG_RX_STATUS,
+                                  "calling tpa_start on queue %d\n",
+                                  queue);
 
-                                       bnx2x_tpa_start(fp, queue, skb,
-                                                       bd_cons, bd_prod,
-                                                       cqe_fp);
-
-                                       /* Set Toeplitz hash for LRO skb */
-                                       bnx2x_set_skb_rxhash(bp, cqe, skb);
-
-                                       goto next_rx;
-
-                               } else {
-                                       u16 queue =
-                                               cqe->end_agg_cqe.queue_index;
-                                       DP(NETIF_MSG_RX_STATUS,
-                                          "calling tpa_stop on queue %d\n",
-                                          queue);
-
-                                       bnx2x_tpa_stop(bp, fp, queue,
-                                                      &cqe->end_agg_cqe,
-                                                      comp_ring_cons);
+                               bnx2x_tpa_start(fp, queue,
+                                               bd_cons, bd_prod,
+                                               cqe_fp);
+                               goto next_rx;
+                       } else {
+                               u16 queue =
+                                       cqe->end_agg_cqe.queue_index;
+                               DP(NETIF_MSG_RX_STATUS,
+                                  "calling tpa_stop on queue %d\n",
+                                  queue);
+
+                               bnx2x_tpa_stop(bp, fp, queue,
+                                              &cqe->end_agg_cqe,
+                                              comp_ring_cons);
 #ifdef BNX2X_STOP_ON_ERROR
-                                       if (bp->panic)
-                                               return 0;
+                               if (bp->panic)
+                                       return 0;
 #endif
 
-                                       bnx2x_update_sge_prod(fp, cqe_fp);
-                                       goto next_cqe;
-                               }
+                               bnx2x_update_sge_prod(fp, cqe_fp);
+                               goto next_cqe;
                        }
-                       /* non TPA */
-                       len = le16_to_cpu(cqe_fp->pkt_len);
-                       pad = cqe_fp->placement_offset;
-                       dma_sync_single_for_cpu(&bp->pdev->dev,
+               }
+               /* non TPA */
+               len = le16_to_cpu(cqe_fp->pkt_len);
+               pad = cqe_fp->placement_offset;
+               dma_sync_single_for_cpu(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
-                                                      pad + RX_COPY_THRESH,
-                                                      DMA_FROM_DEVICE);
-                       prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+                                       pad + RX_COPY_THRESH,
+                                       DMA_FROM_DEVICE);
+               pad += NET_SKB_PAD;
+               prefetch(data + pad); /* speedup eth_type_trans() */
+               /* is this an error packet? */
+               if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+                       DP(NETIF_MSG_RX_ERR,
+                          "ERROR  flags %x  rx packet %u\n",
+                          cqe_fp_flags, sw_comp_cons);
+                       fp->eth_q_stats.rx_err_discard_pkt++;
+                       goto reuse_rx;
+               }
 
-                       /* is this an error packet? */
-                       if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+               /* Since we don't have a jumbo ring
+                * copy small packets if mtu > 1500
+                */
+               if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+                   (len <= RX_COPY_THRESH)) {
+                       skb = netdev_alloc_skb_ip_align(bp->dev, len);
+                       if (skb == NULL) {
                                DP(NETIF_MSG_RX_ERR,
-                                  "ERROR  flags %x  rx packet %u\n",
-                                  cqe_fp_flags, sw_comp_cons);
-                               fp->eth_q_stats.rx_err_discard_pkt++;
+                                  "ERROR  packet dropped because of alloc failure\n");
+                               fp->eth_q_stats.rx_skb_alloc_failed++;
                                goto reuse_rx;
                        }
-
-                       /* Since we don't have a jumbo ring
-                        * copy small packets if mtu > 1500
-                        */
-                       if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
-                           (len <= RX_COPY_THRESH)) {
-                               struct sk_buff *new_skb;
-
-                               new_skb = netdev_alloc_skb(bp->dev, len + pad);
-                               if (new_skb == NULL) {
-                                       DP(NETIF_MSG_RX_ERR,
-                                          "ERROR  packet dropped "
-                                          "because of alloc failure\n");
-                                       fp->eth_q_stats.rx_skb_alloc_failed++;
-                                       goto reuse_rx;
-                               }
-
-                               /* aligned copy */
-                               skb_copy_from_linear_data_offset(skb, pad,
-                                                   new_skb->data + pad, len);
-                               skb_reserve(new_skb, pad);
-                               skb_put(new_skb, len);
-
-                               bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
-                               skb = new_skb;
-
-                       } else
-                       if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+                       memcpy(skb->data, data + pad, len);
+                       bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+               } else {
+                       if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
-                                       dma_unmap_addr(rx_buf, mapping),
+                                                dma_unmap_addr(rx_buf, mapping),
                                                 fp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
+                               skb = build_skb(data);
+                               if (unlikely(!skb)) {
+                                       kfree(data);
+                                       fp->eth_q_stats.rx_skb_alloc_failed++;
+                                       goto next_rx;
+                               }
                                skb_reserve(skb, pad);
-                               skb_put(skb, len);
-
                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR  packet dropped because "
                                   "of alloc failure\n");
                                fp->eth_q_stats.rx_skb_alloc_failed++;
 reuse_rx:
-                               bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+                               bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                                goto next_rx;
                        }
+               }
 
-                       skb->protocol = eth_type_trans(skb, bp->dev);
+               skb_put(skb, len);
+               skb->protocol = eth_type_trans(skb, bp->dev);
 
-                       /* Set Toeplitz hash for a none-LRO skb */
-                       bnx2x_set_skb_rxhash(bp, cqe, skb);
+               /* Set Toeplitz hash for a non-LRO skb */
+               skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
 
-                       skb_checksum_none_assert(skb);
+               skb_checksum_none_assert(skb);
 
-                       if (bp->dev->features & NETIF_F_RXCSUM) {
+               if (bp->dev->features & NETIF_F_RXCSUM) {
 
-                               if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               else
-                                       fp->eth_q_stats.hw_csum_err++;
-                       }
+                       if (likely(BNX2X_RX_CSUM_OK(cqe)))
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       else
+                               fp->eth_q_stats.hw_csum_err++;
                }
 
-               skb_record_rx_queue(skb, fp->index);
+               skb_record_rx_queue(skb, fp->rx_queue);
 
                if (le16_to_cpu(cqe_fp->pars_flags.flags) &
                    PARSING_FLAGS_VLAN)
@@ -765,7 +759,7 @@ reuse_rx:
 
 
 next_rx:
-               rx_buf->skb = NULL;
+               rx_buf->data = NULL;
 
                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1005,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                struct sw_rx_bd *first_buf =
                                        &tpa_info->first_buf;
 
-                               first_buf->skb = netdev_alloc_skb(bp->dev,
-                                                      fp->rx_buf_size);
-                               if (!first_buf->skb) {
+                               first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+                                                         GFP_ATOMIC);
+                               if (!first_buf->data) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
                                                  "disabling TPA on this "
@@ -1093,16 +1087,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
                struct bnx2x_fastpath *fp = &bp->fp[i];
                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+                       unsigned pkts_compl = 0, bytes_compl = 0;
 
-                       u16 bd_cons = txdata->tx_bd_cons;
                        u16 sw_prod = txdata->tx_pkt_prod;
                        u16 sw_cons = txdata->tx_pkt_cons;
 
                        while (sw_cons != sw_prod) {
-                               bd_cons = bnx2x_free_tx_pkt(bp, txdata,
-                                                           TX_BD(sw_cons));
+                               bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+                                   &pkts_compl, &bytes_compl);
                                sw_cons++;
                        }
+                       netdev_tx_reset_queue(
+                           netdev_get_tx_queue(bp->dev, txdata->txq_index));
                }
        }
 }
@@ -1118,16 +1114,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 
        for (i = 0; i < NUM_RX_BD; i++) {
                struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-               struct sk_buff *skb = rx_buf->skb;
+               u8 *data = rx_buf->data;
 
-               if (skb == NULL)
+               if (data == NULL)
                        continue;
                dma_unmap_single(&bp->pdev->dev,
                                 dma_unmap_addr(rx_buf, mapping),
                                 fp->rx_buf_size, DMA_FROM_DEVICE);
 
-               rx_buf->skb = NULL;
-               dev_kfree_skb(skb);
+               rx_buf->data = NULL;
+               kfree(data);
        }
 }
 
@@ -1445,6 +1441,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
                break;
        }
 
+#ifdef BCM_CNIC
+       /* override in iSCSI SD mode */
+       if (IS_MF_ISCSI_SD(bp))
+               bp->num_queues = 1;
+#endif
        /* Add special queues */
        bp->num_queues += NON_ETH_CONTEXT_USE;
 }
@@ -1509,6 +1510,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
+               u32 mtu;
 
                /* Always use a mini-jumbo MTU for the FCoE L2 ring */
                if (IS_FCOE_IDX(i))
@@ -1518,13 +1520,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
                         * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
                         * overrun attack.
                         */
-                       fp->rx_buf_size =
-                               BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
-                               BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+                       mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
                else
-                       fp->rx_buf_size =
-                               bp->dev->mtu + ETH_OVREHEAD +
-                               BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+                       mtu = bp->dev->mtu;
+               fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+                                 IP_HEADER_ALIGNMENT_PADDING +
+                                 ETH_OVREHEAD +
+                                 mtu +
+                                 BNX2X_FW_RX_ALIGN_END;
+               /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
        }
 }
 
@@ -1929,13 +1933,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                break;
        }
 
-       if (!bp->port.pmf)
+       if (bp->port.pmf)
+               bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+       else
                bnx2x__link_status_update(bp);
 
        /* start the timer */
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 
 #ifdef BCM_CNIC
+       /* re-read iscsi info */
+       bnx2x_get_iscsi_info(bp);
        bnx2x_setup_cnic_irq_info(bp);
        if (bp->state == BNX2X_STATE_OPEN)
                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2807,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+                       unsigned int pkts_compl = 0, bytes_compl = 0;
 
                        DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
                                                "dropping packet...\n");
@@ -2810,7 +2819,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                         */
                        first_bd->nbd = cpu_to_le16(nbd);
                        bnx2x_free_tx_pkt(bp, txdata,
-                                         TX_BD(txdata->tx_pkt_prod));
+                                         TX_BD(txdata->tx_pkt_prod),
+                                         &pkts_compl, &bytes_compl);
                        return NETDEV_TX_OK;
                }
 
@@ -2871,6 +2881,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                   pbd_e2->parsing_data);
        DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        txdata->tx_pkt_prod++;
        /*
         * Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2993,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;
 
-       if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+       if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
                return -EINVAL;
 
+#ifdef BCM_CNIC
+       if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+               return -EINVAL;
+#endif
+
        if (netif_running(dev))  {
                rc = bnx2x_set_eth_mac(bp, false);
                if (rc)
@@ -3098,7 +3115,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        u8 cos;
        int rx_ring_size = 0;
 
-       /* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+       if (IS_MF_ISCSI_SD(bp)) {
+               rx_ring_size = MIN_RX_SIZE_NONTPA;
+               bp->rx_ring_size = rx_ring_size;
+       } else
+#endif
        if (!bp->rx_ring_size) {
 
                rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3130,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
                                     MIN_RX_SIZE_TPA, rx_ring_size);
 
                bp->rx_ring_size = rx_ring_size;
-       } else
+       } else /* if rx_ring_size specified - use it */
                rx_ring_size = bp->rx_ring_size;
 
        /* Common */
@@ -3278,14 +3300,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
        msix_table_size = bp->igu_sb_cnt + 1;
 
        /* fp array: RSS plus CNIC related L2 queues */
-       fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+       fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
                     sizeof(*fp), GFP_KERNEL);
        if (!fp)
                goto alloc_err;
        bp->fp = fp;
 
        /* msix table */
-       tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+       tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
        if (!tbl)
                goto alloc_err;
        bp->msix_table = tbl;
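kcalloc(n, size) is preferred over kzalloc(n * size) because it rejects a multiplication overflow instead of returning an undersized buffer. A userspace sketch of that check — my_calloc_checked() is a stand-in for the kernel helper:

    #include <stdio.h>
    #include <stdlib.h>

    static void *my_calloc_checked(size_t n, size_t size)
    {
            if (size && n > (size_t)-1 / size)  /* multiplication would wrap */
                    return NULL;
            return calloc(n, size);             /* zeroed, like kcalloc/kzalloc */
    }

    int main(void)
    {
            void *ok  = my_calloc_checked(16, sizeof(long));
            void *bad = my_calloc_checked((size_t)-1, 8);

            printf("ok=%p bad=%p\n", ok, bad);  /* bad is NULL, not a tiny buffer */
            free(ok);
            return 0;
    }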
@@ -3409,7 +3431,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
        return bnx2x_reload_if_running(dev);
 }
 
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -3420,7 +3443,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 flags = bp->flags;
index 283d663..bf27c54 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 
 #include "bnx2x.h"
@@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
  */
 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
 #endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+       netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
 
 /**
  * bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 {
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
-       memset(fp->sge_mask, 0xff,
-              (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+       memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
 
        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
        return 0;
 }
 
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
-                                    struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+                                     struct bnx2x_fastpath *fp, u16 index)
 {
-       struct sk_buff *skb;
+       u8 *data;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;
 
-       skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
-       if (unlikely(skb == NULL))
+       data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+       if (unlikely(data == NULL))
                return -ENOMEM;
 
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+       mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+                                fp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               dev_kfree_skb_any(skb);
+               kfree(data);
                return -ENOMEM;
        }
 
-       rx_buf->skb = skb;
+       rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);
 
        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
        return 0;
 }
 
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
  * we are just moving one from cons to prod
  * we are not creating a new mapping,
  * so there is no need to check for dma_mapping_error().
  */
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                      u16 cons, u16 prod)
 {
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
-       prod_rx_buf->skb = cons_rx_buf->skb;
+       prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
 }
 
@@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
        for (i = 0; i < last; i++) {
                struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
                struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-               struct sk_buff *skb = first_buf->skb;
+               u8 *data = first_buf->data;
 
-               if (skb == NULL) {
+               if (data == NULL) {
                        DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
                        continue;
                }
@@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(first_buf, mapping),
                                         fp->rx_buf_size, DMA_FROM_DEVICE);
-               dev_kfree_skb(skb);
-               first_buf->skb = NULL;
+               kfree(data);
+               first_buf->data = NULL;
        }
 }
 
@@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
         * fp->eth_q_stats.rx_skb_alloc_failed = 0
         */
        for (i = 0; i < rx_ring_size; i++) {
-               if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+               if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        continue;
                }
@@ -1318,6 +1320,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;
 
+       bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        /** Current BNX2X_FCOE_ETH_CID definition implies not more than
@@ -1488,4 +1491,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
        return max_cfg;
 }
 
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp:                driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+       return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp:                driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+       int func;
+       int vn;
+
+       /* Set the attention towards other drivers on the same port */
+       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+               if (vn == BP_VN(bp))
+                       continue;
+
+               func = func_by_vn(bp, vn);
+               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+                      (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+       }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp:                driver handle
+ * @flags:     flags to update
+ * @set:       set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+       if (SHMEM2_HAS(bp, drv_flags)) {
+               u32 drv_flags;
+               bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+               drv_flags = SHMEM2_RD(bp, drv_flags);
+
+               if (set)
+                       SET_FLAGS(drv_flags, flags);
+               else
+                       RESET_FLAGS(drv_flags, flags);
+
+               SHMEM2_WR(bp, drv_flags, drv_flags);
+               DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+               bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+       }
+}
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+       if (is_valid_ether_addr(addr))
+               return true;
+#ifdef BCM_CNIC
+       if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+               return true;
+#endif
+       return false;
+}
+
 #endif /* BNX2X_CMN_H */
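bnx2x_update_drv_flags() moves into this shared header so both the DCB code and the load path perform the same locked read-modify-write of the shmem drv_flags word. A generic userspace shape of that pattern, with a pthread mutex standing in for the hardware lock:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t shmem_drv_flags;        /* stand-in for the shmem word */

    #define DRV_FLAGS_DCB_CONFIGURED 0x1

    static void update_drv_flags(uint32_t flags, int set)
    {
            pthread_mutex_lock(&hw_lock);   /* models the HW_LOCK_DRV_FLAGS lock */
            if (set)
                    shmem_drv_flags |= flags;
            else
                    shmem_drv_flags &= ~flags;
            pthread_mutex_unlock(&hw_lock);
    }

    int main(void)
    {
            update_drv_flags(DRV_FLAGS_DCB_CONFIGURED, 1);
            printf("drv_flags 0x%08x\n", shmem_drv_flags);
            update_drv_flags(DRV_FLAGS_DCB_CONFIGURED, 0);
            printf("drv_flags 0x%08x\n", shmem_drv_flags);
            return 0;
    }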
index 51bd748..5051cf3 100644 (file)
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
 }
 #endif
 
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
-       if (SHMEM2_HAS(bp, drv_flags)) {
-               u32 drv_flags;
-               bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-               drv_flags = SHMEM2_RD(bp, drv_flags);
-
-               if (set)
-                       SET_FLAGS(drv_flags, flags);
-               else
-                       RESET_FLAGS(drv_flags, flags);
-
-               SHMEM2_WR(bp, drv_flags, drv_flags);
-               DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
-               bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
-       }
-}
-
 static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 {
        u8 prio, cos;
@@ -755,18 +737,26 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                        /* mark DCBX result for PMF migration */
                        bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
 #ifdef BCM_DCBNL
-                       /**
+                       /*
                         * Add new app tlvs to dcbnl
                         */
                        bnx2x_dcbnl_update_applist(bp, false);
 #endif
-                       bnx2x_dcbx_stop_hw_tx(bp);
-
-                       /* reconfigure the netdevice with the results of the new
+                       /*
+                        * reconfigure the netdevice with the results of the new
                         * dcbx negotiation.
                         */
                        bnx2x_dcbx_update_tc_mapping(bp);
 
+                       /*
+                        * allow other functions to update their netdevices
+                        * accordingly
+                        */
+                       if (IS_MF(bp))
+                               bnx2x_link_sync_notify(bp);
+
+                       bnx2x_dcbx_stop_hw_tx(bp);
+
                        return;
                }
        case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +765,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 
                bnx2x_dcbx_update_ets_params(bp);
                bnx2x_dcbx_resume_hw_tx(bp);
+
                return;
        case BNX2X_DCBX_STATE_TX_RELEASED:
                DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +874,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
                /*For IEEE admin_recommendation_bw_precentage
                 *For IEEE admin_recommendation_ets_pg */
                af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
                        if (dp->admin_priority_app_table[i].valid) {
                                struct bnx2x_admin_priority_app_table *table =
                                        dp->admin_priority_app_table;
@@ -923,7 +914,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
 
 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
 {
-       if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+       if (!CHIP_IS_E1x(bp)) {
                bp->dcb_state = dcb_on;
                bp->dcbx_enabled = dcbx_enabled;
        } else {
@@ -1863,7 +1854,7 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
 void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
 {
        /* if we need to synchronize DCBX result from prev PMF
-        * read it from shmem and update bp accordingly
+        * read it from shmem and update bp and netdev accordingly
         */
        if (SHMEM2_HAS(bp, drv_flags) &&
           GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
@@ -1875,6 +1866,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
                                          bp->dcbx_error);
                bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
                                         bp->dcbx_error);
+#ifdef BCM_DCBNL
+               /*
+                * Add new app tlvs to dcbnl
+                */
+               bnx2x_dcbnl_update_applist(bp, false);
+               /*
+                * Send a notification for the new negotiated parameters
+                */
+               dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+               /*
+                * reconfigure the netdevice with the results of the new
+                * dcbx negotiation.
+                */
+               bnx2x_dcbx_update_tc_mapping(bp);
+
        }
 }
 
@@ -2242,7 +2249,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
        int i, ff;
 
        /* iterate over the app entries looking for idtype and idval */
-       for (i = 0, ff = -1; i < 4; i++) {
+       for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
                struct bnx2x_admin_priority_app_table *app_ent =
                        &bp->dcbx_config_params.admin_priority_app_table[i];
                if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2258,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
                if (ff < 0 && !app_ent->valid)
                        ff = i;
        }
-       if (i < 4)
+       if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
                /* if found overwrite up */
                bp->dcbx_config_params.
                        admin_priority_app_table[i].priority = up;
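Replacing the literal 4 with DCBX_CONFIG_MAX_APP_PROTOCOL keeps the table size in one place; the loop itself is the usual "find a match, otherwise remember the first free slot" scan. A standalone version of the same idiom with a made-up entry type:

    #include <stdio.h>

    #define MAX_APP 4

    struct app_entry { int valid; int id; int prio; };

    static int set_app_prio(struct app_entry *tbl, int id, int prio)
    {
            int i, ff = -1;

            for (i = 0; i < MAX_APP; i++) {
                    if (tbl[i].valid && tbl[i].id == id)
                            break;          /* exact match wins */
                    if (ff < 0 && !tbl[i].valid)
                            ff = i;         /* remember first free slot */
            }
            if (i < MAX_APP)
                    tbl[i].prio = prio;     /* overwrite the match */
            else if (ff >= 0)
                    tbl[ff] = (struct app_entry){ .valid = 1, .id = id, .prio = prio };
            else
                    return -1;              /* table full */
            return 0;
    }

    int main(void)
    {
            struct app_entry tbl[MAX_APP] = { { .valid = 1, .id = 3260, .prio = 4 } };

            set_app_prio(tbl, 3260, 5);     /* updates the existing entry */
            set_app_prio(tbl, 8906, 3);     /* lands in the first free slot */
            printf("%d %d\n", tbl[0].prio, tbl[1].prio);    /* 5 3 */
            return 0;
    }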
index 2c6a3bc..2ab9254 100644 (file)
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
                u32 app_id;
 };
 
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
 struct bnx2x_config_dcbx_params {
        u32 overwrite_settings;
        u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
        u32 admin_recommendation_bw_precentage[8];
        u32 admin_recommendation_ets_pg[8];
        u32 admin_pfc_bitmap;
-       struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+       struct bnx2x_admin_priority_app_table
+               admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
        u32 admin_default_priority;
 };
 
index f0ca8b2..90d44af 100644 (file)
@@ -107,6 +107,10 @@ static const struct {
                                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
        { STATS_OFFSET32(mf_tag_discard),
                                4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+       { STATS_OFFSET32(pfc_frames_received_hi),
+                               8, STATS_FLAGS_PORT, "pfc_frames_received" },
+       { STATS_OFFSET32(pfc_frames_sent_hi),
+                               8, STATS_FLAGS_PORT, "pfc_frames_sent" },
        { STATS_OFFSET32(brb_drop_hi),
                                8, STATS_FLAGS_PORT, "rx_brb_discard" },
        { STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                DP(NETIF_MSG_LINK, "Unsupported port type\n");
                return -EINVAL;
        }
-       /* Save new config in case command complete successuly */
+       /* Save new config in case command completes successfully */
        new_multi_phy_config = bp->link_params.multi_phy_config;
        /* Get the new cfg_idx */
        cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -761,8 +765,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
        struct bnx2x *bp = netdev_priv(dev);
        u8 phy_fw_ver[PHY_FW_VER_LEN];
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 
        phy_fw_ver[0] = '\0';
        if (bp->port.pmf) {
@@ -773,14 +777,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
                bnx2x_release_phy_lock(bp);
        }
 
-       strncpy(info->fw_version, bp->fw_ver, 32);
+       strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
        snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
                 "bc %d.%d.%d%s%s",
                 (bp->common.bc_ver & 0xff0000) >> 16,
                 (bp->common.bc_ver & 0xff00) >> 8,
                 (bp->common.bc_ver & 0xff),
                 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
-       strcpy(info->bus_info, pci_name(bp->pdev));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
        info->n_stats = BNX2X_NUM_STATS;
        info->testinfo_len = BNX2X_NUM_TESTS;
        info->eedump_len = bp->common.flash_size;
@@ -1740,6 +1744,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        struct sw_rx_bd *rx_buf;
        u16 len;
        int rc = -ENODEV;
+       u8 *data;
+       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 
        /* check the loopback mode */
        switch (loopback_mode) {
@@ -1748,8 +1754,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
                        return -EINVAL;
                break;
        case BNX2X_MAC_LOOPBACK:
-               bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
-                                               LOOPBACK_XMAC : LOOPBACK_BMAC;
+               if (CHIP_IS_E3(bp)) {
+                       int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+                       if (bp->port.supported[cfg_idx] &
+                           (SUPPORTED_10000baseT_Full |
+                            SUPPORTED_20000baseMLD2_Full |
+                            SUPPORTED_20000baseKR2_Full))
+                               bp->link_params.loopback_mode = LOOPBACK_XMAC;
+                       else
+                               bp->link_params.loopback_mode = LOOPBACK_UMAC;
+               } else
+                       bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                break;
        default:
@@ -1784,6 +1800,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
        rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        pkt_prod = txdata->tx_pkt_prod++;
        tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
        tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1865,10 +1883,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        dma_sync_single_for_cpu(&bp->pdev->dev,
                                   dma_unmap_addr(rx_buf, mapping),
                                   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
-       skb = rx_buf->skb;
-       skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+       data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
        for (i = ETH_HLEN; i < pkt_size; i++)
-               if (*(skb->data + i) != (unsigned char) (i & 0xff))
+               if (*(data + i) != (unsigned char) (i & 0xff))
                        goto test_loopback_rx_exit;
 
        rc = 0;
index fc754cb..3e30c86 100644 (file)
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
        #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
        #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
        #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+       #define REQ_BC_VER_4_PFC_STATS_SUPPORTED        0x00070201
 
        #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
        #define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
 
        #define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+       #define DRV_MSG_CODE_DRV_INFO_ACK               0xd8000000
+       #define DRV_MSG_CODE_DRV_INFO_NACK              0xd9000000
 
        #define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
        #define REQ_BC_VER_4_SET_MF_BW                  0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
        #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
        #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
        #define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+       #define FW_MSG_CODE_DRV_INFO_ACK                0xd8100000
+       #define FW_MSG_CODE_DRV_INFO_NACK               0xd9100000
 
        #define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
        #define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
 
        #define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
        #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+       #define DRV_STATUS_DRV_INFO_REQ                 0x04000000
 
        u32 virt_mac_upper;
        #define VIRT_MAC_SIGN_MASK                      0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
        u32 extended_dev_info_shared_addr;
        u32 ncsi_oem_data_addr;
 
-       u32 ocsd_host_addr;
-       u32 ocbb_host_addr;
-       u32 ocsd_req_update_interval;
+       u32 ocsd_host_addr; /* initialized by option ROM */
+       u32 ocbb_host_addr; /* initialized by option ROM */
+       u32 ocsd_req_update_interval; /* initialized by option ROM */
+       u32 temperature_in_half_celsius;
+       u32 glob_struct_in_host;
+
+       u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE                    0x00000000
+
+       u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+
+       u32 extended_dev_info_shared_cfg_size;
+
+       u32 dcbx_en[PORT_MAX];
+
+       /* The offset points to the multi threaded meta structure */
+       u32 multi_thread_data_offset;
+
+       /* address of DMAable host address holding values from the drivers */
+       u32 drv_info_host_addr_lo;
+       u32 drv_info_host_addr_hi;
+
+       /* general values written by the MFW (such as current version) */
+       u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK          0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT         0
+#define DRV_INFO_CONTROL_OP_CODE_MASK      0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT     8
 };
 
 
@@ -2501,14 +2536,18 @@ struct mac_stx {
 #define MAC_STX_IDX_MAX                     2
 
 struct host_port_stats {
-       u32            host_port_stats_start;
+       u32            host_port_stats_counter;
 
        struct mac_stx mac_stx[MAC_STX_IDX_MAX];
 
        u32            brb_drop_hi;
        u32            brb_drop_lo;
 
-       u32            host_port_stats_end;
+       u32            not_used; /* obsolete */
+       u32            pfc_frames_tx_hi;
+       u32            pfc_frames_tx_lo;
+       u32            pfc_frames_rx_hi;
+       u32            pfc_frames_rx_lo;
 };
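
Sketch only, using a hypothetical helper: the new PFC frame counters are exported as hi/lo 32-bit halves, which a consumer would typically fold into a single 64-bit value like this.

    static inline u64 example_pfc_frames_tx(const struct host_port_stats *s)
    {
            /* combine the hi/lo halves into one 64-bit counter */
            return ((u64)s->pfc_frames_tx_hi << 32) | s->pfc_frames_tx_lo;
    }
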
 
 
@@ -2548,6 +2587,118 @@ struct host_func_stats {
 /* VIC definitions */
 #define VICSTATST_UIF_INDEX 2
 
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+       ETH_STATS_OPCODE,
+       FCOE_STATS_OPCODE,
+       ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN      12
+/*  Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+       /* Function's Driver Version. padded to 12 */
+       u8 version[ETH_STAT_INFO_VERSION_LEN];
+       /* Locally Admin Addr. BigEndian EUI-48. Actual size is 6 bytes */
+       u8 mac_local[8];
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       u8 mac_add2[8];         /* Additional Programmed MAC Addr 2. */
+       u32 mtu_size;           /* MTU Size. Note   : Negotiated MTU */
+       u32 feature_flags;      /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK                0x01
+#define FEATURE_ETH_LSO_MASK                   0x02
+#define FEATURE_ETH_BOOTMODE_MASK              0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT             2
+#define FEATURE_ETH_BOOTMODE_NONE              (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE               (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI             (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE              (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK                   0x20
+       u32 lso_max_size;       /* LSO MaxOffloadSize. */
+       u32 lso_min_seg_cnt;    /* LSO MinSegmentCount. */
+       /* Num Offloaded Connections TCP_IPv4. */
+       u32 ipv4_ofld_cnt;
+       /* Num Offloaded Connections TCP_IPv6. */
+       u32 ipv6_ofld_cnt;
+       u32 promiscuous_mode;   /* Promiscuous Mode. non-zero true */
+       u32 txq_size;           /* TX Descriptors Queue Size */
+       u32 rxq_size;           /* RX Descriptors Queue Size */
+       /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+       u32 txq_avg_depth;
+       /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+       u32 rxq_avg_depth;
+       /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+       u32 iov_offload;
+       /* Number of NetQueue/VMQ Config'd. */
+       u32 netq_cnt;
+       u32 vf_cnt;             /* Num VF assigned to this PF. */
+};
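
For illustration (the helper is hypothetical, not in the patch): extracting the boot-mode sub-field of eth_stats_info::feature_flags with the masks defined above.

    static inline u32 example_eth_boot_mode(const struct eth_stats_info *info)
    {
            return (info->feature_flags & FEATURE_ETH_BOOTMODE_MASK) >>
                   FEATURE_ETH_BOOTMODE_SHIFT;
    }
    /* e.g. PXE boot yields FEATURE_ETH_BOOTMODE_PXE >> FEATURE_ETH_BOOTMODE_SHIFT == 1 */
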
+
+/*  Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+       u8 version[12];         /* Function's Driver Version. */
+       u8 mac_local[8];        /* Locally Admin Addr. */
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       u8 mac_add2[8];         /* Additional Programmed MAC Addr 2. */
+       /* QoS Priority (per 802.1p). 0-7255 */
+       u32 qos_priority;
+       u32 txq_size;           /* FCoE TX Descriptors Queue Size. */
+       u32 rxq_size;           /* FCoE RX Descriptors Queue Size. */
+       /* FCoE TX Descriptor Queue Avg Depth. */
+       u32 txq_avg_depth;
+       /* FCoE RX Descriptors Queue Avg Depth. */
+       u32 rxq_avg_depth;
+       u32 rx_frames_lo;       /* FCoE RX Frames received. */
+       u32 rx_frames_hi;       /* FCoE RX Frames received. */
+       u32 rx_bytes_lo;        /* FCoE RX Bytes received. */
+       u32 rx_bytes_hi;        /* FCoE RX Bytes received. */
+       u32 tx_frames_lo;       /* FCoE TX Frames sent. */
+       u32 tx_frames_hi;       /* FCoE TX Frames sent. */
+       u32 tx_bytes_lo;        /* FCoE TX Bytes sent. */
+       u32 tx_bytes_hi;        /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI  Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+       u8 version[12];         /* Function's Driver Version. */
+       u8 mac_local[8];        /* Locally Admin iSCSI MAC Addr. */
+       u8 mac_add1[8];         /* Additional Programmed MAC Addr 1. */
+       /* QoS Priority (per 802.1p). 0-7255 */
+       u32 qos_priority;
+       u8 initiator_name[64];  /* iSCSI Boot Initiator Node name. */
+       u8 ww_port_name[64];    /* iSCSI World wide port name */
+       u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+       u8 boot_target_ip[16];  /* iSCSI Boot Target IP. */
+       u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+       u8 boot_init_ip[16];    /* iSCSI Boot Initiator IP Address. */
+       u32 max_frame_size;     /* Max Frame Size. bytes */
+       u32 txq_size;           /* PDU TX Descriptors Queue Size. */
+       u32 rxq_size;           /* PDU RX Descriptors Queue Size. */
+       u32 txq_avg_depth;      /* PDU TX Descriptor Queue Avg Depth. */
+       u32 rxq_avg_depth;      /* PDU RX Descriptors Queue Avg Depth. */
+       u32 rx_pdus_lo;         /* iSCSI PDUs received. */
+       u32 rx_pdus_hi;         /* iSCSI PDUs received. */
+       u32 rx_bytes_lo;        /* iSCSI RX Bytes received. */
+       u32 rx_bytes_hi;        /* iSCSI RX Bytes received. */
+       u32 tx_pdus_lo;         /* iSCSI PDUs sent. */
+       u32 tx_pdus_hi;         /* iSCSI PDUs sent. */
+       u32 tx_bytes_lo;        /* iSCSI PDU TX Bytes sent. */
+       u32 tx_bytes_hi;        /* iSCSI PDU TX Bytes sent. */
+       u32 pcp_prior_map_tbl;  /* C-PCP to S-PCP Priority MapTable.
+                                * 9 nibbles, the position of each nibble
+                                * represents the C-PCP value, the value
+                                * of the nibble = S-PCP value.
+                                */
+};
+
+union drv_info_to_mcp {
+       struct eth_stats_info   ether_stat;
+       struct fcoe_stats_info  fcoe_stat;
+       struct iscsi_stats_info iscsi_stat;
+};
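
A minimal sketch, with assumed names, of selecting which member of union drv_info_to_mcp to fill for a given drv_info opcode before the union is copied into the DMAable buffer advertised via drv_info_host_addr_lo/hi; the field values are placeholders.

    #include <linux/string.h>

    static void example_fill_drv_info(union drv_info_to_mcp *buf,
                                      enum drv_info_opcode op)
    {
            memset(buf, 0, sizeof(*buf));
            switch (op) {
            case ETH_STATS_OPCODE:
                    buf->ether_stat.mtu_size = 1500;        /* placeholder */
                    break;
            case FCOE_STATS_OPCODE:
                    buf->fcoe_stat.qos_priority = 3;        /* placeholder */
                    break;
            case ISCSI_STATS_OPCODE:
                    buf->iscsi_stat.max_frame_size = 9000;  /* placeholder */
                    break;
            }
    }
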
 #define BCM_5710_FW_MAJOR_VERSION                      7
 #define BCM_5710_FW_MINOR_VERSION                      0
 #define BCM_5710_FW_REVISION_VERSION           29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
 
 
 /*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
  */
+struct fcoe_rx_stat_params_section0 {
+       __le32 fcoe_rx_pkt_cnt;
+       __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+       __le32 fcoe_ver_cnt;
+       __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+       __le32 fc_crc_cnt;
+       __le32 eofa_del_cnt;
+       __le32 miss_frame_cnt;
+       __le32 seq_timeout_cnt;
+       __le32 drop_seq_cnt;
+       __le32 fcoe_rx_drop_pkt_cnt;
+       __le32 fcp_rx_pkt_cnt;
+       __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+       __le32 fcoe_tx_pkt_cnt;
+       __le32 fcoe_tx_byte_cnt;
+       __le32 fcp_tx_pkt_cnt;
+       __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+       struct fcoe_tx_stat_params tx_stat;
+       struct fcoe_rx_stat_params_section0 rx_stat0;
+       struct fcoe_rx_stat_params_section1 rx_stat1;
+       struct fcoe_rx_stat_params_section2 rx_stat2;
+};
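
Sketch only (the helper is an assumption): the FCoE statistics fields are little-endian, so a reader would total the per-section RX drop counters roughly as below, using the kernel's le32_to_cpu().

    static inline u32 example_fcoe_rx_drops(const struct fcoe_statistics_params *p)
    {
            /* fields are __le32; convert to host order before summing */
            return le32_to_cpu(p->rx_stat1.fcoe_rx_drop_pkt_cnt) +
                   le32_to_cpu(p->rx_stat2.fcoe_rx_drop_pkt_cnt);
    }
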
+
+
+/*
+ * cfc delete event data
+*/
 struct cfc_del_event_data {
        u32 cid;
        u32 reserved0;
index 882f48f..4df9505 100644 (file)
@@ -27,7 +27,6 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 
-
 /********************************************************/
 #define ETH_HLEN                       14
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define EDC_MODE_LIMITING                              0x0044
 #define EDC_MODE_PASSIVE_DAC                   0x0055
 
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR     170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR              250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR              10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR               50
 
 /* BRB thresholds for E2*/
 #define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE            170
 #define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE                      50
 #define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE          250
 
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR   290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR    410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR    10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR     50
+
 /* BRB thresholds for E3A0 */
 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE          290
 #define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE              0
 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE            50
 #define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE                410
 
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR   330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR    490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR    15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR     55
 
 /* BRB thresholds for E3B0 2 port mode*/
 #define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE               1025
 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE         50
 #define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE     384
 
-
 /* only for E3B0*/
 #define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR                       304
 #define PFC_E3B0_4P_BRB_FULL_LB_XON_THR                        384
-#define PFC_E3B0_4P_LB_GUART                           120
+#define PFC_E3B0_4P_LB_GUART           120
 
 #define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART            120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST               80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST       80
 
 #define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART            80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST               120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST       120
+
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR                      330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR                       490
+#define DEFAULT_E3B0_LB_GUART          40
+
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART           40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST      0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART           40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST      0
 
+/* ETS defines*/
 #define DCBX_INVALID_COS                                       (0xFF)
 
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND                (0x5000)
@@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
        u32 min_w_val = 0;
        /* Calculate min_w_val.*/
        if (vars->link_up) {
-               if (SPEED_20000 == vars->line_speed)
+               if (vars->line_speed == SPEED_20000)
                        min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
                else
                        min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
                   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
 
-       if (0 == port) {
+       if (!port) {
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
                        credit_upper_bound);
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
                   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
        REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
                   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
-       if (0 == port) {
+       if (!port) {
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
                REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
        * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
        * port mode port1 has COS0-2 that can be used for WFQ.
        */
-       if (0 == port) {
+       if (!port) {
                base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
                max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
        } else {
@@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
        * In 2 port mode port0 has COS0-5 that can be used for WFQ.
        * In 4 port mode port1 has COS0-2 that can be used for WFQ.
        */
-       if (0 == port) {
+       if (!port) {
                base_weight = PBF_REG_COS0_WEIGHT_P0;
                max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
        } else {
@@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
 ******************************************************************************/
 static int bnx2x_ets_e3b0_get_total_bw(
        const struct link_params *params,
-       const struct bnx2x_ets_params *ets_params,
+       struct bnx2x_ets_params *ets_params,
        u16 *total_bw)
 {
        struct bnx2x *bp = params->bp;
        u8 cos_idx = 0;
+       u8 is_bw_cos_exist = 0;
 
        *total_bw = 0 ;
+
        /* Calculate total BW requested */
        for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
-               if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+               if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+                       is_bw_cos_exist = 1;
+                       if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+                               DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+                                                  "was set to 0\n");
+                               /*
+                                * This is to prevent a state when ramrods
+                                * can't be sent
+                               */
+                               ets_params->cos[cos_idx].params.bw_params.bw
+                                        = 1;
+                       }
                        *total_bw +=
                                ets_params->cos[cos_idx].params.bw_params.bw;
                }
        }
 
        /* Check total BW is valid */
-       if ((100 != *total_bw) || (0 == *total_bw)) {
-               if (0 == *total_bw) {
+       if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+               if (*total_bw == 0) {
                        DP(NETIF_MSG_LINK,
-                          "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+                          "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
                        return -EINVAL;
                }
                DP(NETIF_MSG_LINK,
-                  "bnx2x_ets_E3B0_config toatl BW should be 100\n");
-               /**
-               *   We can handle a case whre the BW isn't 100 this can happen
-               *   if the TC are joined.
-               */
+                  "bnx2x_ets_E3B0_config total BW should be 100\n");
+               /*
+                * We can handle a case where the BW isn't 100; this can happen
+                * if the TCs are joined.
+                */
        }
        return 0;
 }
@@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
        const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
                DCBX_E3B0_MAX_NUM_COS_PORT0;
 
-       if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+       if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
                DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
                                   "parameter There can't be two COS's with "
                                   "the same strict pri\n");
@@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
 
        if (pri > max_num_of_cos) {
                DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
-                              "parameter Illegal strict priority\n");
+                  "parameter Illegal strict priority\n");
            return -EINVAL;
        }
 
@@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 
        /* Set all the strict priority first */
        for (i = 0; i < max_num_of_cos; i++) {
-               if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
-                       if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+               if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+                       if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
                                DP(NETIF_MSG_LINK,
                                           "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
                                           "invalid cos entry\n");
@@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
                            sp_pri_to_cos[i], pri_set);
                        pri_bitmask = 1 << sp_pri_to_cos[i];
                        /* COS is used remove it from bitmap.*/
-                       if (0 == (pri_bitmask & cos_bit_to_set)) {
+                       if (!(pri_bitmask & cos_bit_to_set)) {
                                DP(NETIF_MSG_LINK,
                                        "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
                                        "invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
 ******************************************************************************/
 int bnx2x_ets_e3b0_config(const struct link_params *params,
                         const struct link_vars *vars,
-                        const struct bnx2x_ets_params *ets_params)
+                        struct bnx2x_ets_params *ets_params)
 {
        struct bnx2x *bp = params->bp;
        int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        /* Prepare BW parameters*/
        bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
                                                   &total_bw);
-       if (0 != bnx2x_status) {
+       if (bnx2x_status) {
                DP(NETIF_MSG_LINK,
                   "bnx2x_ets_E3B0_config get_total_bw failed\n");
                return -EINVAL;
        }
 
-       /**
-        *  Upper bound is set according to current link speed (min_w_val
-        *  should be the same for upper bound and COS credit val).
+       /*
+        * Upper bound is set according to current link speed (min_w_val
+        * should be the same for upper bound and COS credit val).
         */
        bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
        bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
                if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
                        cos_bw_bitmap |= (1 << cos_entry);
-                       /**
+                       /*
                         * The function also sets the BW in HW(not the mappin
                         * yet)
                         */
@@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
                           "bnx2x_ets_e3b0_config cos state not valid\n");
                        return -EINVAL;
                }
-               if (0 != bnx2x_status) {
+               if (bnx2x_status) {
                        DP(NETIF_MSG_LINK,
                           "bnx2x_ets_e3b0_config set cos bw failed\n");
                        return bnx2x_status;
@@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
        bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
                                                         sp_pri_to_cos);
 
-       if (0 != bnx2x_status) {
+       if (bnx2x_status) {
                DP(NETIF_MSG_LINK,
                   "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
                return bnx2x_status;
@@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
                                              cos_sp_bitmap,
                                              cos_bw_bitmap);
 
-       if (0 != bnx2x_status) {
+       if (bnx2x_status) {
                DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
                return bnx2x_status;
        }
@@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
 
        DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
 
-       if ((0 == total_bw) ||
-           (0 == cos0_bw) ||
-           (0 == cos1_bw)) {
+       if ((!total_bw) ||
+           (!cos0_bw) ||
+           (!cos1_bw)) {
                DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
                return;
        }
@@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
         * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
         * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
         */
-       val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+       val = (!strict_cos) ? 0x2318 : 0x22E0;
        REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
 
        return 0;
@@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 /******************************************************************/
 /*                     PFC section                               */
 /******************************************************************/
-
 static void bnx2x_update_pfc_xmac(struct link_params *params,
                                  struct link_vars *vars,
                                  u8 is_lb)
@@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
        if (!vars->link_up)
                return;
 
-       if (MAC_TYPE_EMAC == vars->mac_type) {
+       if (vars->mac_type == MAC_TYPE_EMAC) {
                DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
                bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
                                        pfc_frames_received);
@@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
 
        udelay(40);
 }
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+       u32 port4mode_ovwr_val;
+       /* Check 4-port override enabled */
+       port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+       if (port4mode_ovwr_val & (1<<0)) {
+               /* Return 4-port mode override value */
+               return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+       }
+       /* Return 4-port mode from input pin */
+       return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
 
 static void bnx2x_emac_init(struct link_params *params,
                            struct link_vars *vars)
@@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params,
 
 }
 
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
-       u32 port4mode_ovwr_val;
-       /* Check 4-port override enabled */
-       port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
-       if (port4mode_ovwr_val & (1<<0)) {
-               /* Return 4-port mode override value */
-               return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
-       }
-       /* Return 4-port mode from input pin */
-       return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
 /* Define the XMAC mode */
 static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
 {
        struct bnx2x *bp = params->bp;
        u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 
-       /**
-       * In 4-port mode, need to set the mode only once, so if XMAC is
-       * already out of reset, it means the mode has already been set,
-       * and it must not* reset the XMAC again, since it controls both
-       * ports of the path
-       **/
+       /*
+        * In 4-port mode, need to set the mode only once, so if XMAC is
+        * already out of reset, it means the mode has already been set,
+        * and it must not* reset the XMAC again, since it controls both
+        * ports of the path
+        */
 
        if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
            (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
 
        return 0;
 }
+
 static int bnx2x_emac_enable(struct link_params *params,
                             struct link_vars *vars, u8 lb)
 {
@@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
        REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
-
 /* PFC BRB internal port configuration params */
 struct bnx2x_pfc_brb_threshold_val {
        u32 pause_xoff;
@@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val {
 };
 
 struct bnx2x_pfc_brb_e3b0_val {
+       u32 per_class_guaranty_mode;
+       u32 lb_guarantied_hyst;
        u32 full_lb_xoff_th;
        u32 full_lb_xon_threshold;
        u32 lb_guarantied;
@@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val {
 struct bnx2x_pfc_brb_th_val {
        struct bnx2x_pfc_brb_threshold_val pauseable_th;
        struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+       struct bnx2x_pfc_brb_threshold_val default_class0;
+       struct bnx2x_pfc_brb_threshold_val default_class1;
+
 };
 static int bnx2x_pfc_brb_get_config_params(
                                struct link_params *params,
@@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params(
 {
        struct bnx2x *bp = params->bp;
        DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+       config_val->default_class1.pause_xoff = 0;
+       config_val->default_class1.pause_xon = 0;
+       config_val->default_class1.full_xoff = 0;
+       config_val->default_class1.full_xon = 0;
+
        if (CHIP_IS_E2(bp)) {
+               /*  class0 defaults */
+               config_val->default_class0.pause_xoff =
+                       DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+               config_val->default_class0.pause_xon =
+                       DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+               config_val->default_class0.full_xoff =
+                       DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+               config_val->default_class0.full_xon =
+                       DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+               /*  pause able*/
                config_val->pauseable_th.pause_xoff =
-                   PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                config_val->pauseable_th.pause_xon =
-                   PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
                config_val->pauseable_th.full_xoff =
-                   PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
                config_val->pauseable_th.full_xon =
-                   PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
                /* non pause able*/
                config_val->non_pauseable_th.pause_xoff =
-                   PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.pause_xon =
-                   PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xoff =
-                   PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xon =
-                   PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+                       PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
        } else if (CHIP_IS_E3A0(bp)) {
+               /*  class0 defaults */
+               config_val->default_class0.pause_xoff =
+                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+               config_val->default_class0.pause_xon =
+                       DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+               config_val->default_class0.full_xoff =
+                       DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+               config_val->default_class0.full_xon =
+                       DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+               /*  pause able */
                config_val->pauseable_th.pause_xoff =
-                   PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                config_val->pauseable_th.pause_xon =
-                   PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
                config_val->pauseable_th.full_xoff =
-                   PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
                config_val->pauseable_th.full_xon =
-                   PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
                /* non pause able*/
                config_val->non_pauseable_th.pause_xoff =
-                   PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.pause_xon =
-                   PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xoff =
-                   PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
                config_val->non_pauseable_th.full_xon =
-                   PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+                       PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
        } else if (CHIP_IS_E3B0(bp)) {
+               /*  class0 defaults */
+               config_val->default_class0.pause_xoff =
+                       DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+               config_val->default_class0.pause_xon =
+                   DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+               config_val->default_class0.full_xoff =
+                   DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+               config_val->default_class0.full_xon =
+                   DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
                if (params->phy[INT_PHY].flags &
                    FLAGS_4_PORT_MODE) {
                        config_val->pauseable_th.pause_xoff =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
                        config_val->pauseable_th.pause_xon =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
                        config_val->pauseable_th.full_xoff =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
                        config_val->pauseable_th.full_xon =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+                               PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
                        /* non pause able*/
                        config_val->non_pauseable_th.pause_xoff =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
                        config_val->non_pauseable_th.pause_xon =
-                           PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
                        config_val->non_pauseable_th.full_xoff =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
                        config_val->non_pauseable_th.full_xon =
-                           PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-           } else {
-               config_val->pauseable_th.pause_xoff =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
-               config_val->pauseable_th.pause_xon =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
-               config_val->pauseable_th.full_xoff =
-                   PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
-               config_val->pauseable_th.full_xon =
-                       PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
-               /* non pause able*/
-               config_val->non_pauseable_th.pause_xoff =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.pause_xon =
-                   PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xoff =
-                   PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
-               config_val->non_pauseable_th.full_xon =
-                   PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
-           }
+                       PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+               } else {
+                       config_val->pauseable_th.pause_xoff =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+                       config_val->pauseable_th.pause_xon =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+                       config_val->pauseable_th.full_xoff =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+                       config_val->pauseable_th.full_xon =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+                       /* non pause able*/
+                       config_val->non_pauseable_th.pause_xoff =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+                       config_val->non_pauseable_th.pause_xon =
+                               PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+                       config_val->non_pauseable_th.full_xoff =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+                       config_val->non_pauseable_th.full_xon =
+                               PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+               }
        } else
            return -EINVAL;
 
        return 0;
 }
 
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
-                                                struct bnx2x_pfc_brb_e3b0_val
-                                                *e3b0_val,
-                                                u32 cos0_pauseable,
-                                                u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+               struct link_params *params,
+               struct bnx2x_pfc_brb_e3b0_val
+               *e3b0_val,
+               struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+               const u8 pfc_enabled)
 {
-       if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+       if (pfc_enabled && pfc_params) {
+               e3b0_val->per_class_guaranty_mode = 1;
+               e3b0_val->lb_guarantied_hyst = 80;
+
+               if (params->phy[INT_PHY].flags &
+                   FLAGS_4_PORT_MODE) {
+                       e3b0_val->full_lb_xoff_th =
+                               PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+                       e3b0_val->full_lb_xon_threshold =
+                               PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+                       e3b0_val->lb_guarantied =
+                               PFC_E3B0_4P_LB_GUART;
+                       e3b0_val->mac_0_class_t_guarantied =
+                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+                       e3b0_val->mac_0_class_t_guarantied_hyst =
+                               PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+                       e3b0_val->mac_1_class_t_guarantied =
+                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+                       e3b0_val->mac_1_class_t_guarantied_hyst =
+                               PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+               } else {
+                       e3b0_val->full_lb_xoff_th =
+                               PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+                       e3b0_val->full_lb_xon_threshold =
+                               PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+                       e3b0_val->mac_0_class_t_guarantied_hyst =
+                               PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+                       e3b0_val->mac_1_class_t_guarantied =
+                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+                       e3b0_val->mac_1_class_t_guarantied_hyst =
+                               PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+                       if (pfc_params->cos0_pauseable !=
+                               pfc_params->cos1_pauseable) {
+                               /* nonpauseable= Lossy + pauseable = Lossless*/
+                               e3b0_val->lb_guarantied =
+                                       PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+                               e3b0_val->mac_0_class_t_guarantied =
+                              PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+                       } else if (pfc_params->cos0_pauseable) {
+                               /* Lossless +Lossless*/
+                               e3b0_val->lb_guarantied =
+                                       PFC_E3B0_2P_PAUSE_LB_GUART;
+                               e3b0_val->mac_0_class_t_guarantied =
+                                  PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+                       } else {
+                               /* Lossy +Lossy*/
+                               e3b0_val->lb_guarantied =
+                                       PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+                               e3b0_val->mac_0_class_t_guarantied =
+                              PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+                       }
+               }
+       } else {
+               e3b0_val->per_class_guaranty_mode = 0;
+               e3b0_val->lb_guarantied_hyst = 0;
                e3b0_val->full_lb_xoff_th =
-                   PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+                       DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
                e3b0_val->full_lb_xon_threshold =
-                   PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+                       DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
                e3b0_val->lb_guarantied =
-                   PFC_E3B0_4P_LB_GUART;
+                       DEFAULT_E3B0_LB_GUART;
                e3b0_val->mac_0_class_t_guarantied =
-                   PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
                e3b0_val->mac_0_class_t_guarantied_hyst =
-                   PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+                       DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
                e3b0_val->mac_1_class_t_guarantied =
-                   PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
                e3b0_val->mac_1_class_t_guarantied_hyst =
-                   PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
-       } else {
-               e3b0_val->full_lb_xoff_th =
-                   PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
-               e3b0_val->full_lb_xon_threshold =
-                   PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
-               e3b0_val->mac_0_class_t_guarantied_hyst =
-                   PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
-               e3b0_val->mac_1_class_t_guarantied =
-                   PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
-               e3b0_val->mac_1_class_t_guarantied_hyst =
-                   PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
-               if (cos0_pauseable != cos1_pauseable) {
-                       /* nonpauseable= Lossy + pauseable = Lossless*/
-                       e3b0_val->lb_guarantied =
-                           PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                           PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
-               } else if (cos0_pauseable) {
-                       /* Lossless +Lossless*/
-                       e3b0_val->lb_guarantied =
-                           PFC_E3B0_2P_PAUSE_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                           PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
-               } else {
-                       /* Lossy +Lossy*/
-                       e3b0_val->lb_guarantied =
-                           PFC_E3B0_2P_NON_PAUSE_LB_GUART;
-                       e3b0_val->mac_0_class_t_guarantied =
-                           PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
-               }
+                       DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
        }
 }
 static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
        struct bnx2x *bp = params->bp;
        struct bnx2x_pfc_brb_th_val config_val = { {0} };
        struct bnx2x_pfc_brb_threshold_val *reg_th_config =
-           &config_val.pauseable_th;
+               &config_val.pauseable_th;
        struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
-       int set_pfc = params->feature_config_flags &
+       const int set_pfc = params->feature_config_flags &
                FEATURE_CONFIG_PFC_ENABLED;
+       const u8 pfc_enabled = (set_pfc && pfc_params);
        int bnx2x_status = 0;
        u8 port = params->port;
 
        /* default - pause configuration */
        reg_th_config = &config_val.pauseable_th;
        bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
-       if (0 != bnx2x_status)
+       if (bnx2x_status)
                return bnx2x_status;
 
-       if (set_pfc && pfc_params)
+       if (pfc_enabled) {
                /* First COS */
-               if (!pfc_params->cos0_pauseable)
+               if (pfc_params->cos0_pauseable)
+                       reg_th_config = &config_val.pauseable_th;
+               else
                        reg_th_config = &config_val.non_pauseable_th;
+       } else
+               reg_th_config = &config_val.default_class0;
        /*
         * The number of free blocks below which the pause signal to class 0
         * of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
        REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
               BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
 
-       if (set_pfc && pfc_params) {
+       if (pfc_enabled) {
                /* Second COS */
                if (pfc_params->cos1_pauseable)
                        reg_th_config = &config_val.pauseable_th;
                else
                        reg_th_config = &config_val.non_pauseable_th;
+       } else
+               reg_th_config = &config_val.default_class1;
+       /*
+        * The number of free blocks below which the pause signal to
+        * class 1 of MAC #n is asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+              BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+              reg_th_config->pause_xoff);
+
+       /*
+        * The number of free blocks above which the pause signal to
+        * class 1 of MAC #n is de-asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+              BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+              reg_th_config->pause_xon);
+       /*
+        * The number of free blocks below which the full signal to
+        * class 1 of MAC #n is asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+              BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+              reg_th_config->full_xoff);
+       /*
+        * The number of free blocks above which the full signal to
+        * class 1 of MAC #n is de-asserted. n=0,1
+        */
+       REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+              BRB1_REG_FULL_1_XON_THRESHOLD_0,
+              reg_th_config->full_xon);
+
+       if (CHIP_IS_E3B0(bp)) {
+               bnx2x_pfc_brb_get_e3b0_config_params(
+                       params,
+                       &e3b0_val,
+                       pfc_params,
+                       pfc_enabled);
+
+               REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+                          e3b0_val.per_class_guaranty_mode);
+
                /*
-                * The number of free blocks below which the pause signal to
-                * class 1 of MAC #n is asserted. n=0,1
-               **/
-               REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
-                      BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
-                      reg_th_config->pause_xoff);
+                * The hysteresis on the guarantied buffer space for the Lb
+                * port before signaling XON.
+                */
+               REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+                          e3b0_val.lb_guarantied_hyst);
+
                /*
-                * The number of free blocks above which the pause signal to
-                * class 1 of MAC #n is de-asserted. n=0,1
+                * The number of free blocks below which the full signal to the
+                * LB port is asserted.
                 */
-               REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
-                      BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
-                      reg_th_config->pause_xon);
+               REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+                      e3b0_val.full_lb_xoff_th);
                /*
-                * The number of free blocks below which the full signal to
-                * class 1 of MAC #n is asserted. n=0,1
+                * The number of free blocks above which the full signal to the
+                * LB port is de-asserted.
                 */
-               REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
-                      BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
-                      reg_th_config->full_xoff);
+               REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+                      e3b0_val.full_lb_xon_threshold);
                /*
-                * The number of free blocks above which the full signal to
-                * class 1 of MAC #n is de-asserted. n=0,1
+                * The number of blocks guarantied for the MAC #n port. n=0,1
                 */
-               REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
-                      BRB1_REG_FULL_1_XON_THRESHOLD_0,
-                      reg_th_config->full_xon);
 
+               /* The number of blocks guarantied for the LB port.*/
+               REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+                      e3b0_val.lb_guarantied);
 
-               if (CHIP_IS_E3B0(bp)) {
-                       /*Should be done by init tool */
-                       /*
-                       * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
-                       * reset value
-                       * 944
-                       */
-
-                       /**
-                        * The hysteresis on the guarantied buffer space for the Lb port
-                        * before signaling XON.
-                        **/
-                       REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
-                       bnx2x_pfc_brb_get_e3b0_config_params(
-                           params,
-                           &e3b0_val,
-                           pfc_params->cos0_pauseable,
-                           pfc_params->cos1_pauseable);
-                       /**
-                        * The number of free blocks below which the full signal to the
-                        * LB port is asserted.
-                       */
-                       REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
-                                  e3b0_val.full_lb_xoff_th);
-                       /**
-                        * The number of free blocks above which the full signal to the
-                        * LB port is de-asserted.
-                       */
-                       REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
-                                  e3b0_val.full_lb_xon_threshold);
-                       /**
-                       * The number of blocks guarantied for the MAC #n port. n=0,1
-                       */
-
-                       /*The number of blocks guarantied for the LB port.*/
-                       REG_WR(bp, BRB1_REG_LB_GUARANTIED,
-                              e3b0_val.lb_guarantied);
-
-                       /**
-                        * The number of blocks guarantied for the MAC #n port.
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
-                                  2 * e3b0_val.mac_0_class_t_guarantied);
-                       REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
-                                  2 * e3b0_val.mac_1_class_t_guarantied);
-                       /**
-                        * The number of blocks guarantied for class #t in MAC0. t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
-                              e3b0_val.mac_0_class_t_guarantied);
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
-                              e3b0_val.mac_0_class_t_guarantied);
-                       /**
-                        * The hysteresis on the guarantied buffer space for class in
-                        * MAC0.  t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
-                              e3b0_val.mac_0_class_t_guarantied_hyst);
-                       REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
-                              e3b0_val.mac_0_class_t_guarantied_hyst);
-
-                       /**
-                        * The number of blocks guarantied for class #t in MAC1.t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
-                              e3b0_val.mac_1_class_t_guarantied);
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
-                              e3b0_val.mac_1_class_t_guarantied);
-                       /**
-                        * The hysteresis on the guarantied buffer space for class #t
-                       * in MAC1.  t=0,1
-                       */
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
-                              e3b0_val.mac_1_class_t_guarantied_hyst);
-                       REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
-                              e3b0_val.mac_1_class_t_guarantied_hyst);
-
-           }
+               /*
+                * The number of blocks guarantied for the MAC #n port.
+                */
+               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+                      2 * e3b0_val.mac_0_class_t_guarantied);
+               REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+                      2 * e3b0_val.mac_1_class_t_guarantied);
+               /*
+                * The number of blocks guarantied for class #t in MAC0. t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+                      e3b0_val.mac_0_class_t_guarantied);
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+                      e3b0_val.mac_0_class_t_guarantied);
+               /*
+                * The hysteresis on the guarantied buffer space for class in
+                * MAC0.  t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+                      e3b0_val.mac_0_class_t_guarantied_hyst);
+               REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+                      e3b0_val.mac_0_class_t_guarantied_hyst);
 
+               /*
+                * The number of blocks guarantied for class #t in MAC1.t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+                      e3b0_val.mac_1_class_t_guarantied);
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+                      e3b0_val.mac_1_class_t_guarantied);
+               /*
+                * The hysteresis on the guarantied buffer space for class #t
+                * in MAC1.  t=0,1
+                */
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+                      e3b0_val.mac_1_class_t_guarantied_hyst);
+               REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+                      e3b0_val.mac_1_class_t_guarantied_hyst);
        }
 
        return bnx2x_status;
@@ -2515,7 +2619,7 @@ int bnx2x_update_pfc(struct link_params *params,
 
        /* update BRB params */
        bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
-       if (0 != bnx2x_status)
+       if (bnx2x_status)
                return bnx2x_status;
 
        if (!vars->link_up)
@@ -2533,7 +2637,6 @@ int bnx2x_update_pfc(struct link_params *params,
                        bnx2x_emac_enable(params, vars, 0);
                        return bnx2x_status;
                }
-
                if (CHIP_IS_E2(bp))
                        bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
                else
@@ -3053,7 +3156,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                DP(NETIF_MSG_LINK, "write phy register failed\n");
                netdev_err(bp->dev,  "MDC/MDIO access timeout\n");
                rc = -EFAULT;
-
        } else {
                /* data */
                tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3192,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
                               EMAC_MDIO_STATUS_10MB);
        return rc;
 }
-
-
 /******************************************************************/
 /*                     BSC access functions from E3              */
 /******************************************************************/
@@ -3339,7 +3439,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
                aer_val = 0x3800 + offset - 1;
        else
                aer_val = 0x3800 + offset;
-       DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
        CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
                          MDIO_AER_BLOCK_AER_REG, aer_val);
 
@@ -3942,13 +4042,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
 
 static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
                                           struct link_params *params,
-                                          u8 fiber_mode)
+                                          u8 fiber_mode,
+                                          u8 always_autoneg)
 {
        struct bnx2x *bp = params->bp;
        u16 val16, digctrl_kx1, digctrl_kx2;
-       u8 lane;
-
-       lane = bnx2x_get_warpcore_lane(phy, params);
 
        /* Clear XFI clock comp in non-10G single lane mode. */
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4054,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
 
-       if (phy->req_line_speed == SPEED_AUTO_NEG) {
+       if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
                /* SGMII Autoneg */
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                                MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4065,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
        } else {
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                                MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-               val16 &= 0xcfbf;
+               val16 &= 0xcebf;
                switch (phy->req_line_speed) {
                case SPEED_10:
                        break;
@@ -4043,9 +4141,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                         MDIO_WC_REG_DIGITAL5_MISC6, &val);
 }
-
-
-       /* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
 static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
                                      struct link_params *params,
                                      u16 lane)
@@ -4250,7 +4346,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                vars->phy_flags |= PHY_SGMII_FLAG;
                DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
                bnx2x_warpcore_clear_regs(phy, params, lane);
-               bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+               bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
        } else {
                switch (serdes_net_if) {
                case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4374,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                                }
                                bnx2x_warpcore_set_sgmii_speed(phy,
                                                                params,
-                                                               fiber_mode);
+                                                               fiber_mode,
+                                                               0);
                        }
 
                        break;
@@ -4291,7 +4388,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
                                bnx2x_warpcore_set_10G_XFI(phy, params, 0);
                        } else if (vars->line_speed == SPEED_1000) {
                                DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
-                               bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+                               bnx2x_warpcore_set_sgmii_speed(
+                                               phy, params, 1, 0);
                        }
                        /* Issue Module detection */
                        if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4526,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
 
                /* Switch back to 4-copy registers */
                bnx2x_set_aer_mmd(params, phy);
-               /* Global loopback, not recommended. */
-               bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
-               bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-                               MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
-                               0x4000);
        } else {
                /* 10G & 20G */
                bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4542,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
 }
 
 
-void bnx2x_link_status_update(struct link_params *params,
-                             struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+                          struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
        u8 link_10g_plus;
-       u8 port = params->port;
-       u32 sync_offset, media_types;
-       /* Update PHY configuration */
-       set_phy_vars(params, vars);
-
-       vars->link_status = REG_RD(bp, params->shmem_base +
-                                  offsetof(struct shmem_region,
-                                           port_mb[port].link_status));
-
-       vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
-       vars->phy_flags = PHY_XGXS_FLAG;
        if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
                vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+       vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
        if (vars->link_up) {
                DP(NETIF_MSG_LINK, "phy link up\n");
 
@@ -4563,7 +4644,23 @@ void bnx2x_link_status_update(struct link_params *params,
                if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
                        vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
        }
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+                             struct link_vars *vars)
+{
+       struct bnx2x *bp = params->bp;
+       u8 port = params->port;
+       u32 sync_offset, media_types;
+       /* Update PHY configuration */
+       set_phy_vars(params, vars);
+
+       vars->link_status = REG_RD(bp, params->shmem_base +
+                                  offsetof(struct shmem_region,
+                                           port_mb[port].link_status));
 
+       vars->phy_flags = PHY_XGXS_FLAG;
+       bnx2x_sync_link(params, vars);
        /* Sync media type */
        sync_offset = params->shmem_base +
                        offsetof(struct shmem_region,
@@ -4602,7 +4699,6 @@ void bnx2x_link_status_update(struct link_params *params,
                 vars->line_speed, vars->duplex, vars->flow_ctrl);
 }
 
-
 static void bnx2x_set_master_ln(struct link_params *params,
                                struct bnx2x_phy *phy)
 {
@@ -4676,11 +4772,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
         *  Each two bits represents a lane number:
         *  No swap is 0123 => 0x1b no need to enable the swap
         */
-       u16 ser_lane, rx_lane_swap, tx_lane_swap;
+       u16 rx_lane_swap, tx_lane_swap;
 
-       ser_lane = ((params->lane_config &
-                    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-                   PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
        rx_lane_swap = ((params->lane_config &
                         PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
                        PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5449,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
                                      struct link_params *params,
                                      struct link_vars *vars)
 {
-
        struct bnx2x *bp = params->bp;
 
        u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5495,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
                                     struct link_params *params,
                                     struct link_vars *vars)
 {
-
        struct bnx2x *bp = params->bp;
-
        u8 lane;
        u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
        int rc = 0;
@@ -6678,7 +6768,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
        return rc;
 }
 
-
 /*****************************************************************************/
 /*                         External Phy section                             */
 /*****************************************************************************/
@@ -8103,7 +8192,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
                                    struct link_params *params)
 {
+       struct bnx2x *bp = params->bp;
        bnx2x_warpcore_power_module(params, phy, 0);
+       /* Put Warpcore in low power mode */
+       REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+       /* Put LCPLL in low power mode */
+       REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+       REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+       REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
 }
 
 static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9137,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK,
                           "8727 Power fault has been detected on port %d\n",
                           oc_port);
-                       netdev_err(bp->dev, "Error:  Power fault on Port %d has"
-                                           " been detected and the power to "
-                                           "that SFP+ module has been removed"
-                                           " to prevent failure of the card."
-                                           " Please remove the SFP+ module and"
-                                           " restart the system to clear this"
-                                           " error.\n",
+                       netdev_err(bp->dev, "Error: Power fault on Port %d has "
+                                           "been detected and the power to "
+                                           "that SFP+ module has been removed "
+                                           "to prevent failure of the card. "
+                                           "Please remove the SFP+ module and "
+                                           "restart the system to clear this "
+                                           "error.\n",
                         oc_port);
                        /* Disable all RX_ALARMs except for mod_abs */
                        bnx2x_cl45_write(bp, phy,
@@ -9228,7 +9325,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
-       u16 val;
+       u16 val, offset;
 
        /* PHYC_CTL_LED_CTL */
        bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9360,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                        MDIO_PMA_REG_8481_LED3_BLINK,
                        0);
 
-       bnx2x_cl45_read(bp, phy,
+       /* Configure the blink rate to ~15.9 Hz */
+       bnx2x_cl45_write(bp, phy,
                        MDIO_PMA_DEVAD,
-                       MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
-       val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+                       MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+                       MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+               offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+       else
+               offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
 
+       bnx2x_cl45_read(bp, phy,
+                       MDIO_PMA_DEVAD, offset, &val);
+       val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
        bnx2x_cl45_write(bp, phy,
-                        MDIO_PMA_DEVAD,
-                        MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+                        MDIO_PMA_DEVAD, offset, val);
 
        /* 'Interrupt Mask' */
        bnx2x_cl45_write(bp, phy,
@@ -9283,7 +9388,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                       struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u16 autoneg_val, an_1000_val, an_10_100_val;
+       u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
        u16 tmp_req_line_speed;
 
        tmp_req_line_speed = phy->req_line_speed;
@@ -9378,6 +9483,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
                                 (1<<15 | 1<<9 | 7<<0));
+               /* The PHY needs this set even for forced link. */
+               an_10_100_val |= (1<<8) | (1<<7);
                DP(NETIF_MSG_LINK, "Setting 100M force\n");
        }
        if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,9 +9522,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                        DP(NETIF_MSG_LINK, "Advertising 10G\n");
                        /* Restart autoneg for 10G*/
 
+                       bnx2x_cl45_read(bp, phy,
+                                       MDIO_AN_DEVAD,
+                                       MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+                                       &an_10g_val);
+                       bnx2x_cl45_write(bp, phy,
+                                        MDIO_AN_DEVAD,
+                                        MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+                                        an_10g_val | 0x1000);
                        bnx2x_cl45_write(bp, phy,
-                                MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
-                                0x3200);
+                                        MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+                                        0x3200);
        } else
                bnx2x_cl45_write(bp, phy,
                                 MDIO_AN_DEVAD,
@@ -9449,74 +9564,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-
-#define PHY84833_HDSHK_WAIT 300
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
                                   struct link_params *params,
-                                  struct link_vars *vars)
+                  u16 fw_cmd,
+                  u16 cmd_args[])
 {
        u32 idx;
-       u32 pair_swap;
        u16 val;
-       u16 data;
        struct bnx2x *bp = params->bp;
-       /* Do pair swap */
-
-       /* Check for configuration. */
-       pair_swap = REG_RD(bp, params->shmem_base +
-                          offsetof(struct shmem_region,
-                       dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
-               PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
-
-       if (pair_swap == 0)
-               return 0;
-
-       data = (u16)pair_swap;
-
        /* Write CMD_OPEN_OVERRIDE to STATUS reg */
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG2,
-                       PHY84833_CMD_OPEN_OVERRIDE);
-       for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+                       MDIO_84833_CMD_HDLR_STATUS,
+                       PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-               if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+                               MDIO_84833_CMD_HDLR_STATUS, &val);
+               if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
                        break;
                msleep(1);
        }
-       if (idx >= PHY84833_HDSHK_WAIT) {
-               DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+       if (idx >= PHY84833_CMDHDLR_WAIT) {
+               DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
                return -EINVAL;
        }
 
+       /* Prepare argument(s) and issue command */
+       for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               cmd_args[idx]);
+       }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG4,
-                       data);
-       /* Issue pair swap command */
-       bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG0,
-                       PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
-       for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+                       MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-               if ((val == PHY84833_CMD_COMPLETE_PASS) ||
-                       (val == PHY84833_CMD_COMPLETE_ERROR))
+                               MDIO_84833_CMD_HDLR_STATUS, &val);
+               if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+                       (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
                        break;
                msleep(1);
        }
-       if ((idx >= PHY84833_HDSHK_WAIT) ||
-               (val == PHY84833_CMD_COMPLETE_ERROR)) {
-               DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+       if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+               (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+               DP(NETIF_MSG_LINK, "FW cmd failed.\n");
                return -EINVAL;
        }
+       /* Gather returning data */
+       for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               &cmd_args[idx]);
+       }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_TOP_CFG_SCRATCH_REG2,
-                       PHY84833_CMD_CLEAR_COMPLETE);
-       DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+                       MDIO_84833_CMD_HDLR_STATUS,
+                       PHY84833_STATUS_CMD_CLEAR_COMPLETE);
        return 0;
 }
 
 
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+                                  struct link_params *params,
+                                  struct link_vars *vars)
+{
+       u32 pair_swap;
+       u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+       int status;
+       struct bnx2x *bp = params->bp;
+
+       /* Check for configuration. */
+       pair_swap = REG_RD(bp, params->shmem_base +
+                          offsetof(struct shmem_region,
+                       dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+               PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+       if (pair_swap == 0)
+               return 0;
+
+       /* Only the second argument is used for this command */
+       data[1] = (u16)pair_swap;
+
+       status = bnx2x_84833_cmd_hdlr(phy, params,
+               PHY84833_CMD_SET_PAIR_SWAP, data);
+       if (status == 0)
+               DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+       return status;
+}
+
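The pair-swap path above is the first user of the new generic bnx2x_84833_cmd_hdlr() firmware mailbox. As a minimal sketch of the same open/poll, write-args, issue, poll-complete, read-back handshake, the standalone program below runs the flow against a fake register file; the register indices, status values and the fake firmware are illustrative stand-ins, not the driver's MDIO interface.

#include <stdio.h>

/* Illustrative stand-ins for the handshake states (values are made up). */
#define STATUS_OPEN_OVERRIDE	0xa5a5
#define STATUS_OPEN_FOR_CMDS	0x5a5a
#define STATUS_COMPLETE_PASS	0x1234
#define STATUS_COMPLETE_ERROR	0xdead
#define STATUS_CLEAR_COMPLETE	0x0000
#define MAX_ARGS	5
#define WAIT_LOOPS	300

/* Fake register file standing in for the PHY's CTL device registers. */
static unsigned short regs[8];
enum { REG_STATUS, REG_COMMAND, REG_DATA1 };	/* DATA1..DATA5 follow */

static void phy_write(int reg, unsigned short val) { regs[reg] = val; }
static unsigned short phy_read(int reg)            { return regs[reg]; }

/* Fake firmware: opens the handler, echoes the command and reports success. */
static void fake_fw_step(void)
{
	if (regs[REG_STATUS] == STATUS_OPEN_OVERRIDE)
		regs[REG_STATUS] = STATUS_OPEN_FOR_CMDS;
	else if (regs[REG_COMMAND] != 0) {
		regs[REG_DATA1] = regs[REG_COMMAND];
		regs[REG_COMMAND] = 0;
		regs[REG_STATUS] = STATUS_COMPLETE_PASS;
	}
}

/* Same shape as the driver's handler: open, write args, issue, poll, read. */
static int cmd_hdlr(unsigned short fw_cmd, unsigned short args[MAX_ARGS])
{
	int idx;

	phy_write(REG_STATUS, STATUS_OPEN_OVERRIDE);
	for (idx = 0; idx < WAIT_LOOPS; idx++) {
		fake_fw_step();
		if (phy_read(REG_STATUS) == STATUS_OPEN_FOR_CMDS)
			break;
	}
	if (idx >= WAIT_LOOPS)
		return -1;

	for (idx = 0; idx < MAX_ARGS; idx++)
		phy_write(REG_DATA1 + idx, args[idx]);
	phy_write(REG_COMMAND, fw_cmd);

	for (idx = 0; idx < WAIT_LOOPS; idx++) {
		fake_fw_step();
		if (phy_read(REG_STATUS) == STATUS_COMPLETE_PASS ||
		    phy_read(REG_STATUS) == STATUS_COMPLETE_ERROR)
			break;
	}
	if (idx >= WAIT_LOOPS || phy_read(REG_STATUS) == STATUS_COMPLETE_ERROR)
		return -1;

	for (idx = 0; idx < MAX_ARGS; idx++)
		args[idx] = phy_read(REG_DATA1 + idx);
	phy_write(REG_STATUS, STATUS_CLEAR_COMPLETE);
	return 0;
}

int main(void)
{
	unsigned short args[MAX_ARGS] = { 0, 0x0f /* hypothetical arg */ };

	if (cmd_hdlr(0x8001 /* hypothetical command id */, args) == 0)
		printf("cmd ok, DATA1=0x%x\n", args[0]);
	return 0;
}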
 static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
                                      u32 shmem_base_path[],
                                      u32 chip_id)
@@ -9579,24 +9715,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
        return 0;
 }
 
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
-                                               u32 shmem_base_path[],
-                                               u32 chip_id)
-{
-       u8 reset_gpios;
-
-       reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
-       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
-       udelay(10);
-       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
-       msleep(800);
-       DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
-               reset_gpios);
-
-       return 0;
-}
-
 #define PHY84833_CONSTANT_LATENCY 1193
 static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                                   struct link_params *params,
@@ -9605,8 +9723,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u8 port, initialize = 1;
        u16 val;
-       u16 temp;
-       u32 actual_phy_selection, cms_enable, idx;
+       u32 actual_phy_selection, cms_enable;
+       u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
        int rc = 0;
 
        msleep(1);
@@ -9625,6 +9743,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_CTRL, 0x8000);
+       }
+
+       bnx2x_wait_reset_complete(bp, phy, params);
+
+       /* Wait for GPHY to come out of reset */
+       msleep(50);
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                /* Bring PHY out of super isolate mode */
                bnx2x_cl45_read(bp, phy,
                                MDIO_CTL_DEVAD,
@@ -9633,26 +9758,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_cl45_write(bp, phy,
                                MDIO_CTL_DEVAD,
                                MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-       }
-
-       bnx2x_wait_reset_complete(bp, phy, params);
-
-       /* Wait for GPHY to come out of reset */
-       msleep(50);
-
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
                bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
-       /*
-        * BCM84823 requires that XGXS links up first @ 10G for normal behavior
-        */
-       temp = vars->line_speed;
-       vars->line_speed = SPEED_10000;
-       bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
-       bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
-       vars->line_speed = temp;
-
-       /* Set dual-media configuration according to configuration */
+       } else {
+               /*
+                * BCM84823 requires that XGXS links up first @ 10G for normal
+                * behavior.
+                */
+               u16 temp;
+               temp = vars->line_speed;
+               vars->line_speed = SPEED_10000;
+               bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+               bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+               vars->line_speed = temp;
+       }
 
        bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                        MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9700,64 +9818,18 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* AutogrEEEn */
        if (params->feature_config_flags &
-               FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
-               /* Ensure that f/w is ready */
-               for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-                       bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                                       MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-                       if (val == PHY84833_CMD_OPEN_FOR_CMDS)
-                               break;
-                       usleep_range(1000, 1000);
-               }
-               if (idx >= PHY84833_HDSHK_WAIT) {
-                       DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
-                       return -EINVAL;
-               }
-
-               /* Select EEE mode */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG3,
-                               0x2);
-
-               /* Set Idle and Latency */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG4,
-                               PHY84833_CONSTANT_LATENCY + 1);
-
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_DATA3_REG,
-                               PHY84833_CONSTANT_LATENCY + 1);
-
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_DATA4_REG,
-                               PHY84833_CONSTANT_LATENCY);
-
-               /* Send EEE instruction to command register */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_SCRATCH_REG0,
-                               PHY84833_DIAG_CMD_SET_EEE_MODE);
-
-               /* Ensure that the command has completed */
-               for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
-                       bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                                       MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
-                       if ((val == PHY84833_CMD_COMPLETE_PASS) ||
-                               (val == PHY84833_CMD_COMPLETE_ERROR))
-                               break;
-                       usleep_range(1000, 1000);
-               }
-               if ((idx >= PHY84833_HDSHK_WAIT) ||
-                       (val == PHY84833_CMD_COMPLETE_ERROR)) {
-                       DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
-                       return -EINVAL;
-               }
-
-               /* Reset command handler */
-               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                           MDIO_84833_TOP_CFG_SCRATCH_REG2,
-                           PHY84833_CMD_CLEAR_COMPLETE);
-       }
+               FEATURE_CONFIG_AUTOGREEEN_ENABLED)
+               cmd_args[0] = 0x2;
+       else
+               cmd_args[0] = 0x0;
 
+       cmd_args[1] = 0x0;
+       cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+       cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+       rc = bnx2x_84833_cmd_hdlr(phy, params,
+               PHY84833_CMD_SET_EEE_MODE, cmd_args);
+       if (rc != 0)
+               DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
        if (initialize)
                rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
        else
@@ -10144,8 +10216,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "54618SE cfg init\n");
        usleep_range(1000, 1000);
 
-       /* This works with E3 only, no need to check the chip
-          before determining the port. */
+       /*
+        * This works with E3 only, no need to check the chip
+        * before determining the port.
+        */
        port = params->port;
 
        cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -11218,7 +11292,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
                                       offsetof(struct shmem_region,
                        dev_info.port_feature_config[port].link_config)) &
                          PORT_FEATURE_CONNECTED_SWITCH_MASK);
-       chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+       chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+               ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
        DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
        if (USES_WARPCORE(bp)) {
                u32 serdes_net_if;
@@ -11397,6 +11473,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
                return -EINVAL;
        default:
                *phy = phy_null;
+               /* In case external PHY wasn't found */
+               if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+                   (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+                       return -EINVAL;
                return 0;
        }
 
@@ -11570,7 +11650,7 @@ u32 bnx2x_phy_selection(struct link_params *params)
 
 int bnx2x_phy_probe(struct link_params *params)
 {
-       u8 phy_index, actual_phy_idx, link_cfg_idx;
+       u8 phy_index, actual_phy_idx;
        u32 phy_config_swapped, sync_offset, media_types;
        struct bnx2x *bp = params->bp;
        struct bnx2x_phy *phy;
@@ -11581,7 +11661,6 @@ int bnx2x_phy_probe(struct link_params *params)
 
        for (phy_index = INT_PHY; phy_index < MAX_PHYS;
              phy_index++) {
-               link_cfg_idx = LINK_CONFIG_IDX(phy_index);
                actual_phy_idx = phy_index;
                if (phy_config_swapped) {
                        if (phy_index == EXT_PHY1)
@@ -12247,6 +12326,63 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
        return 0;
 }
 
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+                                               u32 shmem_base_path[],
+                                               u32 shmem2_base_path[],
+                                               u8 phy_index,
+                                               u32 chip_id)
+{
+       u8 reset_gpios;
+       struct bnx2x_phy phy;
+       u32 shmem_base, shmem2_base, cnt;
+       s8 port = 0;
+       u16 val;
+
+       reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+       udelay(10);
+       bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+       DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+               reset_gpios);
+       for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+               /* This PHY is for E2 and E3. */
+               shmem_base = shmem_base_path[port];
+               shmem2_base = shmem2_base_path[port];
+               /* Extract the ext phy address for the port */
+               if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+                                      0, &phy) !=
+                   0) {
+                       DP(NETIF_MSG_LINK, "populate_phy failed\n");
+                       return -EINVAL;
+               }
+
+               /* Wait for FW completing its initialization. */
+               for (cnt = 0; cnt < 1000; cnt++) {
+                       bnx2x_cl45_read(bp, &phy,
+                               MDIO_PMA_DEVAD,
+                               MDIO_PMA_REG_CTRL, &val);
+                       if (!(val & (1<<15)))
+                               break;
+                       msleep(1);
+               }
+               if (cnt >= 1000)
+                       DP(NETIF_MSG_LINK,
+                               "84833 Cmn reset timeout (%d)\n", port);
+
+               /* Put the port in super isolate mode. */
+               bnx2x_cl45_read(bp, &phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+               val |= MDIO_84833_SUPER_ISOLATE;
+               bnx2x_cl45_write(bp, &phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+       }
+
+       return 0;
+}
+
+
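The per-port loop above waits for a self-clearing reset bit in MDIO_PMA_REG_CTRL before forcing super-isolate mode. A minimal standalone sketch of such a bounded poll, with a fake register standing in for the PHY:

#include <stdio.h>

/*
 * Illustrative only: wait for a self-clearing reset bit (bit 15 of a fake
 * control register) with a bounded poll loop.  The fake register clears the
 * bit after a few reads; the real code reads MDIO_PMA_REG_CTRL over MDIO.
 */
static unsigned short fake_ctrl = 1 << 15;
static int polls;

static unsigned short read_ctrl(void)
{
	if (++polls >= 3)		/* pretend reset completes after 3 reads */
		fake_ctrl &= ~(1 << 15);
	return fake_ctrl;
}

int main(void)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(read_ctrl() & (1 << 15)))
			break;
	}
	if (cnt >= 1000)
		printf("reset timeout\n");
	else
		printf("reset complete after %d polls\n", cnt + 1);
	return 0;
}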
 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                                     u32 shmem2_base_path[], u8 phy_index,
                                     u32 ext_phy_type, u32 chip_id)
@@ -12281,7 +12417,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                 * GPIO3's are linked, and so both need to be toggled
                 * to obtain required 2us pulse.
                 */
-               rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+               rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+                                               shmem2_base_path,
+                                               phy_index, chip_id);
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
                rc = -EINVAL;
index 2a46e63..e02a68a 100644 (file)
@@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
 /*  Configure the COS to ETS according to BW and SP settings.*/
 int bnx2x_ets_e3b0_config(const struct link_params *params,
                         const struct link_vars *vars,
-                        const struct bnx2x_ets_params *ets_params);
+                        struct bnx2x_ets_params *ets_params);
 /* Read pfc statistic*/
 void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
                                                 u32 pfc_frames_sent[2],
index 2f6361e..ffeaaa9 100644 (file)
@@ -2318,12 +2318,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
 }
 
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
-       return 2 * vn + BP_PORT(bp);
-}
-
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
 {
        struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2469,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
           "rate shaping and fairness are disabled\n");
 }
 
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
-       int func;
-       int vn;
-
-       /* Set the attention towards other drivers on the same port */
-       for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-               if (vn == BP_VN(bp))
-                       continue;
-
-               func = func_by_vn(bp, vn);
-               REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
-                      (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
-       }
-}
-
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2549,6 +2527,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
        if (bp->state != BNX2X_STATE_OPEN)
                return;
 
+       /* read updated dcb configuration */
+       bnx2x_dcbx_pmf_update(bp);
+
        bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 
        if (bp->link_vars.link_up)
@@ -2643,15 +2624,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
        return rc;
 }
 
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
-       /* Statistics are not supported for CNIC Clients at the moment */
-       if (IS_FCOE_FP(fp))
-               return false;
-#endif
-       return true;
-}
 
 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
 {
@@ -2695,11 +2667,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
         *  parent connection). The statistics are zeroed when the parent
         *  connection is initialized.
         */
-       if (stat_counter_valid(bp, fp)) {
-               __set_bit(BNX2X_Q_FLG_STATS, &flags);
-               if (zero_stats)
-                       __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
-       }
+
+       __set_bit(BNX2X_Q_FLG_STATS, &flags);
+       if (zero_stats)
+               __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
 
        return flags;
 }
@@ -2808,8 +2780,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
        /* This should be a maximum number of data bytes that may be
         * placed on the BD (not including paddings).
         */
-       rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
-               IP_HEADER_ALIGNMENT_PADDING;
+       rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+               BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
 
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2912,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
         */
 }
 
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+       struct eth_stats_info *ether_stat =
+               &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+       /* leave last char as NULL */
+       memcpy(ether_stat->version, DRV_MODULE_VERSION,
+              ETH_STAT_INFO_VERSION_LEN - 1);
+
+       bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+                                        DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+                                        ether_stat->mac_local);
+
+       ether_stat->mtu_size = bp->dev->mtu;
+
+       if (bp->dev->features & NETIF_F_RXCSUM)
+               ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+       if (bp->dev->features & NETIF_F_TSO)
+               ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+       ether_stat->feature_flags |= bp->common.boot_mode;
+
+       ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+       ether_stat->txq_size = bp->tx_ring_size;
+       ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct fcoe_stats_info *fcoe_stat =
+               &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+       memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+       fcoe_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+       /* insert FCoE stats from ramrod response */
+       if (!NO_FCOE(bp)) {
+               struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       tstorm_queue_statistics;
+
+               struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+                       &bp->fw_stats_data->queue_stats[FCOE_IDX].
+                       xstorm_queue_statistics;
+
+               struct fcoe_statistics_params *fw_fcoe_stat =
+                       &bp->fw_stats_data->fcoe;
+
+               ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_bytes_hi,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+                      fcoe_stat->rx_bytes_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+               ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+                      fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+               ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_bytes_hi,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+                      fcoe_stat->tx_bytes_lo,
+                      fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+               ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+                      fcoe_q_xstorm_stats->mcast_pkts_sent);
+       }
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
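The FCoE counters above are accumulated with the driver's ADD_64 macro over hi/lo 32-bit register pairs. A minimal standalone sketch of that split-counter arithmetic, assuming only that a wrap of the low word carries into the high word (the macro itself is defined elsewhere in the driver and takes hi/lo increment pairs):

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative only: accumulate a value into a 64-bit counter kept as two
 * 32-bit halves, propagating the carry from the low word into the high word.
 */
static void add64(uint32_t *hi, uint32_t *lo, uint32_t inc)
{
	uint32_t old_lo = *lo;

	*lo += inc;
	if (*lo < old_lo)	/* low word wrapped around */
		(*hi)++;
}

int main(void)
{
	uint32_t rx_bytes_hi = 0, rx_bytes_lo = 0xfffffff0u;

	add64(&rx_bytes_hi, &rx_bytes_lo, 0x20);	/* forces a carry */
	printf("rx_bytes = 0x%08x%08x\n",
	       (unsigned)rx_bytes_hi, (unsigned)rx_bytes_lo);
	return 0;
}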
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+       struct iscsi_stats_info *iscsi_stat =
+               &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+       memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+       iscsi_stat->qos_priority =
+               app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+       /* ask L5 driver to add data to the struct */
+       bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
 /* called due to MCP event (on pmf):
  *     reread new bandwidth configuration
  *     configure FW
@@ -2960,6 +3069,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 }
 
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+       enum drv_info_opcode op_code;
+       u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+       /* if drv_info version supported by MFW doesn't match - send NACK */
+       if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+                 DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+
+       switch (op_code) {
+       case ETH_STATS_OPCODE:
+               bnx2x_drv_info_ether_stat(bp);
+               break;
+       case FCOE_STATS_OPCODE:
+               bnx2x_drv_info_fcoe_stat(bp);
+               break;
+       case ISCSI_STATS_OPCODE:
+               bnx2x_drv_info_iscsi_stat(bp);
+               break;
+       default:
+               /* if op code isn't supported - send NACK */
+               bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+               return;
+       }
+
+       /* if we got a drv_info attention from the MFW then these fields are
+        * guaranteed to be defined in shmem2
+        */
+       SHMEM2_WR(bp, drv_info_host_addr_lo,
+               U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+       SHMEM2_WR(bp, drv_info_host_addr_hi,
+               U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+       bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
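Before the ACK, the handler publishes the DMA address of drv_info_to_mcp as two 32-bit shmem words via U64_HI()/U64_LO(). A minimal sketch of that address split; the helper names and the address value are illustrative stand-ins, not the driver's macros:

#include <stdio.h>
#include <stdint.h>

static uint32_t u64_hi(uint64_t addr) { return (uint32_t)(addr >> 32); }
static uint32_t u64_lo(uint64_t addr) { return (uint32_t)(addr & 0xffffffffu); }

int main(void)
{
	uint64_t dma_addr = 0x0000000123456789ull;	/* made-up address */

	/* The driver writes these two words to drv_info_host_addr_hi/lo. */
	printf("host_addr_hi=0x%08x host_addr_lo=0x%08x\n",
	       (unsigned)u64_hi(dma_addr), (unsigned)u64_lo(dma_addr));
	return 0;
}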
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3471,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
        netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
               " the driver to shutdown the card to prevent permanent"
               " damage.  Please contact OEM Support for assistance\n");
+
+       /*
+        * Schedule device reset (unload).
+        * Some boards draw enough power while the driver is up to overheat
+        * if the fan fails.
+        */
+       smp_mb__before_clear_bit();
+       set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+       smp_mb__after_clear_bit();
+       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3620,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                        if (val & DRV_STATUS_SET_MF_BW)
                                bnx2x_set_mf_bw(bp);
 
+                       if (val & DRV_STATUS_DRV_INFO_REQ)
+                               bnx2x_handle_drv_info_req(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
 
@@ -5247,7 +5413,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
        u8 cos;
        unsigned long q_type = 0;
        u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+       fp->rx_queue = fp_idx;
        fp->cid = fp_idx;
        fp->cl_id = bnx2x_fp_cl_id(fp);
        fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7022,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
 static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
        int num_groups;
+       int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
-       /* number of eth_queues */
-       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+       /* number of queues for statistics is number of eth queues + FCoE */
+       u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 
        /* Total number of FW statistics requests =
-        * 1 for port stats + 1 for PF stats + num_eth_queues */
-       bp->fw_stats_num = 2 + num_queue_stats;
+        * 1 for port stats + 1 for PF stats + potentially 1 for FCoE stats +
+        * number of queues
+        */
+       bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
 
        /* Request is built from stats_query_header and an array of
@@ -6870,8 +7039,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         * STATS_QUERY_CMD_COUNT rules. The real number or requests is
         * configured in the stats_query_header.
         */
-       num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
-               (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+       num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+                    (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 
        bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
                        num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7049,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
         *
         * stats_counter holds per-STORM counters that are incremented
         * when STORM has finished with the current request.
+        *
+        * Memory for FCoE offloaded statistics is counted anyway,
+        * even if it will not be sent.
         */
        bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
                sizeof(struct per_pf_stats) +
+               sizeof(struct fcoe_statistics_params) +
                sizeof(struct per_queue_stats) * num_queue_stats +
                sizeof(struct stats_counter);
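The group count above is a ceiling division of the total number of FW statistics requests by STATS_QUERY_CMD_COUNT. A small standalone sketch of that arithmetic with placeholder values (the real STATS_QUERY_CMD_COUNT comes from the firmware headers, so 16 here is an assumption):

#include <stdio.h>

#define STATS_QUERY_CMD_COUNT 16	/* placeholder value */

int main(void)
{
	int num_eth_queues = 7, is_fcoe_stats = 1;
	int num_queue_stats = num_eth_queues + is_fcoe_stats;
	/* 1 port + 1 PF + optional FCoE + per-queue requests */
	int fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
	/* ceiling division into groups of STATS_QUERY_CMD_COUNT rules */
	int num_groups = fw_stats_num / STATS_QUERY_CMD_COUNT +
			 ((fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	printf("fw_stats_num=%d -> num_groups=%d\n", fw_stats_num, num_groups);
	return 0;
}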
 
@@ -7025,6 +7198,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 {
        unsigned long ramrod_flags = 0;
 
+#ifdef BCM_CNIC
+       if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+               DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+               return 0;
+       }
+#endif
+
        DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8702,17 @@ sp_rtnl_not_reset:
        if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
                bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
 
+       /*
+        * In case of fan failure we need to reset it if the "stop on error"
+        * debug flag is set, since we are trying to prevent permanent
+        * overheating damage.
+        */
+       if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+               DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+               netif_device_detach(bp->dev);
+               bnx2x_close(bp->dev);
+       }
+
 sp_rtnl_exit:
        rtnl_unlock();
 }
@@ -8708,7 +8899,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 
 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
 {
-       u32 val, val2, val3, val4, id;
+       u32 val, val2, val3, val4, id, boot_mode;
        u16 pmc;
 
        /* Get the chip revision id and number. */
@@ -8817,6 +9008,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
                FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+       bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+                       BC_SUPPORTS_PFC_STATS : 0;
+
+       boot_mode = SHMEM_RD(bp,
+                       dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+                       PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+       switch (boot_mode) {
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+               break;
+       case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+               bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+               break;
+       }
 
        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
        bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9478,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
                                                        bp->common.shmem2_base);
 }
 
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
+#ifdef BCM_CNIC
        int port = BP_PORT(bp);
-       int func = BP_ABS_FUNC(bp);
 
        u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
                                drv_lic_key[port].max_iscsi_conn);
-       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
-                               drv_lic_key[port].max_fcoe_conn);
 
-       /* Get the number of maximum allowed iSCSI and FCoE connections */
+       /* Get the number of maximum allowed iSCSI connections */
        bp->cnic_eth_dev.max_iscsi_conn =
                (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
                BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 
+       BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+                      bp->cnic_eth_dev.max_iscsi_conn);
+
+       /*
+        * If maximum allowed number of connections is zero -
+        * disable the feature.
+        */
+       if (!bp->cnic_eth_dev.max_iscsi_conn)
+               bp->flags |= NO_ISCSI_FLAG;
+#else
+       bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+       int port = BP_PORT(bp);
+       int func = BP_ABS_FUNC(bp);
+
+       u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+                               drv_lic_key[port].max_fcoe_conn);
+
+       /* Get the number of maximum allowed FCoE connections */
        bp->cnic_eth_dev.max_fcoe_conn =
                (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
                BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9566,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
                }
        }
 
-       BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
-                      bp->cnic_eth_dev.max_iscsi_conn,
-                      bp->cnic_eth_dev.max_fcoe_conn);
+       BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
 
        /*
         * If maximum allowed number of connections is zero -
         * disable the feature.
         */
-       if (!bp->cnic_eth_dev.max_iscsi_conn)
-               bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
        if (!bp->cnic_eth_dev.max_fcoe_conn)
                bp->flags |= NO_FCOE_FLAG;
-}
+#else
+       bp->flags |= NO_FCOE_FLAG;
 #endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+       /*
+        * iSCSI may be dynamically disabled, but reading the info here
+        * lets the driver decrease its memory usage if the feature is
+        * disabled for good.
+        */
+       bnx2x_get_iscsi_info(bp);
+       bnx2x_get_fcoe_info(bp);
+}
 
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
@@ -9374,7 +9614,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-               /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+               /*
+                * iSCSI and FCoE NPAR MACs: if there is no iSCSI or
                 * FCoE MAC then the appropriate feature should be disabled.
                 */
                if (IS_MF_SI(bp)) {
@@ -9396,11 +9637,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    fcoe_mac_addr_lower);
                                bnx2x_set_mac_buf(fip_mac, val, val2);
-                               BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+                               BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
                                               fip_mac);
 
                        } else
                                bp->flags |= NO_FCOE_FLAG;
+               } else { /* SD mode */
+                       if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+                               /* use primary mac as iscsi mac */
+                               memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+                               /* Zero primary MAC configuration */
+                               memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+                               BNX2X_DEV_INFO("SD ISCSI MODE\n");
+                               BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+                                              iscsi_mac);
+                       }
                }
 #endif
        } else {
@@ -9449,7 +9701,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
        }
 #endif
 
-       if (!is_valid_ether_addr(bp->dev->dev_addr))
+       if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
                dev_err(&bp->pdev->dev,
                        "bad Ethernet MAC address configuration: "
                        "%pM, change it manually before bringing up "
@@ -9661,9 +9913,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
 
-#ifdef BCM_CNIC
        bnx2x_get_cnic_info(bp);
-#endif
 
        /* Get current FW pulse sequence */
        if (!BP_NOMCP(bp)) {
@@ -9681,30 +9931,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
 {
        int cnt, i, block_end, rodi;
-       char vpd_data[BNX2X_VPD_LEN+1];
+       char vpd_start[BNX2X_VPD_LEN+1];
        char str_id_reg[VENDOR_ID_LEN+1];
        char str_id_cap[VENDOR_ID_LEN+1];
+       char *vpd_data;
+       char *vpd_extended_data = NULL;
        u8 len;
 
-       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+       cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
        memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
 
        if (cnt < BNX2X_VPD_LEN)
                goto out_not_found;
 
-       i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+       /* The VPD RO tag should be the first tag after the identifier string,
+        * hence we should be able to find it in the first BNX2X_VPD_LEN chars
+        */
+       i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;
 
-
        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
-                   pci_vpd_lrdt_size(&vpd_data[i]);
+                   pci_vpd_lrdt_size(&vpd_start[i]);
 
        i += PCI_VPD_LRDT_TAG_SIZE;
 
-       if (block_end > BNX2X_VPD_LEN)
-               goto out_not_found;
+       if (block_end > BNX2X_VPD_LEN) {
+               vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+               if (vpd_extended_data == NULL)
+                       goto out_not_found;
+
+               /* read rest of vpd image into vpd_extended_data */
+               memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+               cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+                                  block_end - BNX2X_VPD_LEN,
+                                  vpd_extended_data + BNX2X_VPD_LEN);
+               if (cnt < (block_end - BNX2X_VPD_LEN))
+                       goto out_not_found;
+               vpd_data = vpd_extended_data;
+       } else
+               vpd_data = vpd_start;
+
+       /* now vpd_data holds the full VPD content in both cases */
 
        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                   PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +10005,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
                                bp->fw_ver[len] = ' ';
                        }
                }
+               kfree(vpd_extended_data);
                return;
        }
 out_not_found:
+       kfree(vpd_extended_data);
        return;
 }
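
A quick illustration of the extended-VPD read pattern introduced above, written as a standalone userspace sketch rather than driver code: read_vpd(), the prefix length and the 200-byte image are made-up stand-ins for pci_read_vpd(), BNX2X_VPD_LEN and the real RO block size; only the two-stage read (fixed prefix first, then the remainder of the RO block into a larger allocation that is freed on exit) mirrors the change.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VPD_PREFIX_LEN 128

/* stand-in for pci_read_vpd(): pretend the device exposes 200 bytes of VPD */
static int read_vpd(unsigned int off, unsigned int len, char *buf)
{
        const unsigned int vpd_total = 200;

        if (off >= vpd_total)
                return -1;
        if (off + len > vpd_total)
                len = vpd_total - off;
        memset(buf, 0xab, len);         /* fake content */
        return (int)len;
}

int main(void)
{
        char prefix[VPD_PREFIX_LEN];
        char *vpd = prefix, *extended = NULL;
        unsigned int block_end = 180;   /* pretend the RO block ends here */

        if (read_vpd(0, VPD_PREFIX_LEN, prefix) < (int)VPD_PREFIX_LEN)
                return 1;

        if (block_end > VPD_PREFIX_LEN) {
                /* RO block runs past the prefix: fetch the rest into a bigger buffer */
                extended = malloc(block_end);
                if (!extended)
                        return 1;
                memcpy(extended, prefix, VPD_PREFIX_LEN);
                if (read_vpd(VPD_PREFIX_LEN, block_end - VPD_PREFIX_LEN,
                             extended + VPD_PREFIX_LEN) <
                    (int)(block_end - VPD_PREFIX_LEN)) {
                        free(extended);
                        return 1;
                }
                vpd = extended;
        }

        /* vpd now holds the full image in both cases */
        printf("full VPD image of %u bytes is at %p\n", block_end, (void *)vpd);
        free(extended);
        return 0;
}
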
 
@@ -9840,15 +10111,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 
        bp->multi_mode = multi_mode;
 
+       bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+       bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
        /* Set TPA flags */
-       if (disable_tpa) {
+       if (bp->disable_tpa) {
                bp->flags &= ~TPA_ENABLE_FLAG;
                bp->dev->features &= ~NETIF_F_LRO;
        } else {
                bp->flags |= TPA_ENABLE_FLAG;
                bp->dev->features |= NETIF_F_LRO;
        }
-       bp->disable_tpa = disable_tpa;
 
        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
@@ -9965,7 +10241,7 @@ static int bnx2x_open(struct net_device *dev)
 }
 
 /* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
@@ -10119,6 +10395,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
        }
 
        bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+       /* handle ISCSI SD mode */
+       if (IS_MF_ISCSI_SD(bp))
+               bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
 
        /* Schedule the rx_mode command */
        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10479,15 @@ static void poll_bnx2x(struct net_device *dev)
 }
 #endif
 
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+               return -EADDRNOTAVAIL;
+       return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -10205,7 +10495,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_select_queue       = bnx2x_select_queue,
        .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_validate_addr      = bnx2x_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
        .ndo_change_mtu         = bnx2x_change_mtu,
        .ndo_fix_features       = bnx2x_fix_features,
@@ -10823,8 +11113,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
 #ifdef BCM_CNIC
-       /* disable FCOE L2 queue for E1x and E3*/
-       if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+       /* disable FCOE L2 queue for E1x */
+       if (CHIP_IS_E1x(bp))
                bp->flags |= NO_FCOE_FLAG;
 
 #endif
@@ -11486,6 +11776,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                smp_mb__after_atomic_inc();
                break;
        }
+       case DRV_CTL_ULP_REGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
+       case DRV_CTL_ULP_UNREGISTER_CMD: {
+               int ulp_type = ctl->data.ulp_type;
+
+               if (CHIP_IS_E3(bp)) {
+                       int idx = BP_FW_MB_IDX(bp);
+                       u32 cap;
+
+                       cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+                       if (ulp_type == CNIC_ULP_ISCSI)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+                       else if (ulp_type == CNIC_ULP_FCOE)
+                               cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+                       SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+               }
+               break;
+       }
 
        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11561,7 +11883,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
 
        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
index e58073e..44609de 100644 (file)
 #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1                         0x6007c
 /* [RW 10] Write client 0: Assert pause threshold. */
 #define BRB1_REG_PAUSE_LOW_THRESHOLD_0                          0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1                          0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates if to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE                        0x60268
+/* [R 24] The number of full blocks occupied by port. */
 #define BRB1_REG_PORT_NUM_OCC_BLOCKS_0                          0x60094
 /* [RW 1] Reset the design by software. */
 #define BRB1_REG_SOFT_RESET                                     0x600dc
    register bits. */
 #define MISC_REG_LCPLL_CTRL_1                                   0xa2a4
 #define MISC_REG_LCPLL_CTRL_REG_2                               0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN                               0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low. Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA                           0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG                           0xaa7c
 /* [RW 4] Interrupt mask register #0 read/write */
 #define MISC_REG_MISC_INT_MASK                                  0xa388
 /* [RW 1] Parity mask register #0 read/write */
  * is compared to the value on ctrl_md_devad. Drives output
  * misc_xgxs0_phy_addr. Global register. */
 #define MISC_REG_WC0_CTRL_PHY_ADDR                              0xa9cc
+#define MISC_REG_WC0_RESET                                      0xac30
 /* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
    side. This should be less than or equal to phy_port_mode; if some of the
    ports are not used. This enables reduction of frequency on the core side.
@@ -6823,11 +6835,13 @@ The other bits are reserved and should be zero */
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER       0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER                0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G                      0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG               0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS               0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1               0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN             0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG                       0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS                       0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH               0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ       0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1                       0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1                       0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN                     0x0080
 
 /* BCM84833 only */
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1                        0x401a
@@ -6838,26 +6852,35 @@ The other bits are reserved and should be zero */
 #define MDIO_84833_TOP_CFG_SCRATCH_REG2                        0x4007
 #define MDIO_84833_TOP_CFG_SCRATCH_REG3                        0x4008
 #define MDIO_84833_TOP_CFG_SCRATCH_REG4                        0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG                   0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG                   0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26               0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27               0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28               0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29               0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30               0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31               0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND    MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS     MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1      MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2      MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3      MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4      MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5      MDIO_84833_TOP_CFG_SCRATCH_REG31
 
 /* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE             0x2
+#define PHY84833_CMD_SET_PAIR_SWAP                     0x8001
+#define PHY84833_CMD_GET_EEE_MODE                      0x8008
+#define PHY84833_CMD_SET_EEE_MODE                      0x8009
 /* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED                          0x0001
-#define PHY84833_CMD_IN_PROGRESS                       0x0002
-#define PHY84833_CMD_COMPLETE_PASS                     0x0004
-#define PHY84833_CMD_COMPLETE_ERROR                    0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS                     0x0010
-#define PHY84833_CMD_SYSTEM_BOOT                       0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS                 0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE                    0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE                     0xa5a5
-
+#define PHY84833_STATUS_CMD_RECEIVED                   0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS                        0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS              0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR             0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS              0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT                        0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS          0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE             0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE              0xa5a5
 
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE                 0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE                 0x28
 
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD                                  0x3
index 1451769..5ac6160 100644 (file)
@@ -30,6 +30,8 @@
 
 #define BNX2X_MAX_EMUL_MULTI           16
 
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
 /**** Exe Queue interfaces ****/
 
 /**
@@ -441,6 +443,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
        return true;
 }
 
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+                               int n, u8 *buf)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+       u8 *next = buf;
+       int counter = 0;
+
+       /* traverse list */
+       list_for_each_entry(pos, &o->head, link) {
+               if (counter < n) {
+                       /* place leading zeroes in buffer */
+                       memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+                       /* place mac after leading zeroes*/
+                       memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+                              ETH_ALEN);
+
+                       /* calculate address of next element and
+                        * advance counter
+                        */
+                       counter++;
+                       next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+                       DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+                          counter, next, pos->u.mac.mac);
+               }
+       }
+       return counter * ETH_ALEN;
+}
+
 /* check_add() callbacks */
 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
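
For reference, a minimal userspace sketch of the buffer layout produced by the bnx2x_get_n_elements() helper added in the hunk above: each registry entry occupies ALIGN(ETH_ALEN, sizeof(u32)) = 8 bytes, two leading zero bytes followed by the 6-byte MAC. The U32_ALIGN macro and the sample addresses are illustrative stand-ins, not kernel definitions.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define U32_ALIGN(x) (((x) + sizeof(unsigned int) - 1) & ~(sizeof(unsigned int) - 1))
#define MAC_SLOT U32_ALIGN(ETH_ALEN)                    /* 8 bytes per element */
#define MAC_LEADING_ZERO_CNT (MAC_SLOT - ETH_ALEN)      /* 2 leading zero bytes */

int main(void)
{
        unsigned char macs[2][ETH_ALEN] = {
                { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc }, /* made-up addresses */
                { 0x00, 0x10, 0x18, 0xdd, 0xee, 0xff },
        };
        unsigned char buf[2 * MAC_SLOT];
        size_t i;

        for (i = 0; i < 2; i++) {
                unsigned char *slot = buf + i * MAC_SLOT;

                memset(slot, 0, MAC_LEADING_ZERO_CNT);  /* leading zeroes */
                memcpy(slot + MAC_LEADING_ZERO_CNT, macs[i], ETH_ALEN);
        }

        /* print one 8-byte slot per line */
        for (i = 0; i < sizeof(buf); i++)
                printf("%02x%c", buf[i], (i % MAC_SLOT) == MAC_SLOT - 1 ? '\n' : ' ');
        return 0;
}
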
@@ -1886,6 +1918,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
                mac_obj->check_move        = bnx2x_check_move;
                mac_obj->ramrod_cmd        =
                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+               mac_obj->get_n_elements    = bnx2x_get_n_elements;
 
                /* Exe Queue */
                bnx2x_exe_queue_init(bp,
@@ -3342,7 +3375,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
                if (!list_empty(&o->registry.exact_match.macs))
                        return 0;
 
-               elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+               elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
                if (!elem) {
                        BNX2X_ERR("Failed to allocate registry memory\n");
                        return -ENOMEM;
index 9a517c2..992308f 100644 (file)
@@ -285,6 +285,19 @@ struct bnx2x_vlan_mac_obj {
        /* RAMROD command to be used */
        int                             ramrod_cmd;
 
+       /* copy first n elements onto preallocated buffer
+        *
+        * @param n number of elements to get
+        * @param buf buffer preallocated by caller into which elements
+        *            will be copied. Note elements are 4-byte aligned
+        *            so buffer size must be able to accommodate the
+        *            aligned elements.
+        *
+        * @return number of copied bytes
+        */
+       int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+                             int n, u8 *buf);
+
        /**
         * Checks if ADD-ramrod with the given params may be performed.
         *
index 02ac6a7..bc0121a 100644 (file)
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
 #endif
 }
 
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+       u16 res = sizeof(struct host_port_stats) >> 2;
+
+       /* if PFC stats are not supported by the MFW, don't DMA them */
+       if (!(bp->flags &  BC_SUPPORTS_PFC_STATS))
+               res -= (sizeof(u32)*4) >> 2;
+
+       return res;
+}
+
 /*
  * Init service functions
  */
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
                                   DMAE_LEN32_RD_MAX * 4);
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
                                   DMAE_LEN32_RD_MAX * 4);
-       dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+       dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->len = bnx2x_get_port_stats_dma_len(bp);
                dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                dmae->comp_addr_hi = 0;
                dmae->comp_val = 1;
@@ -540,6 +552,25 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
                UPDATE_STAT64(tx_stat_gterr,
                                tx_stat_dot3statsinternalmactransmiterrors);
                UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+               /* collect PFC stats */
+               DIFF_64(diff.hi, new->tx_stat_gtpp_hi,
+                       pstats->pfc_frames_tx_hi,
+                       diff.lo, new->tx_stat_gtpp_lo,
+                       pstats->pfc_frames_tx_lo);
+               pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+               pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+               ADD_64(pstats->pfc_frames_tx_hi, diff.hi,
+                       pstats->pfc_frames_tx_lo, diff.lo);
+
+               DIFF_64(diff.hi, new->rx_stat_grpp_hi,
+                       pstats->pfc_frames_rx_hi,
+                       diff.lo, new->rx_stat_grpp_lo,
+                       pstats->pfc_frames_rx_lo);
+               pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+               pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
+               ADD_64(pstats->pfc_frames_rx_hi, diff.hi,
+                       pstats->pfc_frames_rx_lo, diff.lo);
        }
 
        estats->pause_frames_received_hi =
@@ -551,6 +582,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+       estats->pfc_frames_received_hi =
+                               pstats->pfc_frames_rx_hi;
+       estats->pfc_frames_received_lo =
+                               pstats->pfc_frames_rx_lo;
+       estats->pfc_frames_sent_hi =
+                               pstats->pfc_frames_tx_hi;
+       estats->pfc_frames_sent_lo =
+                               pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +611,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
        ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
        ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
 
+       /* collect pfc stats */
+       ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+               pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+       ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+               pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
 
        ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
        ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +673,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
                                pstats->mac_stx[1].tx_stat_outxoffsent_hi;
        estats->pause_frames_sent_lo =
                                pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+       estats->pfc_frames_received_hi =
+                               pstats->pfc_frames_rx_hi;
+       estats->pfc_frames_received_lo =
+                               pstats->pfc_frames_rx_lo;
+       estats->pfc_frames_sent_hi =
+                               pstats->pfc_frames_tx_hi;
+       estats->pfc_frames_sent_lo =
+                               pstats->pfc_frames_tx_lo;
 }
 
 static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +794,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
        estats->brb_drop_hi = pstats->brb_drop_hi;
        estats->brb_drop_lo = pstats->brb_drop_lo;
 
-       pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+       pstats->host_port_stats_counter++;
 
        if (!BP_NOMCP(bp)) {
                u32 nig_timer_max =
@@ -1265,7 +1319,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
                dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
                dmae->dst_addr_lo = bp->port.port_stx >> 2;
                dmae->dst_addr_hi = 0;
-               dmae->len = sizeof(struct host_port_stats) >> 2;
+               dmae->len = bnx2x_get_port_stats_dma_len(bp);
                if (bp->func_stx) {
                        dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
                        dmae->comp_addr_hi = 0;
@@ -1349,12 +1403,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
        enum bnx2x_stats_state state;
        if (unlikely(bp->panic))
                return;
-       bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
        spin_unlock_bh(&bp->stats_lock);
 
+       bnx2x_stats_stm[state][event].action(bp);
+
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
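
The hunk above makes bnx2x_stats_handle() read the current state and perform the transition under stats_lock before dispatching the action, instead of invoking the action based on a bp->stats_state value read without the lock. A hedged userspace sketch of that ordering, using a pthread mutex in place of the spinlock and a trivial two-state machine (all names here are assumptions, not driver symbols):

#include <pthread.h>
#include <stdio.h>

enum state { STATS_DISABLED, STATS_ENABLED };

struct stats_ctx {
        pthread_mutex_t lock;
        enum state state;
};

static void action_for(enum state s, int event)
{
        printf("running action for state %d, event %d\n", s, event);
}

static void stats_handle(struct stats_ctx *ctx, int event)
{
        enum state old;

        pthread_mutex_lock(&ctx->lock);
        old = ctx->state;
        ctx->state = STATS_ENABLED;     /* next-state table lookup elided */
        pthread_mutex_unlock(&ctx->lock);

        action_for(old, event);         /* dispatched outside the lock */
}

int main(void)
{
        struct stats_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, STATS_DISABLED };

        stats_handle(&ctx, 0);
        printf("state is now %d\n", ctx.state);
        return 0;
}

Running the action outside the locked region keeps the slow work off the lock while still guaranteeing the action matches the state actually consumed for the transition.
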
@@ -1380,7 +1436,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
        dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
        dmae->dst_addr_lo = bp->port.port_stx >> 2;
        dmae->dst_addr_hi = 0;
-       dmae->len = sizeof(struct host_port_stats) >> 2;
+       dmae->len = bnx2x_get_port_stats_dma_len(bp);
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
        dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1513,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
 static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
        int i;
+       int first_queue_query_index;
        struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
 
        dma_addr_t cur_data_offset;
@@ -1512,14 +1569,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
        cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
        cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
 
+       /**** FCoE FW statistics data ****/
+       if (!NO_FCOE(bp)) {
+               cur_data_offset = bp->fw_stats_data_mapping +
+                       offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+               cur_query_entry =
+                       &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+               cur_query_entry->kind = STATS_TYPE_FCOE;
+               /* For FCoE, the query index is a don't care */
+               cur_query_entry->index = BP_PORT(bp);
+               cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+               cur_query_entry->address.hi =
+                       cpu_to_le32(U64_HI(cur_data_offset));
+               cur_query_entry->address.lo =
+                       cpu_to_le32(U64_LO(cur_data_offset));
+       }
+
        /**** Clients' queries ****/
        cur_data_offset = bp->fw_stats_data_mapping +
                offsetof(struct bnx2x_fw_stats_data, queue_stats);
 
+       /* first queue query index depends on whether the FCoE offloaded request will
+        * be included in the ramrod
+        */
+       if (!NO_FCOE(bp))
+               first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+       else
+               first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
        for_each_eth_queue(bp, i) {
                cur_query_entry =
                        &bp->fw_stats_req->
-                                       query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+                                       query[first_queue_query_index + i];
 
                cur_query_entry->kind = STATS_TYPE_QUEUE;
                cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1614,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 
                cur_data_offset += sizeof(struct per_queue_stats);
        }
+
+       /* add FCoE queue query if needed */
+       if (!NO_FCOE(bp)) {
+               cur_query_entry =
+                       &bp->fw_stats_req->
+                                       query[first_queue_query_index + i];
+
+               cur_query_entry->kind = STATS_TYPE_QUEUE;
+               cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+               cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+               cur_query_entry->address.hi =
+                       cpu_to_le32(U64_HI(cur_data_offset));
+               cur_query_entry->address.lo =
+                       cpu_to_le32(U64_LO(cur_data_offset));
+       }
 }
 
 void bnx2x_stats_init(struct bnx2x *bp)
index 5d8ce2f..683deb0 100644 (file)
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
        u32 total_tpa_aggregated_frames_lo;
        u32 total_tpa_bytes_hi;
        u32 total_tpa_bytes_lo;
+
+       /* PFC */
+       u32 pfc_frames_received_hi;
+       u32 pfc_frames_received_lo;
+       u32 pfc_frames_sent_hi;
+       u32 pfc_frames_sent_lo;
 };
 
 
index 6f10c69..4bcb67e 100644 (file)
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
        return io->data;
 }
 
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+
+       if (reg)
+               info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+       else
+               info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+       info.data.ulp_type = ulp_type;
+       ethdev->drv_ctl(dev->netdev, &info);
+}
+
 static int cnic_in_use(struct cnic_sock *csk)
 {
        return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type)
        }
        read_unlock(&cnic_dev_lock);
 
-       rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+       RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
 
        mutex_unlock(&cnic_lock);
        synchronize_rcu();
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
 
        mutex_unlock(&cnic_lock);
 
+       cnic_ulp_ctl(dev, ulp_type, true);
+
        return 0;
 
 }
@@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
        }
        mutex_lock(&cnic_lock);
        if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-               rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+               RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
                cnic_put(dev);
        } else {
                pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
        if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
                netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
+       cnic_ulp_ctl(dev, ulp_type, false);
+
        return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -3052,9 +3071,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
        }
 }
 
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_ulp_ops *ulp_ops;
+       int rc;
+
+       mutex_lock(&cnic_lock);
+       ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+       if (ulp_ops && ulp_ops->cnic_get_stats)
+               rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+       else
+               rc = -ENODEV;
+       mutex_unlock(&cnic_lock);
+       return rc;
+}
+
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 {
        struct cnic_dev *dev = data;
+       int ulp_type = CNIC_ULP_ISCSI;
 
        switch (info->cmd) {
        case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3136,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
                }
                break;
        }
+       case CNIC_CTL_FCOE_STATS_GET_CMD:
+               ulp_type = CNIC_ULP_FCOE;
+               /* fall through */
+       case CNIC_CTL_ISCSI_STATS_GET_CMD:
+               cnic_hold(dev);
+               cnic_copy_ulp_stats(dev, ulp_type);
+               cnic_put(dev);
+               break;
+
        default:
                return -EINVAL;
        }
@@ -3475,7 +3520,7 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
        struct flowi6 fl6;
 
        memset(&fl6, 0, sizeof(fl6));
-       ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+       fl6.daddr = dst_addr->sin6_addr;
        if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                fl6.flowi6_oif = dst_addr->sin6_scope_id;
 
@@ -5134,7 +5179,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
                }
                cnic_shutdown_rings(dev);
                clear_bit(CNIC_F_CNIC_UP, &dev->flags);
-               rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+               RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
                synchronize_rcu();
                cnic_cm_shutdown(dev);
                cp->stop_hw(dev);
@@ -5288,6 +5333,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        cdev->pcidev = pdev;
        cp->chip_id = ethdev->chip_id;
 
+       cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
        if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
                cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
        if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
index 79443e0..d1f6456 100644 (file)
@@ -86,6 +86,8 @@ struct kcqe {
 #define CNIC_CTL_START_CMD             2
 #define CNIC_CTL_COMPLETION_CMD                3
 #define CNIC_CTL_STOP_ISCSI_CMD                4
+#define CNIC_CTL_FCOE_STATS_GET_CMD    5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD   6
 
 #define DRV_CTL_IO_WR_CMD              0x101
 #define DRV_CTL_IO_RD_CMD              0x102
@@ -96,6 +98,8 @@ struct kcqe {
 #define DRV_CTL_STOP_L2_CMD            0x107
 #define DRV_CTL_RET_L2_SPQ_CREDIT_CMD  0x10c
 #define DRV_CTL_ISCSI_STOPPED_CMD      0x10d
+#define DRV_CTL_ULP_REGISTER_CMD       0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD     0x10f
 
 struct cnic_ctl_completion {
        u32     cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
                struct drv_ctl_spq_credit credit;
                struct drv_ctl_io io;
                struct drv_ctl_l2_ring ring;
+               int ulp_type;
                char bytes[MAX_DRV_CTL_DATA];
        } data;
 };
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
                                               struct kwqe_16 *[], u32);
        int             (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
        unsigned long   reserved1[2];
+       union drv_info_to_mcp   *addr_drv_info_to_mcp;
 };
 
 struct cnic_sockaddr {
@@ -297,6 +303,8 @@ struct cnic_dev {
        int             max_fcoe_conn;
        int             max_rdma_conn;
 
+       union drv_info_to_mcp   *stats_addr;
+
        void            *cnic_priv;
 };
 
@@ -326,6 +334,7 @@ struct cnic_ulp_ops {
        void (*cm_remote_abort)(struct cnic_sock *);
        int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
                                  char *data, u16 data_size);
+       int (*cnic_get_stats)(void *ulp_ctx);
        struct module *owner;
        atomic_t ref_count;
 };
index 0a1d7f2..8fa7abc 100644 (file)
@@ -163,7 +163,6 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR      256
 #define SBMAC_MAX_RXDESCR      256
 
-#define ETHER_ADDR_LEN         6
 #define ENET_PACKET_SIZE       1518
 /*#define ENET_PACKET_SIZE     9216 */
 
@@ -266,7 +265,7 @@ struct sbmac_softc {
        int                     sbm_pause;      /* current pause setting */
        int                     sbm_link;       /* current link state */
 
-       unsigned char           sbm_hwaddr[ETHER_ADDR_LEN];
+       unsigned char           sbm_hwaddr[ETH_ALEN];
 
        struct sbmacdma         sbm_txdma;      /* only channel 0 for now */
        struct sbmacdma         sbm_rxdma;
@@ -2676,15 +2675,4 @@ static struct platform_driver sbmac_driver = {
        },
 };
 
-static int __init sbmac_init_module(void)
-{
-       return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
-       platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
index bf40741..8bf11ca 100644 (file)
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    121
+#define TG3_MIN_NUM                    122
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "November 2, 2011"
+#define DRV_MODULE_RELDATE     "December 7, 2011"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 #define TG3_DEF_RX_JUMBO_RING_PENDING  100
-#define TG3_RSS_INDIR_TBL_SIZE         128
 
 /* Do not place this n-ring entries value into the tp struct itself,
  * we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 #if (NET_IP_ALIGN != 0)
 #define TG3_RX_OFFSET(tp)      ((tp)->rx_offset)
 #else
-#define TG3_RX_OFFSET(tp)      0
+#define TG3_RX_OFFSET(tp)      (NET_SKB_PAD)
 #endif
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX              4096
+#define TG3_TX_BD_DMA_MAX_2K           2048
+#define TG3_TX_BD_DMA_MAX_4K           4096
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
        }
 }
 
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
-       u16 miireg;
-
-       if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
-               miireg = ADVERTISE_PAUSE_CAP;
-       else if (flow_ctrl & FLOW_CTRL_TX)
-               miireg = ADVERTISE_PAUSE_ASYM;
-       else if (flow_ctrl & FLOW_CTRL_RX)
-               miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-       else
-               miireg = 0;
-
-       return miireg;
-}
-
 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 {
        u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 {
        u8 cap = 0;
 
-       if (lcladv & ADVERTISE_1000XPAUSE) {
-               if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-                       if (rmtadv & LPA_1000XPAUSE)
-                               cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-                       else if (rmtadv & LPA_1000XPAUSE_ASYM)
-                               cap = FLOW_CTRL_RX;
-               } else {
-                       if (rmtadv & LPA_1000XPAUSE)
-                               cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-               }
-       } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-               if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+       if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+               cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+       } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+               if (lcladv & ADVERTISE_1000XPAUSE)
+                       cap = FLOW_CTRL_RX;
+               if (rmtadv & ADVERTISE_1000XPAUSE)
                        cap = FLOW_CTRL_TX;
        }
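
The rewritten tg3_resolve_flowctrl_1000X() above collapses the old nested checks into two symmetric tests: symmetric pause when both ends advertise PAUSE, otherwise, when both advertise ASYM, asymmetric pause in the direction implied by whichever end also advertises PAUSE. A standalone sketch (simplified bit values, not the real MII constants) walking the interesting combinations:

#include <stdio.h>

#define ADV_PAUSE 0x1
#define ADV_ASYM  0x2
#define FC_TX     0x1
#define FC_RX     0x2

static unsigned int resolve(unsigned int lcl, unsigned int rmt)
{
        unsigned int cap = 0;

        if (lcl & rmt & ADV_PAUSE) {
                cap = FC_TX | FC_RX;
        } else if (lcl & rmt & ADV_ASYM) {
                if (lcl & ADV_PAUSE)
                        cap = FC_RX;    /* we honor pause frames from the peer */
                if (rmt & ADV_PAUSE)
                        cap = FC_TX;    /* we may send pause frames to the peer */
        }
        return cap;
}

int main(void)
{
        unsigned int combos[][2] = {
                { ADV_PAUSE | ADV_ASYM, ADV_PAUSE },            /* both symmetric   */
                { ADV_PAUSE | ADV_ASYM, ADV_ASYM },             /* we receive only  */
                { ADV_ASYM,             ADV_PAUSE | ADV_ASYM }, /* we transmit only */
                { ADV_ASYM,             ADV_ASYM },             /* no pause at all  */
        };
        unsigned int i;

        for (i = 0; i < sizeof(combos) / sizeof(combos[0]); i++)
                printf("lcl=%#x rmt=%#x -> cap=%#x\n",
                       combos[i][0], combos[i][1],
                       resolve(combos[i][0], combos[i][1]));
        return 0;
}
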
 
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
-                       lcl_adv = tg3_advert_flowctrl_1000T(
+                       lcl_adv = mii_advertise_flowctrl(
                                  tp->link_config.flowctrl);
 
                        if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
        if (tp->link_config.active_speed == SPEED_1000 &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+            tg3_flag(tp, 57765_CLASS)) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
        bool need_vaux = false;
 
        /* The GPIOs do something completely different on 57765. */
-       if (!tg3_flag(tp, IS_NIC) ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
        u32 val, new_adv;
 
        new_adv = ADVERTISE_CSMA;
-       if (advertise & ADVERTISED_10baseT_Half)
-               new_adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               new_adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               new_adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               new_adv |= ADVERTISE_100FULL;
-
-       new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+       new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+       new_adv |= mii_advertise_flowctrl(flowctrl);
 
        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;
 
-       if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-               goto done;
-
-       new_adv = 0;
-       if (advertise & ADVERTISED_1000baseT_Half)
-               new_adv |= ADVERTISE_1000HALF;
-       if (advertise & ADVERTISED_1000baseT_Full)
-               new_adv |= ADVERTISE_1000FULL;
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+               new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-       if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-           tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
-               new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+                       new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
-       err = tg3_writephy(tp, MII_CTRL1000, new_adv);
-       if (err)
-               goto done;
+               err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+               if (err)
+                       goto done;
+       }
 
        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
+               case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any eee advertisements above... */
                        if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
        return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 {
-       u32 adv_reg, all_mask = 0;
+       u32 advmsk, tgtadv, advertising;
 
-       if (mask & ADVERTISED_10baseT_Half)
-               all_mask |= ADVERTISE_10HALF;
-       if (mask & ADVERTISED_10baseT_Full)
-               all_mask |= ADVERTISE_10FULL;
-       if (mask & ADVERTISED_100baseT_Half)
-               all_mask |= ADVERTISE_100HALF;
-       if (mask & ADVERTISED_100baseT_Full)
-               all_mask |= ADVERTISE_100FULL;
+       advertising = tp->link_config.advertising;
+       tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 
-       if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
-               return 0;
+       advmsk = ADVERTISE_ALL;
+       if (tp->link_config.active_duplex == DUPLEX_FULL) {
+               tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+               advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+       }
 
-       if ((adv_reg & ADVERTISE_ALL) != all_mask)
-               return 0;
+       if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+               return false;
+
+       if ((*lcladv & advmsk) != tgtadv)
+               return false;
 
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
-               all_mask = 0;
-               if (mask & ADVERTISED_1000baseT_Half)
-                       all_mask |= ADVERTISE_1000HALF;
-               if (mask & ADVERTISED_1000baseT_Full)
-                       all_mask |= ADVERTISE_1000FULL;
+               tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 
                if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
-                       return 0;
+                       return false;
 
                tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
-               if (tg3_ctrl != all_mask)
-                       return 0;
+               if (tg3_ctrl != tgtadv)
+                       return false;
        }
 
-       return 1;
+       return true;
 }
 
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 {
-       u32 curadv, reqadv;
+       u32 lpeth = 0;
 
-       if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
-               return 1;
-
-       curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-       reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+       if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+               u32 val;
 
-       if (tp->link_config.active_duplex == DUPLEX_FULL) {
-               if (curadv != reqadv)
-                       return 0;
+               if (tg3_readphy(tp, MII_STAT1000, &val))
+                       return false;
 
-               if (tg3_flag(tp, PAUSE_AUTONEG))
-                       tg3_readphy(tp, MII_LPA, rmtadv);
-       } else {
-               /* Reprogram the advertisement register, even if it
-                * does not affect the current link.  If the link
-                * gets renegotiated in the future, we can save an
-                * additional renegotiation cycle by advertising
-                * it correctly in the first place.
-                */
-               if (curadv != reqadv) {
-                       *lcladv &= ~(ADVERTISE_PAUSE_CAP |
-                                    ADVERTISE_PAUSE_ASYM);
-                       tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
-               }
+               lpeth = mii_stat1000_to_ethtool_lpa_t(val);
        }
 
-       return 1;
+       if (tg3_readphy(tp, MII_LPA, rmtadv))
+               return false;
+
+       lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+       tp->link_config.rmt_adv = lpeth;
+
+       return true;
 }
 
 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
+       tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+       tp->link_config.rmt_adv = 0;
 
        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
-                           tg3_copper_is_advertising_all(tp,
-                                               tp->link_config.advertising)) {
-                               if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
-                                                                 &rmt_adv))
-                                       current_link_up = 1;
-                       }
+                           tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+                           tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+                               current_link_up = 1;
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                }
 
                if (current_link_up == 1 &&
-                   tp->link_config.active_duplex == DUPLEX_FULL)
+                   tp->link_config.active_duplex == DUPLEX_FULL) {
+                       u32 reg, bit;
+
+                       if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+                               reg = MII_TG3_FET_GEN_STAT;
+                               bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+                       } else {
+                               reg = MII_TG3_EXT_STAT;
+                               bit = MII_TG3_EXT_STAT_MDIX;
+                       }
+
+                       if (!tg3_readphy(tp, reg, &val) && (val & bit))
+                               tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+               }
        }
 
 relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;
 
+                       tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
+
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;
 
+                       tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
+
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
 
                        current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
        udelay(40);
 
        current_link_up = 0;
+       tp->link_config.rmt_adv = 0;
        mac_status = tr32(MAC_STATUS);
 
        if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
+       tp->link_config.rmt_adv = 0;
 
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
        err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
            (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
                /* do nothing, just check for link up at the end */
        } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
-               u32 adv, new_adv;
+               u32 adv, newadv;
 
                err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
-               new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
-                                 ADVERTISE_1000XPAUSE |
-                                 ADVERTISE_1000XPSE_ASYM |
-                                 ADVERTISE_SLCT);
-
-               new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+               newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+                                ADVERTISE_1000XPAUSE |
+                                ADVERTISE_1000XPSE_ASYM |
+                                ADVERTISE_SLCT);
 
-               if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
-                       new_adv |= ADVERTISE_1000XHALF;
-               if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
-                       new_adv |= ADVERTISE_1000XFULL;
+               newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+               newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 
-               if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
-                       tg3_writephy(tp, MII_ADVERTISE, new_adv);
+               if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+                       tg3_writephy(tp, MII_ADVERTISE, newadv);
                        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
                        tg3_writephy(tp, MII_BMCR, bmcr);
 
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
                                        current_duplex = DUPLEX_FULL;
                                else
                                        current_duplex = DUPLEX_HALF;
+
+                               tp->link_config.rmt_adv =
+                                          mii_adv_to_ethtool_adv_x(remote_adv);
                        } else if (!tg3_flag(tp, 5780_CLASS)) {
                                /* Link is up via parallel detect */
                        } else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
        u32 sw_idx = tnapi->tx_cons;
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
 
        if (tg3_flag(tp, ENABLE_TSS))
                index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
                        sw_idx = NEXT_TX(sw_idx);
                }
 
+               pkts_compl++;
+               bytes_compl += skb->len;
+
                dev_kfree_skb(skb);
 
                if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
                }
        }
 
+       netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
        tnapi->tx_cons = sw_idx;
 
        /* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
        }
 }
 
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 {
-       if (!ri->skb)
+       if (!ri->data)
                return;
 
        pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
                         map_sz, PCI_DMA_FROMDEVICE);
-       dev_kfree_skb_any(ri->skb);
-       ri->skb = NULL;
+       kfree(ri->data);
+       ri->data = NULL;
 }
 
 /* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
                            u32 opaque_key, u32 dest_idx_unmasked)
 {
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map;
-       struct sk_buff *skb;
+       u8 *data;
        dma_addr_t mapping;
-       int skb_size, dest_idx;
+       int skb_size, data_size, dest_idx;
 
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
-               skb_size = tp->rx_pkt_map_sz;
+               data_size = tp->rx_pkt_map_sz;
                break;
 
        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
-               skb_size = TG3_RX_JMB_MAP_SZ;
+               data_size = TG3_RX_JMB_MAP_SZ;
                break;
 
        default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
-       skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
-       if (skb == NULL)
+       skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+                  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       data = kmalloc(skb_size, GFP_ATOMIC);
+       if (!data)
                return -ENOMEM;
 
-       skb_reserve(skb, TG3_RX_OFFSET(tp));
-
-       mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+       mapping = pci_map_single(tp->pdev,
+                                data + TG3_RX_OFFSET(tp),
+                                data_size,
                                 PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
-               dev_kfree_skb(skb);
+               kfree(data);
                return -EIO;
        }
 
-       map->skb = skb;
+       map->data = data;
        dma_unmap_addr_set(map, mapping, mapping);
 
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);
 
-       return skb_size;
+       return data_size;
 }
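
tg3_alloc_rx_data() above now allocates a bare data buffer sized to hold the aligned payload plus an aligned skb_shared_info tail, so the receive path can later wrap it with build_skb() instead of copying into a freshly allocated skb. A small userspace sketch of that size calculation, with an assumed cache-line size and a rough stand-in for sizeof(struct skb_shared_info):

#include <stdio.h>

#define SMP_CACHE_BYTES 64                      /* assumption */
#define SKB_DATA_ALIGN(x) (((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))
#define SHARED_INFO_SIZE 320                    /* stand-in for sizeof(struct skb_shared_info) */

int main(void)
{
        unsigned int rx_offset = 2;             /* stand-in for TG3_RX_OFFSET() */
        unsigned int data_size = 1536;          /* typical standard-ring mapping size */
        unsigned int alloc = SKB_DATA_ALIGN(data_size + rx_offset) +
                             SKB_DATA_ALIGN(SHARED_INFO_SIZE);

        printf("kmalloc size for a %u byte mapping: %u bytes\n", data_size, alloc);
        return 0;
}
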
 
 /* We only need to move over in the address because the other
  * members of the RX descriptor are invariant.  See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
  */
 static void tg3_recycle_rx(struct tg3_napi *tnapi,
                           struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
                return;
        }
 
-       dest_map->skb = src_map->skb;
+       dest_map->data = src_map->data;
        dma_unmap_addr_set(dest_map, mapping,
                           dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
         */
        smp_wmb();
 
-       src_map->skb = NULL;
+       src_map->data = NULL;
 }
 
 /* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;
+               u8 *data;
 
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
-                       skb = ri->skb;
+                       data = ri->data;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
-                       skb = ri->skb;
+                       data = ri->data;
                        post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        goto next_pkt;
                }
 
+               prefetch(data + TG3_RX_OFFSET(tp));
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;
 
                if (len > TG3_RX_COPY_THRESH(tp)) {
                        int skb_size;
 
-                       skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+                       skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
                                                    *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);
 
-                       /* Ensure that the update to the skb happens
+                       skb = build_skb(data);
+                       if (!skb) {
+                               kfree(data);
+                               goto drop_it_no_recycle;
+                       }
+                       skb_reserve(skb, TG3_RX_OFFSET(tp));
+                       /* Ensure that the update to the data happens
                         * after the usage of the old DMA mapping.
                         */
                        smp_wmb();
 
-                       ri->skb = NULL;
+                       ri->data = NULL;
 
-                       skb_put(skb, len);
                } else {
-                       struct sk_buff *copy_skb;
-
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
 
-                       copy_skb = netdev_alloc_skb(tp->dev, len +
-                                                   TG3_RAW_IP_ALIGN);
-                       if (copy_skb == NULL)
+                       skb = netdev_alloc_skb(tp->dev,
+                                              len + TG3_RAW_IP_ALIGN);
+                       if (skb == NULL)
                                goto drop_it_no_recycle;
 
-                       skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
-                       skb_put(copy_skb, len);
+                       skb_reserve(skb, TG3_RAW_IP_ALIGN);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-                       skb_copy_from_linear_data(skb, copy_skb->data, len);
+                       memcpy(skb->data,
+                              data + TG3_RX_OFFSET(tp),
+                              len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
-                       /* We'll reuse the original ring buffer. */
-                       skb = copy_skb;
                }
 
+               skb_put(skb, len);
                if ((tp->dev->features & NETIF_F_RXCSUM) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
                di = dpr->rx_std_prod_idx;
 
                for (i = di; i < di + cpycnt; i++) {
-                       if (dpr->rx_std_buffers[i].skb) {
+                       if (dpr->rx_std_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
                di = dpr->rx_jmb_prod_idx;
 
                for (i = di; i < di + cpycnt; i++) {
-                       if (dpr->rx_jmb_buffers[i].skb) {
+                       if (dpr->rx_jmb_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
@@ -6451,17 +6433,17 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = 1;
 
-       if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+       if (tp->dma_limit) {
                u32 prvidx = *entry;
                u32 tmp_flag = flags & ~TXD_FLAG_END;
-               while (len > TG3_TX_BD_DMA_MAX && *budget) {
-                       u32 frag_len = TG3_TX_BD_DMA_MAX;
-                       len -= TG3_TX_BD_DMA_MAX;
+               while (len > tp->dma_limit && *budget) {
+                       u32 frag_len = tp->dma_limit;
+                       len -= tp->dma_limit;
 
                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
-                               len += TG3_TX_BD_DMA_MAX / 2;
-                               frag_len = TG3_TX_BD_DMA_MAX / 2;
+                               len += tp->dma_limit / 2;
+                               frag_len = tp->dma_limit / 2;
                        }
 
                        tnapi->tx_buffers[*entry].fragmented = true;
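The loop above caps each TX buffer descriptor at tp->dma_limit and, when the remainder would shrink to 8 bytes or less, splits the last two fragments at half the limit instead. A hedged, standalone sketch of that arithmetic (the demo_* name is illustrative, not the driver's):

/* Hedged sketch of the fragment-splitting arithmetic used above: returns
 * how many descriptors a mapping of 'len' bytes needs when each one is
 * capped at 'dma_limit' and a trailing fragment of <= 8 bytes is avoided.
 */
static u32 demo_count_tx_frags(u32 len, u32 dma_limit)
{
	u32 frags = 0;

	while (len > dma_limit) {
		len -= dma_limit;
		/* A remainder of 8 bytes or less is pushed back up by
		 * emitting a half-sized fragment instead of a full one.
		 */
		if (len <= 8)
			len += dma_limit / 2;
		frags++;
	}
	return frags + 1;	/* the final fragment carries 'len' bytes */
}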
@@ -6816,6 +6798,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        skb_tx_timestamp(skb);
+       netdev_sent_queue(tp->dev, skb->len);
 
        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);
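The netdev_sent_queue() call added above feeds the byte-queue-limits machinery; BQL expects a matching completion-side report, which in this driver would come from the TX cleanup path (not shown in this excerpt). A hedged sketch of that pairing, with illustrative names (assumes <linux/netdevice.h>):

/* Hedged sketch: BQL accounting is symmetric.  Bytes reported at transmit
 * time via netdev_sent_queue() must be reported back once the hardware has
 * consumed them, e.g. from the TX completion handler.
 */
static void demo_tx_cleanup_done(struct net_device *dev,
				 unsigned int pkts_completed,
				 unsigned int bytes_completed)
{
	netdev_completed_queue(dev, pkts_completed, bytes_completed);
}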
@@ -6968,7 +6951,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
        return 0;
 }
 
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 {
        struct tg3 *tp = netdev_priv(dev);
 
@@ -6994,7 +6977,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
        }
 }
 
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct tg3 *tp = netdev_priv(dev);
 
@@ -7004,9 +6988,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
                tg3_set_loopback(dev, features);
@@ -7082,14 +7066,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
        if (tpr != &tp->napi[0].prodring) {
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
-                       tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                       tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);
 
                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
-                               tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                               tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }
@@ -7098,12 +7082,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
        }
 
        for (i = 0; i <= tp->rx_std_ring_mask; i++)
-               tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+               tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);
 
        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
-                       tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                       tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
 }
@@ -7159,7 +7143,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
-               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+               if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
@@ -7191,7 +7175,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
        }
 
        for (i = 0; i < tp->rx_jumbo_pending; i++) {
-               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+               if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
@@ -7297,6 +7281,7 @@ static void tg3_free_rings(struct tg3 *tp)
                        dev_kfree_skb_any(skb);
                }
        }
+       netdev_reset_queue(tp->dev);
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7576,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }
-       if (tp->hw_stats)
-               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
        return err;
 }
@@ -7626,15 +7609,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
-               if (tg3_flag(tp, PCI_EXPRESS))
-                       pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-               else {
-                       pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
-                                             tp->pci_cacheline_sz);
-                       pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
-                                             tp->pci_lat_timer);
-               }
+       if (!tg3_flag(tp, PCI_EXPRESS)) {
+               pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+                                     tp->pci_cacheline_sz);
+               pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+                                     tp->pci_lat_timer);
        }
 
        /* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7798,6 @@ static int tg3_chip_reset(struct tg3 *tp)
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
                                      val16);
 
-               pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
                /* Clear error status */
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7891,11 @@ static int tg3_chip_reset(struct tg3 *tp)
        return 0;
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+                                                struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+                                               struct tg3_ethtool_stats *);
+
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
@@ -7931,6 +7913,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);
 
+       if (tp->hw_stats) {
+               /* Save the stats across chip resets... */
+               tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+               tg3_get_estats(tp, &tp->estats_prev);
+
+               /* And make sure the next sample is new data */
+               memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+       }
+
        if (err)
                return err;
 
@@ -8074,7 +8065,7 @@ static void tg3_rings_reset(struct tg3 *tp)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       else if (tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8082,7 @@ static void tg3_rings_reset(struct tg3 *tp)
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-                GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8188,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
        if (!tg3_flag(tp, 5750_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+           tg3_flag(tp, 57765_PLUS))
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8209,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                return;
 
-       if (!tg3_flag(tp, 5705_PLUS))
-               bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
-       else
-               bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+       bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 
        host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 
@@ -8231,6 +8220,37 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
                tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 }
 
+void tg3_rss_init_indir_tbl(struct tg3 *tp)
+{
+       int i;
+
+       if (!tg3_flag(tp, SUPPORT_MSIX))
+               return;
+
+       if (tp->irq_cnt <= 2)
+               memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+       else
+               for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+                       tp->rss_ind_tbl[i] = i % (tp->irq_cnt - 1);
+}
+
+void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+       int i = 0;
+       u32 reg = MAC_RSS_INDIR_TBL_0;
+
+       while (i < TG3_RSS_INDIR_TBL_SIZE) {
+               u32 val = tp->rss_ind_tbl[i];
+               i++;
+               for (; i % 8; i++) {
+                       val <<= 4;
+                       val |= tp->rss_ind_tbl[i];
+               }
+               tw32(reg, val);
+               reg += 4;
+       }
+}
+
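tg3_rss_write_indir_tbl() above folds eight 4-bit ring indices into each 32-bit MAC_RSS_INDIR_TBL register, most-significant nibble first. A hedged, self-contained illustration of the same packing (demo_* names are not the driver's; u32/u8 come from <linux/types.h>):

/* Hedged illustration of the nibble packing done by
 * tg3_rss_write_indir_tbl(): tbl[0] lands in the most significant nibble
 * of the returned word, tbl[7] in the least significant one.
 * Each entry must fit in 4 bits.
 */
static u32 demo_pack_rss_group(const u8 tbl[8])
{
	u32 val = tbl[0];
	int i;

	for (i = 1; i < 8; i++) {
		val <<= 4;
		val |= tbl[i] & 0xf;
	}
	return val;
}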
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
@@ -8337,7 +8357,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(GRC_MODE, grc_mode);
        }
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+       if (tg3_flag(tp, 57765_CLASS)) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                        u32 grc_mode = tr32(GRC_MODE);
 
@@ -8425,7 +8445,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
                        val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+               if (!tg3_flag(tp, 57765_CLASS) &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
                        val |= DMA_RWCTRL_TAGGED_STAT_WA;
                tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8592,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
                             val | BDINFO_FLAGS_USE_EXT_RECV);
                        if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
-                           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                           tg3_flag(tp, 57765_CLASS))
                                tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
                                     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
                } else {
@@ -8581,10 +8601,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                }
 
                if (tg3_flag(tp, 57765_PLUS)) {
-                       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
-                               val = TG3_RX_STD_MAX_SIZE_5700;
-                       else
-                               val = TG3_RX_STD_MAX_SIZE_5717;
+                       val = TG3_RX_STD_RING_SIZE(tp);
                        val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
                        val |= (TG3_RX_STD_DMA_SZ << 2);
                } else
@@ -8661,6 +8678,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tg3_flag(tp, PCI_EXPRESS))
                rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
@@ -8924,28 +8944,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        udelay(100);
 
        if (tg3_flag(tp, ENABLE_RSS)) {
-               int i = 0;
-               u32 reg = MAC_RSS_INDIR_TBL_0;
-
-               if (tp->irq_cnt == 2) {
-                       for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
-                               tw32(reg, 0x0);
-                               reg += 4;
-                       }
-               } else {
-                       u32 val;
-
-                       while (i < TG3_RSS_INDIR_TBL_SIZE) {
-                               val = i % (tp->irq_cnt - 1);
-                               i++;
-                               for (; i % 8; i++) {
-                                       val <<= 4;
-                                       val |= (i % (tp->irq_cnt - 1));
-                               }
-                               tw32(reg, val);
-                               reg += 4;
-                       }
-               }
+               tg3_rss_write_indir_tbl(tp);
 
                /* Setup the "secret" hash key. */
                tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9001,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        /* Prevent chip from dropping frames when flow control
         * is enabled.
         */
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       if (tg3_flag(tp, 57765_CLASS))
                val = 1;
        else
                val = 2;
@@ -9217,7 +9216,7 @@ static void tg3_timer(unsigned long __opaque)
        spin_lock(&tp->lock);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);
 
        if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9669,6 +9668,8 @@ static int tg3_open(struct net_device *dev)
         */
        tg3_ints_init(tp);
 
+       tg3_rss_init_indir_tbl(tp);
+
        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
@@ -9700,8 +9701,8 @@ static int tg3_open(struct net_device *dev)
                tg3_free_rings(tp);
        } else {
                if (tg3_flag(tp, TAGGED_STATUS) &&
-                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
-                       GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+                   !tg3_flag(tp, 57765_CLASS))
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;
@@ -9782,10 +9783,6 @@ err_out1:
        return err;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
-                                                struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
 static int tg3_close(struct net_device *dev)
 {
        int i;
@@ -9817,10 +9814,9 @@ static int tg3_close(struct net_device *dev)
 
        tg3_ints_fini(tp);
 
-       tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
-       memcpy(&tp->estats_prev, tg3_get_estats(tp),
-              sizeof(tp->estats_prev));
+       /* Clear stats across close / open calls */
+       memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+       memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
 
        tg3_napi_fini(tp);
 
@@ -9868,9 +9864,9 @@ static u64 calc_crc_errors(struct tg3 *tp)
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
 
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
+                                              struct tg3_ethtool_stats *estats)
 {
-       struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
@@ -10318,12 +10314,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
-       if (netif_running(dev)) {
+       if (netif_running(dev) && netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
+               cmd->lp_advertising = tp->link_config.rmt_adv;
+               if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+                       if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+                               cmd->eth_tp_mdix = ETH_TP_MDI_X;
+                       else
+                               cmd->eth_tp_mdix = ETH_TP_MDI;
+               }
        } else {
                ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
+               cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
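With the hunk above, get_settings now reports the link partner's advertisement and the MDI-X state via cmd->eth_tp_mdix. A hedged sketch of how a consumer might decode that field (ETH_TP_MDI* constants are from <linux/ethtool.h>; the helper name is illustrative):

/* Hedged sketch: decoding the eth_tp_mdix field reported above. */
static const char *demo_mdix_state(u8 eth_tp_mdix)
{
	switch (eth_tp_mdix) {
	case ETH_TP_MDI:
		return "off (straight MDI)";
	case ETH_TP_MDI_X:
		return "on (crossover MDI-X)";
	default:
		return "unknown";	/* ETH_TP_MDI_INVALID */
	}
}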
@@ -10428,10 +10432,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       strcpy(info->fw_version, tp->fw_ver);
-       strcpy(info->bus_info, pci_name(tp->pdev));
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+       strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
 }
 
 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10594,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
        epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
 
-       if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+       if (tp->link_config.flowctrl & FLOW_CTRL_RX)
                epause->rx_pause = 1;
        else
                epause->rx_pause = 0;
 
-       if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+       if (tp->link_config.flowctrl & FLOW_CTRL_TX)
                epause->tx_pause = 1;
        else
                epause->tx_pause = 0;
@@ -10769,7 +10773,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
 {
        struct tg3 *tp = netdev_priv(dev);
-       memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+       tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
 }
 
 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11357,7 @@ static int tg3_test_memory(struct tg3 *tp)
 
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
-       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+       else if (tg3_flag(tp, 57765_CLASS))
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
@@ -11400,8 +11405,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
-       struct sk_buff *skb, *rx_skb;
-       u8 *tx_data;
+       struct sk_buff *skb;
+       u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11574,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                }
 
                if (opaque_key == RXD_OPAQUE_RING_STD) {
-                       rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+                       rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-                       rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+                       rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
@@ -11582,15 +11587,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);
 
+               rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
-                       if (*(rx_skb->data + i) != (u8) (val & 0xff))
+                       if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }
 
        err = 0;
 
-       /* tg3_free_rings will unmap and free the rx_skb */
+       /* tg3_free_rings will unmap and free the rx_data */
 out:
        return err;
 }
@@ -12612,7 +12618,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+                        tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13224,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
 
 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
 {
-       u32 adv = ADVERTISED_Autoneg |
-                 ADVERTISED_Pause;
+       u32 adv = ADVERTISED_Autoneg;
 
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13327,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
-               u32 bmsr, mask;
+               u32 bmsr, dummy;
 
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13340,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 
                tg3_phy_set_wirespeed(tp);
 
-               mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
-               if (!tg3_copper_is_advertising_all(tp, mask)) {
+               if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);
 
@@ -13460,6 +13462,17 @@ out_no_vpd:
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
+       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+                       strcpy(tp->board_part_number, "BCM57762");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+                       strcpy(tp->board_part_number, "BCM57766");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+                       strcpy(tp->board_part_number, "BCM57782");
+               else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+                       strcpy(tp->board_part_number, "BCM57786");
+               else
+                       goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
@@ -13798,7 +13811,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
-                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+                        tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        pci_read_config_dword(tp->pdev,
                                              TG3PCI_GEN15_PRODID_ASICREV,
                                              &prod_id_asic_rev);
@@ -13945,7 +13962,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tg3_flag_set(tp, 5717_PLUS);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
-           tg3_flag(tp, 5717_PLUS))
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               tg3_flag_set(tp, 57765_CLASS);
+
+       if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
                tg3_flag_set(tp, 57765_PLUS);
 
        /* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14017,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3) ||
-           (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+           tp->fw_needed) {
+               /* For firmware TSO, assume ASF is disabled.
+                * We'll disable TSO later if we discover ASF
+                * is enabled in tg3_get_eeprom_hw_cfg().
+                */
                tg3_flag_set(tp, TSO_CAPABLE);
-       else {
+       } else {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
@@ -14034,9 +14058,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
-               tg3_flag_set(tp, 4K_FIFO_LIMIT);
+               tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+       else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+               tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
 
-       if (tg3_flag(tp, 5717_PLUS))
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tg3_flag_set(tp, LRG_PROD_RING_CAP);
 
        if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14084,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
                tg3_flag_set(tp, PCI_EXPRESS);
 
-               tp->pcie_readrq = 4096;
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
-                       tp->pcie_readrq = 2048;
-
-               pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+               if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+                       int readrq = pcie_get_readrq(tp->pdev);
+                       if (readrq > 2048)
+                               pcie_set_readrq(tp->pdev, 2048);
+               }
 
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14300,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         */
        tg3_get_eeprom_hw_cfg(tp);
 
+       if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+               tg3_flag_clear(tp, TSO_CAPABLE);
+               tg3_flag_clear(tp, TSO_BUG);
+               tp->fw_needed = NULL;
+       }
+
        if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
@@ -14311,7 +14344,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
-           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+           tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
 
        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14581,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        else
                tg3_flag_clear(tp, POLL_SERDES);
 
-       tp->rx_offset = NET_IP_ALIGN;
+       tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            tg3_flag(tp, PCIX_MODE)) {
-               tp->rx_offset = 0;
+               tp->rx_offset = NET_SKB_PAD;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -15313,7 +15346,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
-       u32 features = 0;
+       netdev_features_t features = 0;
 
        printk_once(KERN_INFO "%s\n", version);
 
index 94b4bd0..aea8f72 100644 (file)
@@ -31,6 +31,8 @@
 #define TG3_RX_RET_MAX_SIZE_5705       512
 #define TG3_RX_RET_MAX_SIZE_5717       4096
 
+#define TG3_RSS_INDIR_TBL_SIZE         128
+
 /* First 256 bytes are a mirror of PCI config space. */
 #define TG3PCI_VENDOR                  0x00000000
 #define  TG3PCI_VENDOR_BROADCOM                 0x14e4
 #define  TG3PCI_DEVICE_TIGON3_57795     0x16b6
 #define  TG3PCI_DEVICE_TIGON3_5719      0x1657
 #define  TG3PCI_DEVICE_TIGON3_5720      0x165f
+#define  TG3PCI_DEVICE_TIGON3_57762     0x1682
+#define  TG3PCI_DEVICE_TIGON3_57766     0x1686
+#define  TG3PCI_DEVICE_TIGON3_57786     0x16b3
+#define  TG3PCI_DEVICE_TIGON3_57782     0x16b7
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM           PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6   0x1644
 #define   ASIC_REV_57765                0x57785
 #define   ASIC_REV_5719                         0x5719
 #define   ASIC_REV_5720                         0x5720
+#define   ASIC_REV_57766                0x57766
 #define  GET_CHIP_REV(CHIP_REV_ID)     ((CHIP_REV_ID) >> 8)
 #define   CHIPREV_5700_AX               0x70
 #define   CHIPREV_5700_BX               0x71
 #define  RDMAC_MODE_MBUF_SBD_CRPT_ENAB  0x00002000
 #define  RDMAC_MODE_FIFO_SIZE_128       0x00020000
 #define  RDMAC_MODE_FIFO_LONG_BURST     0x00030000
+#define  RDMAC_MODE_JMB_2K_MMRR                 0x00800000
 #define  RDMAC_MODE_MULT_DMA_RD_DIS     0x01000000
 #define  RDMAC_MODE_IPV4_LSO_EN                 0x08000000
 #define  RDMAC_MODE_IPV6_LSO_EN                 0x10000000
 #define  MII_TG3_EXT_CTRL_TBI          0x8000
 
 #define MII_TG3_EXT_STAT               0x11 /* Extended status register */
+#define  MII_TG3_EXT_STAT_MDIX         0x2000
 #define  MII_TG3_EXT_STAT_LPASS                0x0100
 
 #define MII_TG3_RXR_COUNTERS           0x14 /* Local/Remote Receiver Counts */
 #define  MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
 #define  MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
 
+#define MII_TG3_FET_GEN_STAT           0x1c
+#define  MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
+
 #define MII_TG3_FET_TEST               0x1f
 #define  MII_TG3_FET_SHADOW_EN         0x0080
 
@@ -2662,9 +2674,13 @@ struct tg3_hw_stats {
 /* 'mapping' is superfluous as the chip does not write into
  * the tx/rx post rings so we could just fetch it from there.
  * But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc() data only,
+ * and skbs are built only after the hardware has filled the frame.
  */
 struct ring_info {
-       struct sk_buff                  *skb;
+       u8                              *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
 };
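The comment above summarizes the conversion: ring entries now carry raw kmalloc() buffers and the skb is only built once the NIC has filled the frame. A hedged, standalone sketch of that pattern under the single-argument build_skb() API used in this patch (demo_* names and sizes are illustrative, not the driver's):

#include <linux/skbuff.h>
#include <linux/slab.h>

/* Hedged sketch of the build_skb() RX scheme described above. */
struct demo_rx_entry {
	u8 *data;			/* kmalloc()ed buffer, not an skb */
};

/* Producer: post raw memory sized for headroom + frame + shared info. */
static int demo_post_rx_buffer(struct demo_rx_entry *e, unsigned int frame_sz)
{
	unsigned int size = SKB_DATA_ALIGN(NET_SKB_PAD + frame_sz) +
			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	e->data = kmalloc(size, GFP_ATOMIC);
	if (!e->data)
		return -ENOMEM;
	/* DMA-map e->data + NET_SKB_PAD and hand it to the NIC here. */
	return 0;
}

/* Completion: wrap the filled buffer without copying it. */
static struct sk_buff *demo_build_rx_skb(struct demo_rx_entry *e,
					 unsigned int len)
{
	struct sk_buff *skb = build_skb(e->data);

	if (!skb) {
		kfree(e->data);
		e->data = NULL;
		return NULL;
	}
	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom left for the NIC */
	skb_put(skb, len);		/* length reported by the hardware */
	e->data = NULL;
	return skb;
}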
 
@@ -2690,6 +2706,7 @@ struct tg3_link_config {
 #define DUPLEX_INVALID         0xff
 #define AUTONEG_INVALID                0xff
        u16                             active_speed;
+       u32                             rmt_adv;
 
        /* When we go in and out of low power mode we need
         * to swap with this state.
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS {
        TG3_FLAG_NVRAM_BUFFERED,
        TG3_FLAG_SUPPORT_MSI,
        TG3_FLAG_SUPPORT_MSIX,
+       TG3_FLAG_USING_MSI,
+       TG3_FLAG_USING_MSIX,
        TG3_FLAG_PCIX_MODE,
        TG3_FLAG_PCI_HIGH_SPEED,
        TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@ enum TG3_FLAGS {
        TG3_FLAG_CHIP_RESETTING,
        TG3_FLAG_INIT_COMPLETE,
        TG3_FLAG_TSO_BUG,
-       TG3_FLAG_IS_5788,
        TG3_FLAG_MAX_RXPEND_64,
        TG3_FLAG_TSO_CAPABLE,
        TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@ enum TG3_FLAGS {
        TG3_FLAG_IS_NIC,
        TG3_FLAG_FLASH,
        TG3_FLAG_HW_TSO_1,
-       TG3_FLAG_5705_PLUS,
-       TG3_FLAG_5750_PLUS,
+       TG3_FLAG_HW_TSO_2,
        TG3_FLAG_HW_TSO_3,
-       TG3_FLAG_USING_MSI,
-       TG3_FLAG_USING_MSIX,
        TG3_FLAG_ICH_WORKAROUND,
-       TG3_FLAG_5780_CLASS,
-       TG3_FLAG_HW_TSO_2,
        TG3_FLAG_1SHOT_MSI,
        TG3_FLAG_NO_FWARE_REPORTED,
        TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@ enum TG3_FLAGS {
        TG3_FLAG_RGMII_EXT_IBND_RX_EN,
        TG3_FLAG_RGMII_EXT_IBND_TX_EN,
        TG3_FLAG_CLKREQ_BUG,
-       TG3_FLAG_5755_PLUS,
        TG3_FLAG_NO_NVRAM,
        TG3_FLAG_ENABLE_RSS,
        TG3_FLAG_ENABLE_TSS,
        TG3_FLAG_SHORT_DMA_BUG,
        TG3_FLAG_USE_JUMBO_BDFLAG,
        TG3_FLAG_L1PLLPD_EN,
-       TG3_FLAG_57765_PLUS,
        TG3_FLAG_APE_HAS_NCSI,
-       TG3_FLAG_5717_PLUS,
        TG3_FLAG_4K_FIFO_LIMIT,
        TG3_FLAG_RESET_TASK_PENDING,
+       TG3_FLAG_5705_PLUS,
+       TG3_FLAG_IS_5788,
+       TG3_FLAG_5750_PLUS,
+       TG3_FLAG_5780_CLASS,
+       TG3_FLAG_5755_PLUS,
+       TG3_FLAG_57765_PLUS,
+       TG3_FLAG_57765_CLASS,
+       TG3_FLAG_5717_PLUS,
 
        /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
        TG3_FLAG_NUMBER_OF_FLAGS,       /* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@ struct tg3 {
        /* begin "tx thread" cacheline section */
        void                            (*write32_tx_mbox) (struct tg3 *, u32,
                                                            u32);
+       u32                             dma_limit;
 
        /* begin "rx thread" cacheline section */
        struct tg3_napi                 napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@ struct tg3 {
        unsigned long                   rx_dropped;
        unsigned long                   tx_dropped;
        struct rtnl_link_stats64        net_stats_prev;
-       struct tg3_ethtool_stats        estats;
        struct tg3_ethtool_stats        estats_prev;
 
        DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@ struct tg3 {
 #define TG3_PHYFLG_SERDES_PREEMPHASIS  0x00010000
 #define TG3_PHYFLG_PARALLEL_DETECT     0x00020000
 #define TG3_PHYFLG_EEE_CAP             0x00040000
+#define TG3_PHYFLG_MDIX_STATE          0x00200000
 
        u32                             led_ctrl;
        u32                             phy_otp;
        u32                             setlpicnt;
+       u8                              rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
 
 #define TG3_BPN_SIZE                   24
        char                            board_part_number[TG3_BPN_SIZE];
index 7f3091e..aac3a3b 100644 (file)
@@ -2968,7 +2968,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
        return err;
 }
 
-static void
+static int
 bnad_vlan_rx_add_vid(struct net_device *netdev,
                                 unsigned short vid)
 {
@@ -2976,7 +2976,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
        unsigned long flags;
 
        if (!bnad->rx_info[0].rx)
-               return;
+               return 0;
 
        mutex_lock(&bnad->conf_mutex);
 
@@ -2986,9 +2986,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        mutex_unlock(&bnad->conf_mutex);
+
+       return 0;
 }
 
-static void
+static int
 bnad_vlan_rx_kill_vid(struct net_device *netdev,
                                  unsigned short vid)
 {
@@ -2996,7 +2998,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
        unsigned long flags;
 
        if (!bnad->rx_info[0].rx)
-               return;
+               return 0;
 
        mutex_lock(&bnad->conf_mutex);
 
@@ -3006,6 +3008,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        mutex_unlock(&bnad->conf_mutex);
+
+       return 0;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
index fd3dcc1..38d5c66 100644 (file)
@@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        struct bfa_ioc_attr *ioc_attr;
        unsigned long flags;
 
-       strcpy(drvinfo->driver, BNAD_NAME);
-       strcpy(drvinfo->version, BNAD_VERSION);
+       strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
 
        ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
        if (ioc_attr) {
@@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
                bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
                spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-               strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
-                       sizeof(drvinfo->fw_version) - 1);
+               strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+                       sizeof(drvinfo->fw_version));
                kfree(ioc_attr);
        }
 
-       strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+       strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+               sizeof(drvinfo->bus_info));
 }
 
 static void
index 1b3e90d..32e8f17 100644 (file)
@@ -43,8 +43,7 @@ extern char bfa_version[];
 
 #pragma pack(1)
 
-#define MAC_ADDRLEN    (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
 
 #pragma pack()
 
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644 (file)
index 0000000..a52e725
--- /dev/null
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+       tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+
+       select CRC32
+       help
+         This is the driver for the XGMAC Ethernet IP block found on Calxeda
+         Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644 (file)
index 0000000..f0ef080
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644 (file)
index 0000000..107c1b0
--- /dev/null
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL          0x00000000      /* MAC Configuration */
+#define XGMAC_FRAME_FILTER     0x00000004      /* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL                0x00000018      /* MAC Flow Control */
+#define XGMAC_VLAN_TAG         0x0000001C      /* VLAN Tags */
+#define XGMAC_VERSION          0x00000020      /* Version */
+#define XGMAC_VLAN_INCL                0x00000024      /* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL         0x00000028      /* LPI Control and Status */
+#define XGMAC_LPI_TIMER                0x0000002C      /* LPI Timers Control */
+#define XGMAC_TX_PACE          0x00000030      /* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH                0x00000034      /* VLAN Hash Table */
+#define XGMAC_DEBUG            0x00000038      /* Debug */
+#define XGMAC_INT_STAT         0x0000003C      /* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg)   (0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg)    (0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n)          (0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH         16
+#define XGMAC_OMR              0x00000400
+#define XGMAC_REMOTE_WAKE      0x00000700      /* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT              0x00000704      /* PMT Control and Status */
+#define XGMAC_MMC_CTRL         0x00000800      /* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX      0x00000804      /* Receive Interrupt */
+#define XGMAC_MMC_INTR_TX      0x00000808      /* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c      /* Receive Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX 0x00000810      /* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO        0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI        0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO        0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI        0x00000820
+#define XGMAC_MMC_TXBCFRAME_G  0x00000824
+#define XGMAC_MMC_TXMCFRAME_G  0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
+#define XGMAC_MMC_TXUNDERFLOW  0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
+#define XGMAC_MMC_TXVLANFRAME  0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO        0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI        0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO        0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI        0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
+#define XGMAC_MMC_RXBCFRAME_G  0x00000918
+#define XGMAC_MMC_RXMCFRAME_G  0x00000920
+#define XGMAC_MMC_RXCRCERR     0x00000928
+#define XGMAC_MMC_RXRUNT       0x00000930
+#define XGMAC_MMC_RXJABBER     0x00000934
+#define XGMAC_MMC_RXUCFRAME_G  0x00000970
+#define XGMAC_MMC_RXLENGTHERR  0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
+#define XGMAC_MMC_RXOVERFLOW   0x00000990
+#define XGMAC_MMC_RXVLANFRAME  0x00000998
+#define XGMAC_MMC_RXWATCHDOG   0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE     0x00000f00      /* Bus Mode */
+#define XGMAC_DMA_TX_POLL      0x00000f04      /* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL      0x00000f08      /* Received Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c      /* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10      /* Transmit List Base */
+#define XGMAC_DMA_STATUS       0x00000f14      /* Status Register */
+#define XGMAC_DMA_CONTROL      0x00000f18      /* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA     0x00000f1c      /* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20    /* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER        0x00000f24      /* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS      0x00000f28      /* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS   0x00000f2C      /* AXI Status */
+#define XGMAC_DMA_HW_FEATURE   0x00000f58      /* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE          0x80000000
+#define XGMAC_MAX_FILTER_ADDR  31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET        0x80000000
+#define XGMAC_PMT_GLBL_UNICAST 0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM        0x00000040
+#define XGMAC_PMT_MAGIC_PKT    0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN        0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
+#define XGMAC_PMT_POWERDOWN    0x00000001
+
+#define XGMAC_CONTROL_SPD      0x40000000      /* Speed control */
+#define XGMAC_CONTROL_SPD_MASK 0x60000000
+#define XGMAC_CONTROL_SPD_1G   0x60000000
+#define XGMAC_CONTROL_SPD_2_5G 0x40000000
+#define XGMAC_CONTROL_SPD_10G  0x00000000
+#define XGMAC_CONTROL_SARC     0x10000000      /* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK        0x18000000
+#define XGMAC_CONTROL_CAR      0x04000000      /* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK 0x06000000
+#define XGMAC_CONTROL_DP       0x01000000      /* Disable Padding */
+#define XGMAC_CONTROL_WD       0x00800000      /* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD       0x00400000      /* Jabber disable */
+#define XGMAC_CONTROL_JE       0x00100000      /* Jumbo frame */
+#define XGMAC_CONTROL_LM       0x00001000      /* Loop-back mode */
+#define XGMAC_CONTROL_IPC      0x00000400      /* Checksum Offload */
+#define XGMAC_CONTROL_ACS      0x00000080      /* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC     0x00000010      /* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE       0x00000008      /* Transmitter Enable */
+#define XGMAC_CONTROL_RE       0x00000004      /* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR  0x00000001      /* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC 0x00000002      /* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC 0x00000004      /* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF        0x00000008      /* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM  0x00000010      /* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF 0x00000020      /* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF        0x00000100      /* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF 0x00000200      /* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF 0x00000400      /* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF 0x00000800      /* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF 0x00001000      /* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA  0x80000000      /* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK        0xffff0000      /* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT       16
+#define XGMAC_FLOW_CTRL_DZQP   0x00000080      /* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT    0x00000020      /* Pause Low Threshold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030    /* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP     0x00000008      /* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE    0x00000004      /* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE    0x00000002      /* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA        0x00000001      /* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT     0x0080          /* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI     0x0040          /* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001      /* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK  0x0000007c      /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2               /* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS      0x00000080      /* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK  0x00003f00      /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_FB                0x00010000      /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000      /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT        17
+#define DMA_BUS_MODE_USP       0x00800000
+#define DMA_BUS_MODE_8PBL      0x01000000
+#define DMA_BUS_MODE_AAL       0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK  0x0000c000      /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB             0x00010000      /* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST         0x00002000      /* Start/Stop Transmission */
+#define DMA_CONTROL_SR         0x00000002      /* Start/Stop Receive */
+#define DMA_CONTROL_DFF                0x01000000      /* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE       0x00010000      /* Normal Summary */
+#define DMA_INTR_ENA_AIE       0x00008000      /* Abnormal Summary */
+#define DMA_INTR_ENA_ERE       0x00004000      /* Early Receive */
+#define DMA_INTR_ENA_FBE       0x00002000      /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE       0x00000400      /* Early Transmit */
+#define DMA_INTR_ENA_RWE       0x00000200      /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE       0x00000100      /* Receive Stopped */
+#define DMA_INTR_ENA_RUE       0x00000080      /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE       0x00000040      /* Receive Interrupt */
+#define DMA_INTR_ENA_UNE       0x00000020      /* Tx Underflow */
+#define DMA_INTR_ENA_OVE       0x00000010      /* Receive Overflow */
+#define DMA_INTR_ENA_TJE       0x00000008      /* Transmit Jabber */
+#define DMA_INTR_ENA_TUE       0x00000004      /* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE       0x00000002      /* Transmit Stopped */
+#define DMA_INTR_ENA_TIE       0x00000001      /* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL                (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+                                DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL      (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+                                DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+                                DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+                                DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+                                DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK  (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI         0x08000000      /* MMC interrupt */
+#define DMA_STATUS_GLI         0x04000000      /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK     0x00380000      /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000      /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000      /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK     0x00700000      /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT    20
+#define DMA_STATUS_RS_MASK     0x000e0000      /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT    17
+#define DMA_STATUS_NIS         0x00010000      /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS         0x00008000      /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI         0x00004000      /* Early Receive Interrupt */
+#define DMA_STATUS_FBI         0x00002000      /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI         0x00000400      /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT         0x00000200      /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS         0x00000100      /* Receive Process Stopped */
+#define DMA_STATUS_RU          0x00000080      /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI          0x00000040      /* Receive Interrupt */
+#define DMA_STATUS_UNF         0x00000020      /* Transmit Underflow */
+#define DMA_STATUS_OVF         0x00000010      /* Receive Overflow */
+#define DMA_STATUS_TJT         0x00000008      /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU          0x00000004      /* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS         0x00000002      /* Transmit Process Stopped */
+#define DMA_STATUS_TI          0x00000001      /* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX          0x00000008      /* Transmitter Enable */
+#define MAC_ENABLE_RX          0x00000004      /* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF          0x00200000      /* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF          0x00100000      /* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC          0x00020000      /* Transmit Threshold Ctrl */
+#define XGMAC_OMR_TTC_MASK     0x00030000
+#define XGMAC_OMR_RFD          0x00006000      /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK     0x00007000      /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA          0x00000600      /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK     0x00000E00      /* FC Act Threshold MASK */
+#define XGMAC_OMR_EFC          0x00000100      /* Enable Hardware FC */
+#define XGMAC_OMR_FEF          0x00000080      /* Forward Error Frames */
+#define XGMAC_OMR_DT           0x00000040      /* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF          0x00000020      /* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC          0x00000010      /* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK     0x00000018      /* RX Threshold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL   0x00010000      /* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ                (0x2000 - 8)
+
+#define RXDESC_EXT_STATUS      0x00000001
+#define RXDESC_CRC_ERR         0x00000002
+#define RXDESC_RX_ERR          0x00000008
+#define RXDESC_RX_WDOG         0x00000010
+#define RXDESC_FRAME_TYPE      0x00000020
+#define RXDESC_GIANT_FRAME     0x00000080
+#define RXDESC_LAST_SEG                0x00000100
+#define RXDESC_FIRST_SEG       0x00000200
+#define RXDESC_VLAN_FRAME      0x00000400
+#define RXDESC_OVERFLOW_ERR    0x00000800
+#define RXDESC_LENGTH_ERR      0x00001000
+#define RXDESC_SA_FILTER_FAIL  0x00002000
+#define RXDESC_DESCRIPTOR_ERR  0x00004000
+#define RXDESC_ERROR_SUMMARY   0x00008000
+#define RXDESC_FRAME_LEN_OFFSET        16
+#define RXDESC_FRAME_LEN_MASK  0x3fff0000
+#define RXDESC_DA_FILTER_FAIL  0x40000000
+
+#define RXDESC1_END_RING       0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK 0x00000003
+#define RXDESC_IP_PAYLOAD_UDP  0x00000001
+#define RXDESC_IP_PAYLOAD_TCP  0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
+#define RXDESC_IP_HEADER_ERR   0x00000008
+#define RXDESC_IP_PAYLOAD_ERR  0x00000010
+#define RXDESC_IPV4_PACKET     0x00000040
+#define RXDESC_IPV6_PACKET     0x00000080
+#define TXDESC_UNDERFLOW_ERR   0x00000001
+#define TXDESC_JABBER_TIMEOUT  0x00000002
+#define TXDESC_LOCAL_FAULT     0x00000004
+#define TXDESC_REMOTE_FAULT    0x00000008
+#define TXDESC_VLAN_FRAME      0x00000010
+#define TXDESC_FRAME_FLUSHED   0x00000020
+#define TXDESC_IP_HEADER_ERR   0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR        0x00000080
+#define TXDESC_ERROR_SUMMARY   0x00008000
+#define TXDESC_SA_CTRL_INSERT  0x00040000
+#define TXDESC_SA_CTRL_REPLACE 0x00080000
+#define TXDESC_2ND_ADDR_CHAINED        0x00100000
+#define TXDESC_END_RING                0x00200000
+#define TXDESC_CSUM_IP         0x00400000
+#define TXDESC_CSUM_IP_PAYLD   0x00800000
+#define TXDESC_CSUM_ALL                0x00C00000
+#define TXDESC_CRC_EN_REPLACE  0x01000000
+#define TXDESC_CRC_EN_APPEND   0x02000000
+#define TXDESC_DISABLE_PAD     0x04000000
+#define TXDESC_FIRST_SEG       0x10000000
+#define TXDESC_LAST_SEG                0x20000000
+#define TXDESC_INTERRUPT       0x40000000
+
+#define DESC_OWN               0x80000000
+#define DESC_BUFFER1_SZ_MASK   0x00001fff
+#define DESC_BUFFER2_SZ_MASK   0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET 16
+
+struct xgmac_dma_desc {
+       __le32 flags;
+       __le32 buf_size;
+       __le32 buf1_addr;               /* Buffer 1 Address Pointer */
+       __le32 buf2_addr;               /* Buffer 2 Address Pointer */
+       __le32 ext_status;
+       __le32 res[3];
+};
+
+struct xgmac_extra_stats {
+       /* Transmit errors */
+       unsigned long tx_jabber;
+       unsigned long tx_frame_flushed;
+       unsigned long tx_payload_error;
+       unsigned long tx_ip_header_error;
+       unsigned long tx_local_fault;
+       unsigned long tx_remote_fault;
+       /* Receive errors */
+       unsigned long rx_watchdog;
+       unsigned long rx_da_filter_fail;
+       unsigned long rx_sa_filter_fail;
+       unsigned long rx_payload_error;
+       unsigned long rx_ip_header_error;
+       /* Tx/Rx IRQ errors */
+       unsigned long tx_underflow;
+       unsigned long tx_process_stopped;
+       unsigned long rx_buf_unav;
+       unsigned long rx_process_stopped;
+       unsigned long tx_early;
+       unsigned long fatal_bus_error;
+};
+
+struct xgmac_priv {
+       struct xgmac_dma_desc *dma_rx;
+       struct sk_buff **rx_skbuff;
+       unsigned int rx_tail;
+       unsigned int rx_head;
+
+       struct xgmac_dma_desc *dma_tx;
+       struct sk_buff **tx_skbuff;
+       unsigned int tx_head;
+       unsigned int tx_tail;
+
+       void __iomem *base;
+       struct sk_buff_head rx_recycle;
+       unsigned int dma_buf_sz;
+       dma_addr_t dma_rx_phy;
+       dma_addr_t dma_tx_phy;
+
+       struct net_device *dev;
+       struct device *device;
+       struct napi_struct napi;
+
+       struct xgmac_extra_stats xstats;
+
+       spinlock_t stats_lock;
+       int pmt_irq;
+       char rx_pause;
+       char tx_pause;
+       int wolopts;
+};
+
+/* XGMAC Configuration Settings */
+#define MAX_MTU                        9000
+#define PAUSE_TIME             0x400
+
+#define DMA_RX_RING_SZ         256
+#define DMA_TX_RING_SZ         128
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TX_THRESH              (DMA_TX_RING_SZ/4)
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s)    (((n) + 1) & ((s) - 1))
+#define dma_ring_space(h, t, s)        CIRC_SPACE(h, t, s)
+#define dma_ring_cnt(h, t, s)  CIRC_CNT(h, t, s)
+
+/* XGMAC Descriptor Access Helpers */
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
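+       /* A buffer larger than MAX_DESC_BUF_SZ is split across the two
+        * buffer pointers of the descriptor: buffer 1 carries the first
+        * MAX_DESC_BUF_SZ bytes and buffer 2 the remainder (see
+        * desc_set_buf_addr()). */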
+       if (buf_sz > MAX_DESC_BUF_SZ)
+               p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+                       (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+       else
+               p->buf_size = cpu_to_le32(buf_sz);
+}
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+       u32 len = le32_to_cpu(p->buf_size);
+       return (len & DESC_BUFFER1_SZ_MASK) +
+               ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+                                    int buf_sz)
+{
+       struct xgmac_dma_desc *end = p + ring_size - 1;
+
+       memset(p, 0, sizeof(*p) * ring_size);
+
+       for (; p <= end; p++)
+               desc_set_buf_len(p, buf_sz);
+
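+       /* For RX descriptors the end-of-ring bit lives in the buf_size
+        * word, unlike TX descriptors where it is part of the flags word. */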
+       end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+       memset(p, 0, sizeof(*p) * ring_size);
+       p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+       return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
+static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
+{
+       /* Clear all fields and set the owner */
+       p->flags = cpu_to_le32(DESC_OWN);
+}
+
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
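+       /* Keep only the end-of-ring marker; every other control flag is
+        * rewritten together with the OWN bit. */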
+       u32 tmpflags = le32_to_cpu(p->flags);
+       tmpflags &= TXDESC_END_RING;
+       tmpflags |= flags | DESC_OWN;
+       p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+       return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
+static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
+{
+       return le32_to_cpu(p->buf1_addr);
+}
+
+static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
+                                    u32 paddr, int len)
+{
+       p->buf1_addr = cpu_to_le32(paddr);
+       if (len > MAX_DESC_BUF_SZ)
+               p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
+}
+
+static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
+                                             u32 paddr, int len)
+{
+       desc_set_buf_len(p, len);
+       desc_set_buf_addr(p, paddr, len);
+}
+
+static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
+{
+       u32 data = le32_to_cpu(p->flags);
+       u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
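+
+       /* For Ethernet-type frames the reported length includes the FCS,
+        * so strip it before the skb is passed up the stack. */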
+       if (data & RXDESC_FRAME_TYPE)
+               len -= ETH_FCS_LEN;
+
+       return len;
+}
+
+static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+       int timeout = 1000;
+       u32 reg = readl(ioaddr + XGMAC_OMR);
+       writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
+
+       while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
+               udelay(1);
+}
+
+static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+       struct xgmac_extra_stats *x = &priv->xstats;
+       u32 status = le32_to_cpu(p->flags);
+
+       if (!(status & TXDESC_ERROR_SUMMARY))
+               return 0;
+
+       netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
+       if (status & TXDESC_JABBER_TIMEOUT)
+               x->tx_jabber++;
+       if (status & TXDESC_FRAME_FLUSHED)
+               x->tx_frame_flushed++;
+       if (status & TXDESC_UNDERFLOW_ERR)
+               xgmac_dma_flush_tx_fifo(priv->base);
+       if (status & TXDESC_IP_HEADER_ERR)
+               x->tx_ip_header_error++;
+       if (status & TXDESC_LOCAL_FAULT)
+               x->tx_local_fault++;
+       if (status & TXDESC_REMOTE_FAULT)
+               x->tx_remote_fault++;
+       if (status & TXDESC_PAYLOAD_CSUM_ERR)
+               x->tx_payload_error++;
+
+       return -1;
+}
+
+static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+       struct xgmac_extra_stats *x = &priv->xstats;
+       int ret = CHECKSUM_UNNECESSARY;
+       u32 status = le32_to_cpu(p->flags);
+       u32 ext_status = le32_to_cpu(p->ext_status);
+
+       if (status & RXDESC_DA_FILTER_FAIL) {
+               netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
+               x->rx_da_filter_fail++;
+               return -1;
+       }
+
+       /* Check if packet has checksum already */
+       if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+               !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+               ret = CHECKSUM_NONE;
+
+       netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
+                  (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
+
+       if (!(status & RXDESC_ERROR_SUMMARY))
+               return ret;
+
+       /* Handle any errors */
+       if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
+               RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
+               return -1;
+
+       if (status & RXDESC_EXT_STATUS) {
+               if (ext_status & RXDESC_IP_HEADER_ERR)
+                       x->rx_ip_header_error++;
+               if (ext_status & RXDESC_IP_PAYLOAD_ERR)
+                       x->rx_payload_error++;
+               netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
+                          ext_status);
+               return CHECKSUM_NONE;
+       }
+
+       return ret;
+}
+
+static inline void xgmac_mac_enable(void __iomem *ioaddr)
+{
+       u32 value = readl(ioaddr + XGMAC_CONTROL);
+       value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+       writel(value, ioaddr + XGMAC_CONTROL);
+
+       value = readl(ioaddr + XGMAC_DMA_CONTROL);
+       value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
+       writel(value, ioaddr + XGMAC_DMA_CONTROL);
+}
+
+static inline void xgmac_mac_disable(void __iomem *ioaddr)
+{
+       u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
+       value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+       writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+       value = readl(ioaddr + XGMAC_CONTROL);
+       value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+       writel(value, ioaddr + XGMAC_CONTROL);
+}
+
+static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+                              int num)
+{
+       u32 data;
+
+       data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+       writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+       data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+       writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+                              int num)
+{
+       u32 hi_addr, lo_addr;
+
+       /* Read the MAC address from the hardware */
+       hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+       lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+       /* Extract the MAC address from the high and low words */
+       addr[0] = lo_addr & 0xff;
+       addr[1] = (lo_addr >> 8) & 0xff;
+       addr[2] = (lo_addr >> 16) & 0xff;
+       addr[3] = (lo_addr >> 24) & 0xff;
+       addr[4] = hi_addr & 0xff;
+       addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
+{
+       u32 reg;
+       unsigned int flow = 0;
+
+       priv->rx_pause = rx;
+       priv->tx_pause = tx;
+
+       if (rx || tx) {
+               if (rx)
+                       flow |= XGMAC_FLOW_CTRL_RFE;
+               if (tx)
+                       flow |= XGMAC_FLOW_CTRL_TFE;
+
+               flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
+               flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
+
+               writel(flow, priv->base + XGMAC_FLOW_CTRL);
+
+               reg = readl(priv->base + XGMAC_OMR);
+               reg |= XGMAC_OMR_EFC;
+               writel(reg, priv->base + XGMAC_OMR);
+       } else {
+               writel(0, priv->base + XGMAC_FLOW_CTRL);
+
+               reg = readl(priv->base + XGMAC_OMR);
+               reg &= ~XGMAC_OMR_EFC;
+               writel(reg, priv->base + XGMAC_OMR);
+       }
+
+       return 0;
+}
+
+static void xgmac_rx_refill(struct xgmac_priv *priv)
+{
+       struct xgmac_dma_desc *p;
+       dma_addr_t paddr;
+
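+       /* Allocate and map a fresh receive buffer for every free slot but
+        * the last, then hand each descriptor back to the hardware. */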
+       while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
+               int entry = priv->rx_head;
+               struct sk_buff *skb;
+
+               p = priv->dma_rx + entry;
+
+               if (priv->rx_skbuff[entry] != NULL)
+                       continue;
+
+               skb = __skb_dequeue(&priv->rx_recycle);
+               if (skb == NULL)
+                       skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+               if (unlikely(skb == NULL))
+                       break;
+
+               priv->rx_skbuff[entry] = skb;
+               paddr = dma_map_single(priv->device, skb->data,
+                                        priv->dma_buf_sz, DMA_FROM_DEVICE);
+               desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+
+               netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
+                       priv->rx_head, priv->rx_tail);
+
+               priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
+               /* Ensure descriptor is in memory before handing to h/w */
+               wmb();
+               desc_set_rx_owner(p);
+       }
+}
+
+/**
+ * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description:  this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       unsigned int bfsize;
+
+       /* Set the buffer size according to the MTU; jumbo frames need
+        * correspondingly larger receive buffers. */
+       bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+                      64);
+
+       netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+       priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+                                 GFP_KERNEL);
+       if (!priv->rx_skbuff)
+               return -ENOMEM;
+
+       priv->dma_rx = dma_alloc_coherent(priv->device,
+                                         DMA_RX_RING_SZ *
+                                         sizeof(struct xgmac_dma_desc),
+                                         &priv->dma_rx_phy,
+                                         GFP_KERNEL);
+       if (!priv->dma_rx)
+               goto err_dma_rx;
+
+       priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+                                 GFP_KERNEL);
+       if (!priv->tx_skbuff)
+               goto err_tx_skb;
+
+       priv->dma_tx = dma_alloc_coherent(priv->device,
+                                         DMA_TX_RING_SZ *
+                                         sizeof(struct xgmac_dma_desc),
+                                         &priv->dma_tx_phy,
+                                         GFP_KERNEL);
+       if (!priv->dma_tx)
+               goto err_dma_tx;
+
+       netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+           "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+           priv->dma_rx, priv->dma_tx,
+           (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+       priv->rx_tail = 0;
+       priv->rx_head = 0;
+       priv->dma_buf_sz = bfsize;
+       desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+       xgmac_rx_refill(priv);
+
+       priv->tx_tail = 0;
+       priv->tx_head = 0;
+       desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+       writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+       writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+       return 0;
+
+err_dma_tx:
+       kfree(priv->tx_skbuff);
+err_tx_skb:
+       dma_free_coherent(priv->device,
+                         DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+                         priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+       kfree(priv->rx_skbuff);
+       return -ENOMEM;
+}
+
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+       int i;
+       struct xgmac_dma_desc *p;
+
+       if (!priv->rx_skbuff)
+               return;
+
+       for (i = 0; i < DMA_RX_RING_SZ; i++) {
+               if (priv->rx_skbuff[i] == NULL)
+                       continue;
+
+               p = priv->dma_rx + i;
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                priv->dma_buf_sz, DMA_FROM_DEVICE);
+               dev_kfree_skb_any(priv->rx_skbuff[i]);
+               priv->rx_skbuff[i] = NULL;
+       }
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+       int i, f;
+       struct xgmac_dma_desc *p;
+
+       if (!priv->tx_skbuff)
+               return;
+
+       for (i = 0; i < DMA_TX_RING_SZ; i++) {
+               if (priv->tx_skbuff[i] == NULL)
+                       continue;
+
+               p = priv->dma_tx + i;
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                desc_get_buf_len(p), DMA_TO_DEVICE);
+
+               for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+                       p = priv->dma_tx + i++;
+                       dma_unmap_page(priv->device, desc_get_buf_addr(p),
+                                      desc_get_buf_len(p), DMA_TO_DEVICE);
+               }
+
+               dev_kfree_skb_any(priv->tx_skbuff[i]);
+               priv->tx_skbuff[i] = NULL;
+       }
+}
+
+static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
+{
+       /* Release the DMA TX/RX socket buffers */
+       xgmac_free_rx_skbufs(priv);
+       xgmac_free_tx_skbufs(priv);
+
+       /* Free the consistent memory allocated for descriptor rings */
+       if (priv->dma_tx) {
+               dma_free_coherent(priv->device,
+                                 DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
+                                 priv->dma_tx, priv->dma_tx_phy);
+               priv->dma_tx = NULL;
+       }
+       if (priv->dma_rx) {
+               dma_free_coherent(priv->device,
+                                 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+                                 priv->dma_rx, priv->dma_rx_phy);
+               priv->dma_rx = NULL;
+       }
+       kfree(priv->rx_skbuff);
+       priv->rx_skbuff = NULL;
+       kfree(priv->tx_skbuff);
+       priv->tx_skbuff = NULL;
+}
+
+/**
+ * xgmac_tx_complete:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void xgmac_tx_complete(struct xgmac_priv *priv)
+{
+       int i;
+       void __iomem *ioaddr = priv->base;
+
+       writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
+
+       while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
+               unsigned int entry = priv->tx_tail;
+               struct sk_buff *skb = priv->tx_skbuff[entry];
+               struct xgmac_dma_desc *p = priv->dma_tx + entry;
+
+               /* Check if the descriptor is owned by the DMA. */
+               if (desc_get_owner(p))
+                       break;
+
+               /* Verify tx error by looking at the last segment */
+               if (desc_get_tx_ls(p))
+                       desc_get_tx_status(priv, p);
+
+               netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
+                       priv->tx_head, priv->tx_tail);
+
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                desc_get_buf_len(p), DMA_TO_DEVICE);
+
+               priv->tx_skbuff[entry] = NULL;
+               priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+               if (!skb) {
+                       continue;
+               }
+
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
+                                                             DMA_TX_RING_SZ);
+                       p = priv->dma_tx + priv->tx_tail;
+
+                       dma_unmap_page(priv->device, desc_get_buf_addr(p),
+                                      desc_get_buf_len(p), DMA_TO_DEVICE);
+               }
+
+               /*
+                * If there's room in the queue (limit it to size)
+                * we add this skb back into the pool,
+                * if it's the right size.
+                */
+               if ((skb_queue_len(&priv->rx_recycle) <
+                       DMA_RX_RING_SZ) &&
+                       skb_recycle_check(skb, priv->dma_buf_sz))
+                       __skb_queue_head(&priv->rx_recycle, skb);
+               else
+                       dev_kfree_skb(skb);
+       }
+
+       if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+           TX_THRESH)
+               netif_wake_queue(priv->dev);
+}
+
+/**
+ * xgmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void xgmac_tx_err(struct xgmac_priv *priv)
+{
+       u32 reg, value, inten;
+
+       netif_stop_queue(priv->dev);
+
+       inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
+       writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+       reg = readl(priv->base + XGMAC_DMA_CONTROL);
+       writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
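+
+       /* Wait for the transmit process state (the TS field in the DMA
+        * status register) to report stopped or suspended before the ring
+        * is reset. */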
+       do {
+               value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
+       } while (value && (value != 0x600000));
+
+       xgmac_free_tx_skbufs(priv);
+       desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+       priv->tx_tail = 0;
+       priv->tx_head = 0;
+       writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+
+       writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
+               priv->base + XGMAC_DMA_STATUS);
+       writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+
+       netif_wake_queue(priv->dev);
+}
+
+static int xgmac_hw_init(struct net_device *dev)
+{
+       u32 value, ctrl;
+       int limit;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+
+       /* Save the ctrl register value */
+       ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
+
+       /* SW reset */
+       value = DMA_BUS_MODE_SFT_RESET;
+       writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+       limit = 15000;
+       while (limit-- &&
+               (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+               cpu_relax();
+       if (limit < 0)
+               return -EBUSY;
+
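+       /* 16-beat programmable bursts for both TX and RX, fixed bursts,
+        * alternate (8-word) descriptor size and address-aligned beats. */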
+       value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
+               (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
+               DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
+       writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+
+       /* Enable interrupts */
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+       /* XGMAC requires AXI bus init. This is a 'magic number' for now */
+       writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+
+       ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
+               XGMAC_CONTROL_CAR;
+       if (dev->features & NETIF_F_RXCSUM)
+               ctrl |= XGMAC_CONTROL_IPC;
+       writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+       value = DMA_CONTROL_DFF;
+       writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+       /* Set the HW DMA mode and the COE */
+       writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+               ioaddr + XGMAC_OMR);
+
+       /* Reset the MMC counters */
+       writel(1, ioaddr + XGMAC_MMC_CTRL);
+       return 0;
+}
+
+/**
+ *  xgmac_open - open entry point of the driver
+ *  @dev : pointer to the device structure.
+ *  Description:
+ *  This function is the open entry point of the driver.
+ *  Return value:
+ *  0 on success, or a negative errno value on failure.
+ */
+static int xgmac_open(struct net_device *dev)
+{
+       int ret;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+
+       /* Check that the MAC address is valid.  If it is not, generate a
+        * random address so the interface can still be brought up; a real
+        * address can be assigned later, e.g. with:
+        *      ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx  */
+       if (!is_valid_ether_addr(dev->dev_addr)) {
+               random_ether_addr(dev->dev_addr);
+               netdev_dbg(priv->dev, "generated random MAC address %pM\n",
+                       dev->dev_addr);
+       }
+
+       skb_queue_head_init(&priv->rx_recycle);
+       memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+
+       /* Initialize the XGMAC and descriptors */
+       xgmac_hw_init(dev);
+       xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+       xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
+       ret = xgmac_dma_desc_rings_init(dev);
+       if (ret < 0)
+               return ret;
+
+       /* Enable the MAC Rx/Tx */
+       xgmac_mac_enable(ioaddr);
+
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+/**
+ *  xgmac_stop - stop entry point of the driver
+ *  @dev : device pointer.
+ *  Description:
+ *  This is the stop entry point of the driver.
+ */
+static int xgmac_stop(struct net_device *dev)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+
+       if (readl(priv->base + XGMAC_DMA_INTR_ENA))
+               napi_disable(&priv->napi);
+
+       writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+       skb_queue_purge(&priv->rx_recycle);
+
+       /* Disable the MAC core */
+       xgmac_mac_disable(priv->base);
+
+       /* Release and free the Rx/Tx resources */
+       xgmac_free_dma_desc_rings(priv);
+
+       return 0;
+}
+
+/**
+ *  xgmac_xmit:
+ *  @skb : the socket buffer
+ *  @dev : device pointer
+ *  Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       unsigned int entry;
+       int i;
+       int nfrags = skb_shinfo(skb)->nr_frags;
+       struct xgmac_dma_desc *desc, *first;
+       unsigned int desc_flags;
+       unsigned int len;
+       dma_addr_t paddr;
+
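+       /* Not enough free descriptors for this skb and all of its fragments:
+        * enable the transmit interrupt and ask the stack to retry later. */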
+       if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+           (nfrags + 1)) {
+               writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+                       priv->base + XGMAC_DMA_INTR_ENA);
+               netif_stop_queue(dev);
+               return NETDEV_TX_BUSY;
+       }
+
+       desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+               TXDESC_CSUM_ALL : 0;
+       entry = priv->tx_head;
+       desc = priv->dma_tx + entry;
+       first = desc;
+
+       len = skb_headlen(skb);
+       paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->device, paddr)) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+       priv->tx_skbuff[entry] = skb;
+       desc_set_buf_addr_and_size(desc, paddr, len);
+
+       for (i = 0; i < nfrags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               len = frag->size;
+
+               paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+                                        DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->device, paddr)) {
+                       dev_kfree_skb(skb);
+                       return NETDEV_TX_OK;
+               }
+
+               entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+               desc = priv->dma_tx + entry;
+               priv->tx_skbuff[entry] = NULL;
+
+               desc_set_buf_addr_and_size(desc, paddr, len);
+               if (i < (nfrags - 1))
+                       desc_set_tx_owner(desc, desc_flags);
+       }
+
+       /* Interrupt on completion only for the last segment */
+       if (desc != first)
+               desc_set_tx_owner(desc, desc_flags |
+                       TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+       else
+               desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+       /* Set owner on first desc last to avoid race condition */
+       wmb();
+       desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+       priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
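+       /* Transmit poll demand: make the DMA engine re-read the descriptor
+        * list and pick up the new frame immediately. */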
+       writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+       return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+       unsigned int entry;
+       unsigned int count = 0;
+       struct xgmac_dma_desc *p;
+
+       while (count < limit) {
+               int ip_checksum;
+               struct sk_buff *skb;
+               int frame_len;
+
+               writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+                      priv->base + XGMAC_DMA_STATUS);
+
+               entry = priv->rx_tail;
+               p = priv->dma_rx + entry;
+               if (desc_get_owner(p))
+                       break;
+
+               count++;
+               priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+               /* read the status of the incoming frame */
+               ip_checksum = desc_get_rx_status(priv, p);
+               if (ip_checksum < 0)
+                       continue;
+
+               skb = priv->rx_skbuff[entry];
+               if (unlikely(!skb)) {
+                       netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+                       break;
+               }
+               priv->rx_skbuff[entry] = NULL;
+
+               frame_len = desc_get_rx_frame_len(p);
+               netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+                       frame_len, ip_checksum);
+
+               skb_put(skb, frame_len);
+               dma_unmap_single(priv->device, desc_get_buf_addr(p),
+                                frame_len, DMA_FROM_DEVICE);
+
+               skb->protocol = eth_type_trans(skb, priv->dev);
+               skb->ip_summed = ip_checksum;
+               if (ip_checksum == CHECKSUM_NONE)
+                       netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&priv->napi, skb);
+       }
+
+       xgmac_rx_refill(priv);
+
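+       /* Receive poll demand: restart the RX DMA in case it suspended
+        * while no free descriptors were available. */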
+       writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+       return count;
+}
+
+/**
+ *  xgmac_poll - xgmac poll method (NAPI)
+ *  @napi : pointer to the napi structure.
+ *  @budget : maximum number of packets that the current CPU can receive from
+ *           all interfaces.
+ *  Description :
+ *   This function implements the reception process.
+ *   Also it runs the TX completion thread
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+       struct xgmac_priv *priv = container_of(napi,
+                                      struct xgmac_priv, napi);
+       int work_done = 0;
+
+       xgmac_tx_complete(priv);
+       work_done = xgmac_rx(priv, budget);
+
+       if (work_done < budget) {
+               napi_complete(napi);
+               writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+       }
+       return work_done;
+}
+
+/**
+ *  xgmac_tx_timeout
+ *  @dev : Pointer to net device structure
+ *  Description: this function is called when a packet transmission fails to
+ *   complete within a reasonable time. The driver will mark the error in the
+ *   netdev structure and arrange for the device to be reset to a sane state
+ *   in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+
+       /* Clear Tx resources and restart transmitting again */
+       xgmac_tx_err(priv);
+}
+
+/**
+ *  xgmac_set_rx_mode - entry point for multicast addressing
+ *  @dev : pointer to the device structure
+ *  Description:
+ *  This function is a driver entry point which gets called by the kernel
+ *  whenever multicast addresses must be enabled/disabled.
+ *  Return value:
+ *  void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+       int i;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+       unsigned int value = 0;
+       u32 hash_filter[XGMAC_NUM_HASH];
+       int reg = 1;
+       struct netdev_hw_addr *ha;
+       bool use_hash = false;
+
+       netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+                netdev_mc_count(dev), netdev_uc_count(dev));
+
+       if (dev->flags & IFF_PROMISC) {
+               writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
+               return;
+       }
+
+       memset(hash_filter, 0, sizeof(hash_filter));
+
+       if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+               use_hash = true;
+               value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+       }
+       netdev_for_each_uc_addr(ha, dev) {
+               if (use_hash) {
+                       u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+                       /* The most significant 4 bits determine the register to
+                        * use (H/L) while the other 5 bits determine the bit
+                        * within the register. */
+                       hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+               } else {
+                       xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+                       reg++;
+               }
+       }
+
+       if (dev->flags & IFF_ALLMULTI) {
+               value |= XGMAC_FRAME_FILTER_PM;
+               goto out;
+       }
+
+       if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+               use_hash = true;
+               value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+       }
+       netdev_for_each_mc_addr(ha, dev) {
+               if (use_hash) {
+                       u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+                       /* The most significant 4 bits determine the register to
+                        * use (H/L) while the other 5 bits determine the bit
+                        * within the register. */
+                       hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+               } else {
+                       xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+                       reg++;
+               }
+       }
+
+out:
+       for (i = 0; i < XGMAC_NUM_HASH; i++)
+               writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+       writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
+/**
+ *  xgmac_change_mtu - entry point to change MTU size for the device.
+ *  @dev : device pointer.
+ *  @new_mtu : the new MTU size for the device.
+ *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ *  to drive packet transmission. Ethernet has an MTU of 1500 octets
+ *  (ETH_DATA_LEN). This value can be changed with ifconfig.
+ *  Return value:
+ *  0 on success, or a negative errno value on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       int old_mtu;
+
+       if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+               netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+               return -EINVAL;
+       }
+
+       old_mtu = dev->mtu;
+       dev->mtu = new_mtu;
+
+       /* return early if the buffer sizes will not change */
+       if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+               return 0;
+       if (old_mtu == new_mtu)
+               return 0;
+
+       /* Stop everything, get ready to change the MTU */
+       if (!netif_running(dev))
+               return 0;
+
+       /* Bring the interface down and then back up */
+       xgmac_stop(dev);
+       return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+       u32 intr_status;
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+
+       intr_status = readl(ioaddr + XGMAC_INT_STAT);
+       if (intr_status & XGMAC_INT_STAT_PMT) {
+               netdev_dbg(priv->dev, "received Magic frame\n");
+               /* clear the PMT bits 5 and 6 by reading the PMT */
+               readl(ioaddr + XGMAC_PMT);
+       }
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+       u32 intr_status;
+       bool tx_err = false;
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       struct xgmac_extra_stats *x = &priv->xstats;
+
+       /* read the status register (CSR5) */
+       intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+       intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+       writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+       /* It displays the DMA process states (CSR5 register) */
+       /* ABNORMAL interrupts */
+       if (unlikely(intr_status & DMA_STATUS_AIS)) {
+               if (intr_status & DMA_STATUS_TJT) {
+                       netdev_err(priv->dev, "transmit jabber\n");
+                       x->tx_jabber++;
+               }
+               if (intr_status & DMA_STATUS_RU)
+                       x->rx_buf_unav++;
+               if (intr_status & DMA_STATUS_RPS) {
+                       netdev_err(priv->dev, "receive process stopped\n");
+                       x->rx_process_stopped++;
+               }
+               if (intr_status & DMA_STATUS_ETI) {
+                       netdev_err(priv->dev, "transmit early interrupt\n");
+                       x->tx_early++;
+               }
+               if (intr_status & DMA_STATUS_TPS) {
+                       netdev_err(priv->dev, "transmit process stopped\n");
+                       x->tx_process_stopped++;
+                       tx_err = true;
+               }
+               if (intr_status & DMA_STATUS_FBI) {
+                       netdev_err(priv->dev, "fatal bus error\n");
+                       x->fatal_bus_error++;
+                       tx_err = true;
+               }
+
+               if (tx_err)
+                       xgmac_tx_err(priv);
+       }
+
+       /* TX/RX NORMAL interrupts */
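+       /* Leave only the abnormal interrupts enabled while NAPI drains the
+        * rings; xgmac_poll() restores the default mask on completion. */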
+       if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+               writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+               napi_schedule(&priv->napi);
+       }
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void xgmac_poll_controller(struct net_device *dev)
+{
+       disable_irq(dev->irq);
+       xgmac_interrupt(dev->irq, dev);
+       enable_irq(dev->irq);
+}
+#endif
+
+static struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+                      struct rtnl_link_stats64 *storage)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *base = priv->base;
+       u32 count;
+
+       spin_lock_bh(&priv->stats_lock);
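+       /* Freeze the MMC counters so a consistent snapshot is read */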
+       writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+       storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+       storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+       storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+       storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+       storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+       storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+       storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+       storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+       storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
+       count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+       storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+       storage->tx_packets = count;
+       storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+       writel(0, base + XGMAC_MMC_CTRL);
+       spin_unlock_bh(&priv->stats_lock);
+       return storage;
+}
+
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+       xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+       return 0;
+}
+
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+       u32 ctrl;
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->base;
+       u32 changed = dev->features ^ features;
+
+       if (!(changed & NETIF_F_RXCSUM))
+               return 0;
+
+       ctrl = readl(ioaddr + XGMAC_CONTROL);
+       if (features & NETIF_F_RXCSUM)
+               ctrl |= XGMAC_CONTROL_IPC;
+       else
+               ctrl &= ~XGMAC_CONTROL_IPC;
+       writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+       return 0;
+}
+
+static const struct net_device_ops xgmac_netdev_ops = {
+       .ndo_open = xgmac_open,
+       .ndo_start_xmit = xgmac_xmit,
+       .ndo_stop = xgmac_stop,
+       .ndo_change_mtu = xgmac_change_mtu,
+       .ndo_set_rx_mode = xgmac_set_rx_mode,
+       .ndo_tx_timeout = xgmac_tx_timeout,
+       .ndo_get_stats64 = xgmac_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = xgmac_poll_controller,
+#endif
+       .ndo_set_mac_address = xgmac_set_mac_address,
+       .ndo_set_features = xgmac_set_features,
+};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+                                         struct ethtool_cmd *cmd)
+{
+       cmd->autoneg = 0;
+       cmd->duplex = DUPLEX_FULL;
+       ethtool_cmd_speed_set(cmd, 10000);
+       cmd->supported = 0;
+       cmd->advertising = 0;
+       cmd->transceiver = XCVR_INTERNAL;
+       return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+                                     struct ethtool_pauseparam *pause)
+{
+       struct xgmac_priv *priv = netdev_priv(netdev);
+
+       pause->rx_pause = priv->rx_pause;
+       pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+                                    struct ethtool_pauseparam *pause)
+{
+       struct xgmac_priv *priv = netdev_priv(netdev);
+
+       if (pause->autoneg)
+               return -EINVAL;
+
+       return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+struct xgmac_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int stat_offset;
+       bool is_reg;
+};
+
+#define XGMAC_STAT(m)  \
+       { #m, offsetof(struct xgmac_priv, xstats.m), false }
+#define XGMAC_HW_STAT(m, reg_offset)   \
+       { #m, reg_offset, true }
+
+static const struct xgmac_stats xgmac_gstrings_stats[] = {
+       XGMAC_STAT(tx_frame_flushed),
+       XGMAC_STAT(tx_payload_error),
+       XGMAC_STAT(tx_ip_header_error),
+       XGMAC_STAT(tx_local_fault),
+       XGMAC_STAT(tx_remote_fault),
+       XGMAC_STAT(tx_early),
+       XGMAC_STAT(tx_process_stopped),
+       XGMAC_STAT(tx_jabber),
+       XGMAC_STAT(rx_buf_unav),
+       XGMAC_STAT(rx_process_stopped),
+       XGMAC_STAT(rx_payload_error),
+       XGMAC_STAT(rx_ip_header_error),
+       XGMAC_STAT(rx_da_filter_fail),
+       XGMAC_STAT(rx_sa_filter_fail),
+       XGMAC_STAT(fatal_bus_error),
+       XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
+       XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
+       XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
+       XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
+       XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
+};
+#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+                                        struct ethtool_stats *dummy,
+                                        u64 *data)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       void *p = priv;
+       int i;
+
+       for (i = 0; i < XGMAC_STATS_LEN; i++) {
+               if (xgmac_gstrings_stats[i].is_reg)
+                       *data++ = readl(priv->base +
+                               xgmac_gstrings_stats[i].stat_offset);
+               else
+                       *data++ = *(u32 *)(p +
+                               xgmac_gstrings_stats[i].stat_offset);
+       }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return XGMAC_STATS_LEN;
+       default:
+               return -EINVAL;
+       }
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+                                  u8 *data)
+{
+       int i;
+       u8 *p = data;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < XGMAC_STATS_LEN; i++) {
+                       memcpy(p, xgmac_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+                              struct ethtool_wolinfo *wol)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+
+       if (device_can_wakeup(priv->device)) {
+               wol->supported = WAKE_MAGIC | WAKE_UCAST;
+               wol->wolopts = priv->wolopts;
+       }
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+                             struct ethtool_wolinfo *wol)
+{
+       struct xgmac_priv *priv = netdev_priv(dev);
+       u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+       if (!device_can_wakeup(priv->device))
+               return -ENOTSUPP;
+
+       if (wol->wolopts & ~support)
+               return -EINVAL;
+
+       priv->wolopts = wol->wolopts;
+
+       if (wol->wolopts) {
+               device_set_wakeup_enable(priv->device, 1);
+               enable_irq_wake(dev->irq);
+       } else {
+               device_set_wakeup_enable(priv->device, 0);
+               disable_irq_wake(dev->irq);
+       }
+
+       return 0;
+}
+
+static const struct ethtool_ops xgmac_ethtool_ops = {
+       .get_settings = xgmac_ethtool_getsettings,
+       .get_link = ethtool_op_get_link,
+       .get_pauseparam = xgmac_get_pauseparam,
+       .set_pauseparam = xgmac_set_pauseparam,
+       .get_ethtool_stats = xgmac_get_ethtool_stats,
+       .get_strings = xgmac_get_strings,
+       .get_wol = xgmac_get_wol,
+       .set_wol = xgmac_set_wol,
+       .get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct resource *res;
+       struct net_device *ndev = NULL;
+       struct xgmac_priv *priv = NULL;
+       u32 uid;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       if (!request_mem_region(res->start, resource_size(res), pdev->name))
+               return -EBUSY;
+
+       ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+       if (!ndev) {
+               ret = -ENOMEM;
+               goto err_alloc;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       priv = netdev_priv(ndev);
+       platform_set_drvdata(pdev, ndev);
+       ether_setup(ndev);
+       ndev->netdev_ops = &xgmac_netdev_ops;
+       SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+       spin_lock_init(&priv->stats_lock);
+
+       priv->device = &pdev->dev;
+       priv->dev = ndev;
+       priv->rx_pause = 1;
+       priv->tx_pause = 1;
+
+       priv->base = ioremap(res->start, resource_size(res));
+       if (!priv->base) {
+               netdev_err(ndev, "ioremap failed\n");
+               ret = -ENOMEM;
+               goto err_io;
+       }
+
+       uid = readl(priv->base + XGMAC_VERSION);
+       netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
+       writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+       ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq == -ENXIO) {
+               netdev_err(ndev, "No irq resource\n");
+               ret = ndev->irq;
+               goto err_irq;
+       }
+
+       ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+                         dev_name(&pdev->dev), ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "Could not request irq %d - ret %d\n",
+                       ndev->irq, ret);
+               goto err_irq;
+       }
+
+       priv->pmt_irq = platform_get_irq(pdev, 1);
+       if (priv->pmt_irq == -ENXIO) {
+               netdev_err(ndev, "No pmt irq resource\n");
+               ret = priv->pmt_irq;
+               goto err_pmt_irq;
+       }
+
+       ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+                         dev_name(&pdev->dev), ndev);
+       if (ret < 0) {
+               netdev_err(ndev, "Could not request irq %d - ret %d\n",
+                       priv->pmt_irq, ret);
+               goto err_pmt_irq;
+       }
+
+       device_set_wakeup_capable(&pdev->dev, 1);
+       if (device_can_wakeup(priv->device))
+               priv->wolopts = WAKE_MAGIC;     /* Magic Frame as default */
+
+       ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+       if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+               ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                    NETIF_F_RXCSUM;
+       ndev->features |= ndev->hw_features;
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+
+       /* Get the MAC address */
+       xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               netdev_warn(ndev, "MAC address %pM not valid\n",
+                        ndev->dev_addr);
+
+       netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+       ret = register_netdev(ndev);
+       if (ret)
+               goto err_reg;
+
+       return 0;
+
+err_reg:
+       netif_napi_del(&priv->napi);
+       free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+       free_irq(ndev->irq, ndev);
+err_irq:
+       iounmap(priv->base);
+err_io:
+       free_netdev(ndev);
+err_alloc:
+       release_mem_region(res->start, resource_size(res));
+       platform_set_drvdata(pdev, NULL);
+       return ret;
+}
+
+/**
+ * xgmac_remove
+ * @pdev: platform device pointer
+ * Description: this function disables the MAC core, frees the IRQ lines,
+ * unregisters the network device and unmaps the allocated I/O memory.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct xgmac_priv *priv = netdev_priv(ndev);
+       struct resource *res;
+
+       xgmac_mac_disable(priv->base);
+
+       /* Free the IRQ lines */
+       free_irq(ndev->irq, ndev);
+       free_irq(priv->pmt_irq, ndev);
+
+       platform_set_drvdata(pdev, NULL);
+       unregister_netdev(ndev);
+       netif_napi_del(&priv->napi);
+
+       iounmap(priv->base);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       release_mem_region(res->start, resource_size(res));
+
+       free_netdev(ndev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+       unsigned int pmt = 0;
+
+       if (mode & WAKE_MAGIC)
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+       if (mode & WAKE_UCAST)
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+       writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+       struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+       struct xgmac_priv *priv = netdev_priv(ndev);
+       u32 value;
+
+       if (!ndev || !netif_running(ndev))
+               return 0;
+
+       netif_device_detach(ndev);
+       napi_disable(&priv->napi);
+       writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+       if (device_may_wakeup(priv->device)) {
+               /* Stop TX/RX DMA Only */
+               value = readl(priv->base + XGMAC_DMA_CONTROL);
+               value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+               writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+               xgmac_pmt(priv->base, priv->wolopts);
+       } else
+               xgmac_mac_disable(priv->base);
+
+       return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+       struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+       struct xgmac_priv *priv = netdev_priv(ndev);
+       void __iomem *ioaddr = priv->base;
+
+       if (!netif_running(ndev))
+               return 0;
+
+       xgmac_pmt(ioaddr, 0);
+
+       /* Enable the MAC and DMA */
+       xgmac_mac_enable(ioaddr);
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+       writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+       netif_device_attach(ndev);
+       napi_enable(&priv->napi);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
+#define XGMAC_PM_OPS (&xgmac_pm_ops)
+#else
+#define XGMAC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgmac_of_match[] = {
+       { .compatible = "calxeda,hb-xgmac", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+       .driver = {
+               .name = "calxedaxgmac",
+               .of_match_table = xgmac_of_match,
+               .pm = XGMAC_PM_OPS,
+       },
+       .probe = xgmac_probe,
+       .remove = xgmac_remove,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
index ca26d97..1d17c92 100644 (file)
@@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct adapter *adapter = dev->ml_priv;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(adapter->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
 }
 
 static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
        return 0;
 }
 
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
        struct adapter *adapter = dev->ml_priv;
 
        if (changed & NETIF_F_HW_VLAN_RX)
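The u32 to netdev_features_t conversions in this and the following hunks move feature masks to the dedicated type so drivers stop truncating high feature bits. A sketch of the converted ndo_fix_features/ndo_set_features signatures, mirroring the rx/tx VLAN coupling used above but with hypothetical foo_* names:

    #include <linux/netdevice.h>

    static netdev_features_t foo_fix_features(struct net_device *dev,
                                              netdev_features_t features)
    {
            /* hardware toggles rx/tx VLAN acceleration together */
            if (features & NETIF_F_HW_VLAN_RX)
                    features |= NETIF_F_HW_VLAN_TX;
            else
                    features &= ~NETIF_F_HW_VLAN_TX;

            return features;
    }

    static int foo_set_features(struct net_device *dev,
                                netdev_features_t features)
    {
            netdev_features_t changed = dev->features ^ features;

            if (changed & NETIF_F_HW_VLAN_RX)
                    ;       /* reprogram VLAN stripping in hardware here */

            return 0;
    }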
index f9b6023..47a8435 100644 (file)
@@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
 /*
  * Enable/disable VLAN acceleration.
  */
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
 {
        struct sge *sge = adapter->sge;
 
index e03980b..b9bf16b 100644 (file)
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
 int t1_poll(struct napi_struct *, int);
 
 netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
 void t1_sge_start(struct sge *);
 void t1_sge_stop(struct sge *);
 int t1_sge_intr_error_handler(struct sge *);
index 4d15c8f..857cc25 100644 (file)
@@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(adapter->pdev));
-       if (!fw_vers)
-               strcpy(info->fw_version, "N/A");
-       else {
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
+       if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
-       }
 }
 
 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
        }
 }
 
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features)
        t3_synchronize_rx(adapter, pi);
 }
 
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                cxgb_vlan_mode(dev, features);
index 90ff131..65e4b28 100644 (file)
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new);
-               cxgb_neigh_update(dst_get_neighbour(nr->new));
+               cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
                break;
        }
        default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
 
 static void cxgb_neigh_update(struct neighbour *neigh)
 {
-       struct net_device *dev = neigh->dev;
+       struct net_device *dev;
 
+       if (!neigh)
+               return;
+       dev = neigh->dev;
        if (dev && (is_offloading(dev))) {
                struct t3cdev *tdev = dev2t3cdev(dev);
 
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
 static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
 {
        struct net_device *olddev, *newdev;
+       struct neighbour *n;
        struct tid_info *ti;
        struct t3cdev *tdev;
        u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        struct l2t_entry *e;
        struct t3c_tid_entry *te;
 
-       olddev = dst_get_neighbour(old)->dev;
-       newdev = dst_get_neighbour(new)->dev;
+       n = dst_get_neighbour_noref(old);
+       if (!n)
+               return;
+       olddev = n->dev;
+
+       n = dst_get_neighbour_noref(new);
+       if (!n)
+               return;
+       newdev = n->dev;
+
        if (!is_offloading(olddev))
                return;
        if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
        }
 
        /* Add new L2T entry */
-       e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+       e = t3_l2t_get(tdev, new, newdev);
        if (!e) {
                printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
                       __func__);
@@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
 
 out_free_l2t:
        t3_free_l2t(L2DATA(dev));
-       rcu_assign_pointer(dev->l2opt, NULL);
+       RCU_INIT_POINTER(dev->l2opt, NULL);
 out_free:
        kfree(t);
        return err;
@@ -1329,7 +1341,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
        rcu_read_lock();
        d = L2DATA(tdev);
        rcu_read_unlock();
-       rcu_assign_pointer(tdev->l2opt, NULL);
+       RCU_INIT_POINTER(tdev->l2opt, NULL);
        call_rcu(&d->rcu_head, clean_l2_data);
        if (t->nofail_skb)
                kfree_skb(t->nofail_skb);
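The dst_get_neighbour() to dst_get_neighbour_noref() switch in the hunk above returns a neighbour that is only valid inside an RCU read-side section and may now be NULL, hence the new checks before dereferencing. A minimal sketch of that pattern (foo_dst_ifindex() is hypothetical):

    #include <linux/rcupdate.h>
    #include <net/dst.h>

    static int foo_dst_ifindex(struct dst_entry *dst)
    {
            struct neighbour *n;
            int ifidx = -1;

            rcu_read_lock();
            n = dst_get_neighbour_noref(dst);
            if (n)                          /* may legitimately be NULL */
                    ifidx = n->dev->ifindex;
            rcu_read_unlock();

            return ifidx;
    }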
index 70fec8b..3fa3c88 100644 (file)
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
        spin_unlock(&e->lock);
 }
 
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                             struct net_device *dev)
 {
        struct l2t_entry *e = NULL;
+       struct neighbour *neigh;
+       struct port_info *p;
        struct l2t_data *d;
        int hash;
-       u32 addr = *(u32 *) neigh->primary_key;
-       int ifidx = neigh->dev->ifindex;
-       struct port_info *p = netdev_priv(dev);
-       int smt_idx = p->port_id;
+       u32 addr;
+       int ifidx;
+       int smt_idx;
 
        rcu_read_lock();
+       neigh = dst_get_neighbour_noref(dst);
+       if (!neigh)
+               goto done_rcu;
+
+       addr = *(u32 *) neigh->primary_key;
+       ifidx = neigh->dev->ifindex;
+
+       if (!dev)
+               dev = neigh->dev;
+       p = netdev_priv(dev);
+       smt_idx = p->port_id;
+
        d = L2DATA(cdev);
        if (!d)
                goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                        l2t_hold(d, e);
                        if (atomic_read(&e->refcnt) == 1)
                                reuse_entry(e, neigh);
-                       goto done;
+                       goto done_unlock;
                }
 
        /* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
                        e->vlan = VLAN_NONE;
                spin_unlock(&e->lock);
        }
-done:
+done_unlock:
        write_unlock_bh(&d->lock);
 done_rcu:
        rcu_read_unlock();
index c5f5479..c4e8643 100644 (file)
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
 
 void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
 void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                             struct net_device *dev);
 int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
                     struct l2t_entry *e);
index 4c8f42a..a34e7ce 100644 (file)
@@ -1002,13 +1002,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct adapter *adapter = netdev2adap(dev);
 
-       strcpy(info->driver, KBUILD_MODNAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(adapter->pdev));
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(adapter->pdev),
+               sizeof(info->bus_info));
 
-       if (!adapter->params.fw_vers)
-               strcpy(info->fw_version, "N/A");
-       else
+       if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1854,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        return err;
 }
 
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
        const struct port_info *pi = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
        int err;
 
        if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -3537,7 +3536,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 {
        int func, i, err;
        struct port_info *pi;
-       unsigned int highdma = 0;
+       bool highdma = false;
        struct adapter *adapter = NULL;
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3562,7 @@ static int __devinit init_one(struct pci_dev *pdev,
        }
 
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               highdma = NETIF_F_HIGHDMA;
+               highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3636,9 @@ static int __devinit init_one(struct pci_dev *pdev,
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_RXHASH |
                        NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-               netdev->features |= netdev->hw_features | highdma;
+               if (highdma)
+                       netdev->hw_features |= NETIF_F_HIGHDMA;
+               netdev->features |= netdev->hw_features;
                netdev->vlan_features = netdev->features & VLAN_FEAT;
 
                netdev->priv_flags |= IFF_UNICAST_FLT;
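The highdma change above replaces a feature-flag-stored-in-an-int with a bool and only advertises NETIF_F_HIGHDMA once the 64-bit DMA mask has actually been accepted. A condensed sketch of that probe-time logic (foo_setup_dma_features() is illustrative, not part of the patch):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>

    static void foo_setup_dma_features(struct pci_dev *pdev,
                                       struct net_device *netdev)
    {
            bool highdma = false;

            if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
                    highdma = true;         /* 64-bit DMA is usable */

            if (highdma)
                    netdev->hw_features |= NETIF_F_HIGHDMA;
            netdev->features |= netdev->hw_features;
    }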
index 140254c..2dae795 100644 (file)
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
        __be64 *d = &q->desc[q->pidx];
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-       gfp |= __GFP_NOWARN;         /* failures are expected */
+       gfp |= __GFP_NOWARN | __GFP_COLD;
 
 #if FL_PG_ORDER > 0
        /*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 #endif
 
        while (n--) {
-               pg = __netdev_alloc_page(adap->port[0], gfp);
+               pg = alloc_page(gfp);
                if (unlikely(!pg)) {
                        q->alloc_failed++;
                        break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
                mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-                       netdev_free_page(adap->port[0], pg);
+                       put_page(pg);
                        goto out;
                }
                *d++ = cpu_to_be64(mapping);
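The refill path above drops the netdev page-allocation wrappers: pages now come straight from alloc_page() with __GFP_NOWARN | __GFP_COLD, and a failed DMA mapping is unwound with put_page(). A small sketch of one refill step under those assumptions (foo_map_rx_page() is hypothetical):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/dma-mapping.h>

    static int foo_map_rx_page(struct device *dev, struct page **pagep,
                               dma_addr_t *mapping)
    {
            struct page *page;

            page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_COLD);
            if (!page)
                    return -ENOMEM;         /* caller counts alloc failures */

            *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *mapping)) {
                    put_page(page);         /* no netdev_free_page() needed */
                    return -EIO;
            }

            *pagep = page;
            return 0;
    }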
index da9072b..8155cfe 100644 (file)
@@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
        return ret;
 }
 
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct port_info *pi = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
 {
        struct adapter *adapter = netdev2adap(dev);
 
-       strcpy(drvinfo->driver, KBUILD_MODNAME);
-       strcpy(drvinfo->version, DRV_VERSION);
-       strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+       strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+               sizeof(drvinfo->bus_info));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%u.%u.%u.%u, TP %u.%u.%u.%u",
                 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
index 8d5d55a..c381db2 100644 (file)
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 
 alloc_small_pages:
        while (n--) {
-               page = __netdev_alloc_page(adapter->port[0],
-                                          gfp | __GFP_NOWARN);
+               page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
                if (unlikely(!page)) {
                        fl->alloc_failed++;
                        break;
@@ -664,7 +663,7 @@ alloc_small_pages:
                dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
                                       PCI_DMA_FROMDEVICE);
                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
-                       netdev_free_page(adapter->port[0], page);
+                       put_page(page);
                        break;
                }
                *d++ = cpu_to_be64(dma_addr);
index fd6247b..bf0fc56 100644 (file)
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct enic *enic = netdev_priv(netdev);
+       int err;
 
        spin_lock(&enic->devcmd_lock);
-       enic_add_vlan(enic, vid);
+       err = enic_add_vlan(enic, vid);
        spin_unlock(&enic->devcmd_lock);
+
+       return err;
 }
 
 /* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct enic *enic = netdev_priv(netdev);
+       int err;
 
        spin_lock(&enic->devcmd_lock);
-       enic_del_vlan(enic, vid);
+       err = enic_del_vlan(enic, vid);
        spin_unlock(&enic->devcmd_lock);
+
+       return err;
 }
 
 int enic_dev_enable2(struct enic *enic, int active)
index 1f83a47..da1cba3 100644 (file)
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
        int broadcast, int promisc, int allmulti);
 int enic_dev_add_addr(struct enic *enic, u8 *addr);
 int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 int enic_dev_notify_unset(struct enic *enic);
 int enic_dev_hang_notify(struct enic *enic);
 int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
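These enic hooks change from void to int because the VLAN ndo callbacks now return an error code, so a failed hardware filter update can propagate back through the 8021q layer. A sketch of how such int-returning hooks are attached (foo_* names are illustrative only):

    #include <linux/netdevice.h>

    static int foo_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
    {
            /* program the VLAN filter; return a negative errno on failure */
            return 0;
    }

    static int foo_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
    {
            return 0;
    }

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_vlan_rx_add_vid    = foo_vlan_rx_add_vid,
            .ndo_vlan_rx_kill_vid   = foo_vlan_rx_kill_vid,
    };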
index c3786fd..2fd9db4 100644 (file)
@@ -217,11 +217,11 @@ static void enic_get_drvinfo(struct net_device *netdev,
 
        enic_dev_fw_info(enic, &fw_info);
 
-       strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
-       strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-       strncpy(drvinfo->fw_version, fw_info->fw_version,
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, fw_info->fw_version,
                sizeof(drvinfo->fw_version));
-       strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+       strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
                sizeof(drvinfo->bus_info));
 }
 
@@ -2379,7 +2379,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 
 #endif
        /* Allocate structure for port profiles */
-       enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+       enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
        if (!enic->pp) {
                pr_err("port profile alloc failed, aborting\n");
                err = -ENOMEM;
index 2a22f52..f801754 100644 (file)
@@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev)
        return mii_nway_restart(&dm->mii);
 }
 
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        board_info_t *dm = to_dm9000_board(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
        unsigned long flags;
 
        if (!(changed & NETIF_F_RXCSUM))
index 1427739..1eb46a0 100644 (file)
@@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
 {
        struct de_private *de = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(de->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
        info->eedump_len = DE_EEPROM_SIZE;
 }
 
index 17b11ee..51f7542 100644 (file)
@@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
 {
        struct dmfe_board_info *np = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (np->pdev)
-               strcpy(info->bus_info, pci_name(np->pdev));
+               strlcpy(info->bus_info, pci_name(np->pdev),
+                       sizeof(info->bus_info));
        else
                sprintf(info->bus_info, "EISA 0x%lx %d",
                        dev->base_addr, dev->irq);
index 9656dd0..4eb0d76 100644 (file)
@@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct tulip_private *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
 
index 7a44a7a..48b0b65 100644 (file)
@@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev,
 {
        struct uli526x_board_info *np = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        if (np->pdev)
-               strcpy(info->bus_info, pci_name(np->pdev));
+               strlcpy(info->bus_info, pci_name(np->pdev),
+                       sizeof(info->bus_info));
        else
                sprintf(info->bus_info, "EISA 0x%lx %d",
                        dev->base_addr, dev->irq);
index 4d01219..52da7b2 100644 (file)
@@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
 {
        struct netdev_private *np = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index dcd7f7a..28a3a9b 100644 (file)
@@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct netdev_private *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
index c1063d1..ce88c0f 100644 (file)
@@ -804,9 +804,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 static void dnet_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, "0");
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, "0", sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +977,7 @@ static struct platform_driver dnet_driver = {
        },
 };
 
-static int __init dnet_init(void)
-{
-       return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
-       platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Dave DNET Ethernet driver");
index 644e8fe..a3588fb 100644 (file)
@@ -288,15 +288,13 @@ struct be_drv_stats {
 };
 
 struct be_vf_cfg {
-       unsigned char vf_mac_addr[ETH_ALEN];
-       u32 vf_if_handle;
-       u32 vf_pmac_id;
-       u16 vf_vlan_tag;
-       u32 vf_tx_rate;
+       unsigned char mac_addr[ETH_ALEN];
+       int if_handle;
+       int pmac_id;
+       u16 vlan_tag;
+       u32 tx_rate;
 };
 
-#define BE_INVALID_PMAC_ID             0xffffffff
-
 struct be_adapter {
        struct pci_dev *pdev;
        struct net_device *netdev;
@@ -347,11 +345,13 @@ struct be_adapter {
 
        /* Ethtool knobs and info */
        char fw_ver[FW_VER_LEN];
-       u32 if_handle;          /* Used to configure filtering */
+       int if_handle;          /* Used to configure filtering */
        u32 pmac_id;            /* MAC addr handle used by BE card */
        u32 beacon_state;       /* for set_phys_id */
 
        bool eeh_err;
+       bool ue_detected;
+       bool fw_timeout;
        u32 port_num;
        bool promiscuous;
        bool wol;
@@ -359,7 +359,6 @@ struct be_adapter {
        u32 function_caps;
        u32 rx_fc;              /* Rx flow control */
        u32 tx_fc;              /* Tx flow control */
-       bool ue_detected;
        bool stats_cmd_sent;
        int link_speed;
        u8 port_type;
@@ -369,16 +368,20 @@ struct be_adapter {
        u32 flash_status;
        struct completion flash_compl;
 
-       bool be3_native;
-       bool sriov_enabled;
-       struct be_vf_cfg *vf_cfg;
+       u32 num_vfs;
        u8 is_virtfn;
+       struct be_vf_cfg *vf_cfg;
+       bool be3_native;
        u32 sli_family;
        u8 hba_port_num;
        u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
+#define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i)                                        \
+       for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+               i++, vf_cfg++)
 
 /* BladeEngine Generation numbers */
 #define BE_GEN2 2
@@ -524,6 +527,11 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
        return adapter->num_rx_qs > 1;
 }
 
+static inline bool be_error(struct be_adapter *adapter)
+{
+       return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
                u16 num_popped);
 extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
index 2c7b366..62868ea 100644 (file)
@@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
-       if (adapter->eeh_err) {
-               dev_info(&adapter->pdev->dev,
-                       "Error in Card Detected! Cannot issue commands\n");
+       if (be_error(adapter))
                return;
-       }
 
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -266,10 +263,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
        for (i = 0; i < mcc_timeout; i++) {
+               if (be_error(adapter))
+                       return -EIO;
+
                num = be_process_mcc(adapter, &status);
                if (num)
                        be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +277,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
                udelay(100);
        }
        if (i == mcc_timeout) {
-               dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+               dev_err(&adapter->pdev->dev, "FW not responding\n");
+               adapter->fw_timeout = true;
                return -1;
        }
        return status;
@@ -298,26 +296,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
        int msecs = 0;
        u32 ready;
 
-       if (adapter->eeh_err) {
-               dev_err(&adapter->pdev->dev,
-                       "Error detected in card.Cannot issue commands\n");
-               return -EIO;
-       }
-
        do {
+               if (be_error(adapter))
+                       return -EIO;
+
                ready = ioread32(db);
-               if (ready == 0xffffffff) {
-                       dev_err(&adapter->pdev->dev,
-                               "pci slot disconnected\n");
+               if (ready == 0xffffffff)
                        return -1;
-               }
 
                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;
 
                if (msecs > 4000) {
-                       dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+                       dev_err(&adapter->pdev->dev, "FW not responding\n");
+                       adapter->fw_timeout = true;
                        be_detect_dump_ue(adapter);
                        return -1;
                }
@@ -555,9 +548,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
        u8 *wrb;
        int status;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
 
@@ -619,7 +609,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 
 /* Use MCC */
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle)
+                       u8 type, bool permanent, u32 if_handle, u32 pmac_id)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
@@ -641,6 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
+               req->pmac_id = cpu_to_le32(pmac_id);
                req->permanent = 0;
        }
 
@@ -695,12 +686,15 @@ err:
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;
 
+       if (pmac_id == -1)
+               return 0;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -923,10 +917,14 @@ int be_cmd_txq_create(struct be_adapter *adapter,
        void *ctxt;
        int status;
 
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
 
-       wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;
 
@@ -952,14 +950,15 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
-       status = be_mbox_notify_wait(adapter);
+       status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }
 
-       mutex_unlock(&adapter->mbox_lock);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
 
        return status;
 }
@@ -1018,9 +1017,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
        u8 subsys = 0, opcode = 0;
        int status;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
 
@@ -1136,16 +1132,13 @@ err:
 }
 
 /* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;
 
-       if (adapter->eeh_err)
-               return -EIO;
-
-       if (!interface_id)
+       if (interface_id == -1)
                return 0;
 
        spin_lock_bh(&adapter->mcc_lock);
@@ -1254,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
        }
        req = embedded_payload(wrb);
 
+       if (lancer_chip(adapter))
+               req->hdr.version = 1;
+
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
 
@@ -1673,8 +1669,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
-       u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
-                       0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+       u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+                       0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+                       0x3ea83c02, 0x4a110304};
        int status;
 
        if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1833,53 @@ err_unlock:
        return status;
 }
 
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+               u32 data_size, u32 data_offset, const char *obj_name,
+               u32 *data_read, u32 *eof, u8 *addn_status)
+{
+       struct be_mcc_wrb *wrb;
+       struct lancer_cmd_req_read_object *req;
+       struct lancer_cmd_resp_read_object *resp;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err_unlock;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                       OPCODE_COMMON_READ_OBJECT,
+                       sizeof(struct lancer_cmd_req_read_object), wrb,
+                       NULL);
+
+       req->desired_read_len = cpu_to_le32(data_size);
+       req->read_offset = cpu_to_le32(data_offset);
+       strcpy(req->object_name, obj_name);
+       req->descriptor_count = cpu_to_le32(1);
+       req->buf_len = cpu_to_le32(data_size);
+       req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+       req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+       status = be_mcc_notify_wait(adapter);
+
+       resp = embedded_payload(wrb);
+       if (!status) {
+               *data_read = le32_to_cpu(resp->actual_read_len);
+               *eof = le32_to_cpu(resp->eof);
+       } else {
+               *addn_status = resp->additional_status;
+       }
+
+err_unlock:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
@@ -2238,3 +2282,99 @@ err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
 }
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+                                                       u32 *pmac_id)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_get_mac_list *req;
+       int status;
+       int mac_count;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                               OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+                               wrb, NULL);
+
+       req->hdr.domain = domain;
+
+       status = be_mcc_notify_wait(adapter);
+       if (!status) {
+               struct be_cmd_resp_get_mac_list *resp =
+                                               embedded_payload(wrb);
+               int i;
+               u8 *ctxt = &resp->context[0][0];
+               status = -EIO;
+               mac_count = resp->mac_count;
+               be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+               for (i = 0; i < mac_count; i++) {
+                       if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+                                          act, ctxt)) {
+                               *pmac_id = AMAP_GET_BITS
+                                       (struct amap_get_mac_list_context,
+                                        macid, ctxt);
+                               status = 0;
+                               break;
+                       }
+                       ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+               }
+       }
+
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+                       u8 mac_count, u32 domain)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_mac_list *req;
+       int status;
+       struct be_dma_mem cmd;
+
+       memset(&cmd, 0, sizeof(struct be_dma_mem));
+       cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+                       &cmd.dma, GFP_KERNEL);
+       if (!cmd.va) {
+               dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = cmd.va;
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                               OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+                               wrb, &cmd);
+
+       req->hdr.domain = domain;
+       req->mac_count = mac_count;
+       if (mac_count)
+               memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+       status = be_mcc_notify_wait(adapter);
+
+err:
+       dma_free_coherent(&adapter->pdev->dev, cmd.size,
+                               cmd.va, cmd.dma);
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
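be_cmd_set_mac_list() above uses the non-embedded payload variant: the request lives in a DMA-coherent buffer rather than inside the WRB, and is freed once the MCC completion comes back. A reduced sketch of that allocate/issue/free shape (foo_send_cmd() is hypothetical; the actual mailbox post is elided):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/dma-mapping.h>

    static int foo_send_cmd(struct device *dev, size_t len)
    {
            dma_addr_t dma;
            void *va;
            int status = 0;

            va = dma_alloc_coherent(dev, len, &dma, GFP_KERNEL);
            if (!va)
                    return -ENOMEM;

            /* build the request at 'va', hand 'dma' to the firmware,
             * then wait for the completion before freeing the buffer
             */

            dma_free_coherent(dev, len, va, dma);
            return status;
    }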
index a35cd03..0b694c6 100644 (file)
@@ -189,6 +189,9 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_PHY_DETAILS                  102
 #define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP          103
 #define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES   121
+#define OPCODE_COMMON_GET_MAC_LIST                     147
+#define OPCODE_COMMON_SET_MAC_LIST                     148
+#define OPCODE_COMMON_READ_OBJECT                      171
 #define OPCODE_COMMON_WRITE_OBJECT                     172
 
 #define OPCODE_ETH_RSS_CONFIG                          1
@@ -294,6 +297,7 @@ struct be_cmd_req_mac_query {
        u8 type;
        u8 permanent;
        u16 if_id;
+       u32 pmac_id;
 } __packed;
 
 struct be_cmd_resp_mac_query {
@@ -1161,6 +1165,38 @@ struct lancer_cmd_resp_write_object {
        u32 actual_write_len;
 };
 
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK                 (32*1024)
+#define LANCER_READ_FILE_EOF_MASK              0x80000000
+
+#define LANCER_FW_DUMP_FILE                    "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE                     "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE                     "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+       struct be_cmd_req_hdr hdr;
+       u32 desired_read_len;
+       u32 read_offset;
+       u8 object_name[104];
+       u32 descriptor_count;
+       u32 buf_len;
+       u32 addr_low;
+       u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+       u8 opcode;
+       u8 subsystem;
+       u8 rsvd1[2];
+       u8 status;
+       u8 additional_status;
+       u8 rsvd2[2];
+       u32 resp_len;
+       u32 actual_resp_len;
+       u32 actual_read_len;
+       u32 eof;
+};
+
 /************************ WOL *******************************/
 struct be_cmd_req_acpi_wol_magic_config{
        struct be_cmd_req_hdr hdr;
@@ -1307,6 +1343,34 @@ struct be_cmd_resp_set_func_cap {
        u8 rsvd[212];
 };
 
+/******************** GET/SET_MACLIST  **************************/
+#define BE_MAX_MAC                     64
+struct amap_get_mac_list_context {
+       u8 macid[31];
+       u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+       struct be_cmd_req_hdr hdr;
+       u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+       struct be_cmd_resp_hdr hdr;
+       u8 mac_count;
+       u8 rsvd1;
+       u16 rsvd2;
+       u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+       struct be_cmd_req_hdr hdr;
+       u8 mac_count;
+       u8 rsvd1;
+       u16 rsvd2;
+       struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
 /*************** HW Stats Get v1 **********************************/
 #define BE_TXP_SW_SZ                   48
 struct be_port_rxf_stats_v1 {
@@ -1413,15 +1477,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                       u8 type, bool permanent, u32 if_handle);
+                       u8 type, bool permanent, u32 if_handle, u32 pmac_id);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                        u32 if_id, u32 *pmac_id, u32 domain);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
-                       u32 pmac_id, u32 domain);
+                       int pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
                        u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
                        u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
                        u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
                        struct be_queue_info *eq, int eq_delay);
@@ -1480,6 +1544,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter,
                                u32 data_size, u32 data_offset,
                                const char *obj_name,
                                u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+               u32 data_size, u32 data_offset, const char *obj_name,
+               u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                                int offset);
 extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1573,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
 extern int be_cmd_req_native_mode(struct be_adapter *adapter);
 extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
 extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+                                                       u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+                                               u8 mac_count, u32 domain);
 
index bf8153e..6ba2dc6 100644 (file)
@@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev,
        memset(fw_on_flash, 0 , sizeof(fw_on_flash));
        be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
 
-       strcpy(drvinfo->driver, DRV_NAME);
-       strcpy(drvinfo->version, DRV_VER);
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
        strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
        if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
                strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev,
                strcat(drvinfo->fw_version, "]");
        }
 
-       strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->testinfo_len = 0;
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
 }
 
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+       u32 data_read = 0, eof;
+       u8 addn_status;
+       struct be_dma_mem data_len_cmd;
+       int status;
+
+       memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+       /* data_offset and data_size should be 0 to get reg len */
+       status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+                               file_name, &data_read, &eof, &addn_status);
+
+       return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+               u32 buf_len, void *buf)
+{
+       struct be_dma_mem read_cmd;
+       u32 read_len = 0, total_read_len = 0, chunk_size;
+       u32 eof = 0;
+       u8 addn_status;
+       int status = 0;
+
+       read_cmd.size = LANCER_READ_FILE_CHUNK;
+       read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+                       &read_cmd.dma);
+
+       if (!read_cmd.va) {
+               dev_err(&adapter->pdev->dev,
+                               "Memory allocation failure while reading dump\n");
+               return -ENOMEM;
+       }
+
+       while ((total_read_len < buf_len) && !eof) {
+               chunk_size = min_t(u32, (buf_len - total_read_len),
+                               LANCER_READ_FILE_CHUNK);
+               chunk_size = ALIGN(chunk_size, 4);
+               status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+                               total_read_len, file_name, &read_len,
+                               &eof, &addn_status);
+               if (!status) {
+                       memcpy(buf + total_read_len, read_cmd.va, read_len);
+                       total_read_len += read_len;
+                       eof &= LANCER_READ_FILE_EOF_MASK;
+               } else {
+                       status = -EIO;
+                       break;
+               }
+       }
+       pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+                       read_cmd.dma);
+
+       return status;
+}
+
 static int
 be_get_reg_len(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        u32 log_size = 0;
 
-       if (be_physfn(adapter))
-               be_cmd_get_reg_len(adapter, &log_size);
-
+       if (be_physfn(adapter)) {
+               if (lancer_chip(adapter))
+                       log_size = lancer_cmd_get_file_len(adapter,
+                                       LANCER_FW_DUMP_FILE);
+               else
+                       be_cmd_get_reg_len(adapter, &log_size);
+       }
        return log_size;
 }
 
@@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
 
        if (be_physfn(adapter)) {
                memset(buf, 0, regs->len);
-               be_cmd_get_regs(adapter, regs->len, buf);
+               if (lancer_chip(adapter))
+                       lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+                                       regs->len, buf);
+               else
+                       be_cmd_get_regs(adapter, regs->len, buf);
        }
 }
 
@@ -453,16 +520,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        return 0;
 }
 
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+                            struct ethtool_ringparam *ring)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       ring->rx_max_pending = adapter->rx_obj[0].q.len;
-       ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
-       ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
-       ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+       ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+       ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
 }
 
 static void
@@ -660,7 +724,17 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
 static int
 be_get_eeprom_len(struct net_device *netdev)
 {
-       return BE_READ_SEEPROM_LEN;
+       struct be_adapter *adapter = netdev_priv(netdev);
+       if (lancer_chip(adapter)) {
+               if (be_physfn(adapter))
+                       return lancer_cmd_get_file_len(adapter,
+                                       LANCER_VPD_PF_FILE);
+               else
+                       return lancer_cmd_get_file_len(adapter,
+                                       LANCER_VPD_VF_FILE);
+       } else {
+               return BE_READ_SEEPROM_LEN;
+       }
 }
 
 static int
@@ -675,6 +749,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
        if (!eeprom->len)
                return -EINVAL;
 
+       if (lancer_chip(adapter)) {
+               if (be_physfn(adapter))
+                       return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+                                       eeprom->len, data);
+               else
+                       return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+                                       eeprom->len, data);
+       }
+
        eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
 
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
index bf266a0..9b5304a 100644 (file)
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
 MODULE_AUTHOR("ServerEngines Corporation");
 MODULE_LICENSE("GPL");
 
-static ushort rx_frag_size = 2048;
 static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
 module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
 
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -237,7 +238,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                return -EADDRNOTAVAIL;
 
        status = be_cmd_mac_addr_query(adapter, current_mac,
-                       MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+                               MAC_ADDRESS_TYPE_NETWORK, false,
+                               adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -315,6 +317,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
        struct be_drv_stats *drvs = &adapter->drv_stats;
 
        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+       drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+       drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -549,11 +553,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
 }
 
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+                                       struct sk_buff *skb)
+{
+       u8 vlan_prio;
+       u16 vlan_tag;
+
+       vlan_tag = vlan_tx_tag_get(skb);
+       vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+       /* If vlan priority provided by OS is NOT in available bmap */
+       if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+               vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+                               adapter->recommended_prio;
+
+       return vlan_tag;
+}
+
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
 {
-       u8 vlan_prio = 0;
-       u16 vlan_tag = 0;
+       u16 vlan_tag;
 
        memset(hdr, 0, sizeof(*hdr));
 
@@ -584,12 +603,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 
        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
-               vlan_tag = vlan_tx_tag_get(skb);
-               vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-               /* If vlan priority provided by OS is NOT in available bmap */
-               if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
-                       vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
-                                       adapter->recommended_prio;
+               vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }
 
@@ -692,6 +706,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;
 
+       /* For vlan tagged pkts, BE
+        * 1) calculates checksum even when CSO is not requested
+        * 2) calculates checksum wrongly for padded pkt less than
+        * 60 bytes long.
+        * As a workaround, disable TX vlan offloading in such cases.
+        */
+       if (unlikely(vlan_tx_tag_present(skb) &&
+                    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+               skb = skb_share_check(skb, GFP_ATOMIC);
+               if (unlikely(!skb))
+                       goto tx_drop;
+
+               skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+               if (unlikely(!skb))
+                       goto tx_drop;
+
+               skb->vlan_tci = 0;
+       }
+
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +752,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
+tx_drop:
        return NETDEV_TX_OK;
 }
 
@@ -746,15 +780,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
  */
 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
 {
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
-       u32 if_handle;
 
        if (vf) {
-               if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
-               vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
-               status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+               vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+               status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+                                           1, 1, 0);
        }
 
        /* No need to further configure vids if in promiscuous mode */
@@ -779,31 +813,35 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
        return status;
 }
 
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
        adapter->vlans_added++;
        if (!be_physfn(adapter))
-               return;
+               return 0;
 
        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
+
+       return 0;
 }
 
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
        adapter->vlans_added--;
 
        if (!be_physfn(adapter))
-               return;
+               return 0;
 
        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
+
+       return 0;
 }
 
 static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +878,30 @@ done:
 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+       if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;
 
-       if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
-               status = be_cmd_pmac_del(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+       if (lancer_chip(adapter)) {
+               status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+       } else {
+               status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+                                        vf_cfg->pmac_id, vf + 1);
 
-       status = be_cmd_pmac_add(adapter, mac,
-                               adapter->vf_cfg[vf].vf_if_handle,
-                               &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+               status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+                                        &vf_cfg->pmac_id, vf + 1);
+       }
 
        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
-               memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+               memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
        return status;
 }
@@ -870,18 +910,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if (vf >= num_vfs)
+       if (vf >= adapter->num_vfs)
                return -EINVAL;
 
        vi->vf = vf;
-       vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
-       vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+       vi->tx_rate = vf_cfg->tx_rate;
+       vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
-       memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+       memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
 
        return 0;
 }
@@ -892,17 +933,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if ((vf >= num_vfs) || (vlan > 4095))
+       if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;
 
        if (vlan) {
-               adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+               adapter->vf_cfg[vf].vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
-               adapter->vf_cfg[vf].vf_vlan_tag = 0;
+               adapter->vf_cfg[vf].vlan_tag = 0;
                adapter->vlans_added--;
        }
 
@@ -920,16 +961,16 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;
 
-       if (!adapter->sriov_enabled)
+       if (!sriov_enabled(adapter))
                return -EPERM;
 
-       if ((vf >= num_vfs) || (rate < 0))
+       if (vf >= adapter->num_vfs || rate < 0)
                return -EINVAL;
 
        if (rate > 10000)
                rate = 10000;
 
-       adapter->vf_cfg[vf].vf_tx_rate = rate;
+       adapter->vf_cfg[vf].tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
        if (status)
@@ -1645,8 +1686,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
 
 static int be_num_txqs_want(struct be_adapter *adapter)
 {
-       if ((num_vfs && adapter->sriov_enabled) ||
-               be_is_mc(adapter) ||
+       if (sriov_enabled(adapter) || be_is_mc(adapter) ||
                lancer_chip(adapter) || !be_physfn(adapter) ||
                adapter->generation == BE_GEN2)
                return 1;
@@ -1662,9 +1702,12 @@ static int be_tx_queues_create(struct be_adapter *adapter)
        u8 i;
 
        adapter->num_tx_qs = be_num_txqs_want(adapter);
-       if (adapter->num_tx_qs != MAX_TX_QS)
+       if (adapter->num_tx_qs != MAX_TX_QS) {
+               rtnl_lock();
                netif_set_real_num_tx_queues(adapter->netdev,
                        adapter->num_tx_qs);
+               rtnl_unlock();
+       }
 
        adapter->tx_eq.max_eqd = 0;
        adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1736,6 @@ static int be_tx_queues_create(struct be_adapter *adapter)
                if (be_queue_alloc(adapter, q, TX_Q_LEN,
                        sizeof(struct be_eth_wrb)))
                        goto err;
-
-               if (be_cmd_txq_create(adapter, q, cq))
-                       goto err;
        }
        return 0;
 
@@ -1728,8 +1768,8 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
 static u32 be_num_rxqs_want(struct be_adapter *adapter)
 {
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-               !adapter->sriov_enabled && be_physfn(adapter) &&
-               !be_is_mc(adapter)) {
+            !sriov_enabled(adapter) && be_physfn(adapter) &&
+            !be_is_mc(adapter)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
@@ -1982,6 +2022,9 @@ void be_detect_dump_ue(struct be_adapter *adapter)
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
 
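+       /* nothing to do if an EEH or unrecoverable error is already flagged */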
+       if (adapter->eeh_err || adapter->ue_detected)
+               return;
+
        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2051,8 @@ void be_detect_dump_ue(struct be_adapter *adapter)
                sliport_status & SLIPORT_STATUS_ERR_MASK) {
                adapter->ue_detected = true;
                adapter->eeh_err = true;
-               dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+               dev_err(&adapter->pdev->dev,
+                       "Unrecoverable error in the card\n");
        }
 
        if (ue_lo) {
@@ -2036,53 +2080,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
        }
 }
 
-static void be_worker(struct work_struct *work)
-{
-       struct be_adapter *adapter =
-               container_of(work, struct be_adapter, work.work);
-       struct be_rx_obj *rxo;
-       int i;
-
-       if (!adapter->ue_detected)
-               be_detect_dump_ue(adapter);
-
-       /* when interrupts are not yet enabled, just reap any pending
-       * mcc completions */
-       if (!netif_running(adapter->netdev)) {
-               int mcc_compl, status = 0;
-
-               mcc_compl = be_process_mcc(adapter, &status);
-
-               if (mcc_compl) {
-                       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
-                       be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
-               }
-
-               goto reschedule;
-       }
-
-       if (!adapter->stats_cmd_sent) {
-               if (lancer_chip(adapter))
-                       lancer_cmd_get_pport_stats(adapter,
-                                               &adapter->stats_cmd);
-               else
-                       be_cmd_get_stats(adapter, &adapter->stats_cmd);
-       }
-
-       for_all_rx_queues(adapter, rxo, i) {
-               be_rx_eqd_update(adapter, rxo);
-
-               if (rxo->rx_post_starved) {
-                       rxo->rx_post_starved = false;
-                       be_post_rx_frags(rxo, GFP_KERNEL);
-               }
-       }
-
-reschedule:
-       adapter->work_counter++;
-       schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
 static void be_msix_disable(struct be_adapter *adapter)
 {
        if (msix_enabled(adapter)) {
@@ -2119,27 +2116,28 @@ done:
 static int be_sriov_enable(struct be_adapter *adapter)
 {
        be_check_sriov_fn_type(adapter);
+
 #ifdef CONFIG_PCI_IOV
        if (be_physfn(adapter) && num_vfs) {
                int status, pos;
-               u16 nvfs;
+               u16 dev_vfs;
 
                pos = pci_find_ext_capability(adapter->pdev,
                                                PCI_EXT_CAP_ID_SRIOV);
                pci_read_config_word(adapter->pdev,
-                                       pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+                                    pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
 
-               if (num_vfs > nvfs) {
+               adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+               if (adapter->num_vfs != num_vfs)
                        dev_info(&adapter->pdev->dev,
-                                       "Device supports %d VFs and not %d\n",
-                                       nvfs, num_vfs);
-                       num_vfs = nvfs;
-               }
+                                "Device supports %d VFs and not %d\n",
+                                adapter->num_vfs, num_vfs);
 
-               status = pci_enable_sriov(adapter->pdev, num_vfs);
-               adapter->sriov_enabled = status ? false : true;
+               status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+               if (status)
+                       adapter->num_vfs = 0;
 
-               if (adapter->sriov_enabled) {
+               if (adapter->num_vfs) {
                        adapter->vf_cfg = kcalloc(num_vfs,
                                                sizeof(struct be_vf_cfg),
                                                GFP_KERNEL);
@@ -2154,10 +2152,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
 static void be_sriov_disable(struct be_adapter *adapter)
 {
 #ifdef CONFIG_PCI_IOV
-       if (adapter->sriov_enabled) {
+       if (sriov_enabled(adapter)) {
                pci_disable_sriov(adapter->pdev);
                kfree(adapter->vf_cfg);
-               adapter->sriov_enabled = false;
+               adapter->num_vfs = 0;
        }
 #endif
 }
@@ -2351,8 +2349,8 @@ static int be_close(struct net_device *netdev)
 static int be_rx_queues_setup(struct be_adapter *adapter)
 {
        struct be_rx_obj *rxo;
-       int rc, i;
-       u8 rsstable[MAX_RSS_QS];
+       int rc, i, j;
+       u8 rsstable[128];
 
        for_all_rx_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2362,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
        }
 
        if (be_multi_rxq(adapter)) {
-               for_all_rss_queues(adapter, rxo, i)
-                       rsstable[i] = rxo->rss_id;
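+               /* fill all 128 indirection table entries by cycling the RSS queue ids */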
+               for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+                       for_all_rss_queues(adapter, rxo, i) {
+                               if ((j + i) >= 128)
+                                       break;
+                               rsstable[j + i] = rxo->rss_id;
+                       }
+               }
+               rc = be_cmd_rss_config(adapter, rsstable, 128);
 
-               rc = be_cmd_rss_config(adapter, rsstable,
-                       adapter->num_rx_qs - 1);
                if (rc)
                        return rc;
        }
@@ -2465,19 +2467,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
        u32 vf;
        int status = 0;
        u8 mac[ETH_ALEN];
+       struct be_vf_cfg *vf_cfg;
 
        be_vf_eth_addr_generate(adapter, mac);
 
-       for (vf = 0; vf < num_vfs; vf++) {
-               status = be_cmd_pmac_add(adapter, mac,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       &adapter->vf_cfg[vf].vf_pmac_id,
-                                       vf + 1);
+       for_all_vfs(adapter, vf_cfg, vf) {
+               if (lancer_chip(adapter)) {
+                       status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+               } else {
+                       status = be_cmd_pmac_add(adapter, mac,
+                                                vf_cfg->if_handle,
+                                                &vf_cfg->pmac_id, vf + 1);
+               }
+
                if (status)
                        dev_err(&adapter->pdev->dev,
-                               "Mac address add failed for VF %d\n", vf);
+                               "MAC address assignment failed for VF %d\n", vf);
                else
-                       memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+                       memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
 
                mac[5] += 1;
        }
@@ -2486,24 +2493,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
 
 static void be_vf_clear(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        u32 vf;
 
-       for (vf = 0; vf < num_vfs; vf++) {
-               if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
-                       be_cmd_pmac_del(adapter,
-                                       adapter->vf_cfg[vf].vf_if_handle,
-                                       adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
-       }
+       for_all_vfs(adapter, vf_cfg, vf) {
+               if (lancer_chip(adapter))
+                       be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+               else
+                       be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+                                       vf_cfg->pmac_id, vf + 1);
 
-       for (vf = 0; vf < num_vfs; vf++)
-               if (adapter->vf_cfg[vf].vf_if_handle)
-                       be_cmd_if_destroy(adapter,
-                               adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+               be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+       }
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-       if (be_physfn(adapter) && adapter->sriov_enabled)
+       if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
@@ -2511,61 +2517,94 @@ static int be_clear(struct be_adapter *adapter)
        be_mcc_queues_destroy(adapter);
        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);
-       adapter->eq_next_idx = 0;
-
-       adapter->be3_native = false;
-       adapter->promiscuous = false;
 
        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);
        return 0;
 }
 
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+       struct be_vf_cfg *vf_cfg;
+       int vf;
+
+       for_all_vfs(adapter, vf_cfg, vf) {
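+               /* -1 marks the handle/pmac as not yet created */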
+               vf_cfg->if_handle = -1;
+               vf_cfg->pmac_id = -1;
+       }
+}
+
 static int be_vf_setup(struct be_adapter *adapter)
 {
+       struct be_vf_cfg *vf_cfg;
        u32 cap_flags, en_flags, vf;
        u16 lnk_speed;
        int status;
 
-       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
-       for (vf = 0; vf < num_vfs; vf++) {
+       be_vf_setup_init(adapter);
+
+       cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                               BE_IF_FLAGS_MULTICAST;
+       for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
-                                       &adapter->vf_cfg[vf].vf_if_handle,
-                                       NULL, vf+1);
+                                         &vf_cfg->if_handle, NULL, vf + 1);
                if (status)
                        goto err;
-               adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
        }
 
-       if (!lancer_chip(adapter)) {
-               status = be_vf_eth_addr_config(adapter);
-               if (status)
-                       goto err;
-       }
+       status = be_vf_eth_addr_config(adapter);
+       if (status)
+               goto err;
 
-       for (vf = 0; vf < num_vfs; vf++) {
+       for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
-                               vf + 1);
+                                                 vf + 1);
                if (status)
                        goto err;
-               adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+               vf_cfg->tx_rate = lnk_speed * 10;
        }
        return 0;
 err:
        return status;
 }
 
+static void be_setup_init(struct be_adapter *adapter)
+{
+       adapter->vlan_prio_bmap = 0xff;
+       adapter->link_speed = -1;
+       adapter->if_handle = -1;
+       adapter->be3_native = false;
+       adapter->promiscuous = false;
+       adapter->eq_next_idx = 0;
+}
+
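+/* Get a pmac_id from the function's MAC list, query the corresponding MAC
+ * address and add it on adapter->if_handle as the active MAC.
+ */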
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
+       u32 pmac_id;
+       int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+       if (status != 0)
+               goto do_none;
+       status = be_cmd_mac_addr_query(adapter, mac,
+                       MAC_ADDRESS_TYPE_NETWORK,
+                       false, adapter->if_handle, pmac_id);
+       if (status != 0)
+               goto do_none;
+       status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+                       &adapter->pmac_id, 0);
+do_none:
+       return status;
+}
+
 static int be_setup(struct be_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        u32 cap_flags, en_flags;
        u32 tx_fc, rx_fc;
-       int status;
+       int status, i;
        u8 mac[ETH_ALEN];
+       struct be_tx_obj *txo;
 
-       /* Allow all priorities by default. A GRP5 evt may modify this */
-       adapter->vlan_prio_bmap = 0xff;
-       adapter->link_speed = -1;
+       be_setup_init(adapter);
 
        be_cmd_req_native_mode(adapter);
 
@@ -2583,7 +2622,7 @@ static int be_setup(struct be_adapter *adapter)
 
        memset(mac, 0, ETH_ALEN);
        status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
-                       true /*permanent */, 0);
+                       true /*permanent */, 0, 0);
        if (status)
                return status;
        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2631,8 @@ static int be_setup(struct be_adapter *adapter)
        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                        BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
        cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
-                       BE_IF_FLAGS_PROMISCUOUS;
+                       BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
        if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
                cap_flags |= BE_IF_FLAGS_RSS;
                en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2643,23 @@ static int be_setup(struct be_adapter *adapter)
        if (status != 0)
                goto err;
 
-       /* For BEx, the VF's permanent mac queried from card is incorrect.
-        * Query the mac configued by the PF using if_handle
-        */
-       if (!be_physfn(adapter) && !lancer_chip(adapter)) {
-               status = be_cmd_mac_addr_query(adapter, mac,
-                       MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+       for_all_tx_queues(adapter, txo, i) {
+               status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+               if (status)
+                       goto err;
+       }
+
+       /* The VF's permanent MAC queried from the card is incorrect.
+        * For BEx: query the MAC configured by the PF using if_handle.
+        * For Lancer: get and use mac_list to obtain the MAC address.
+        */
+       if (!be_physfn(adapter)) {
+               if (lancer_chip(adapter))
+                       status = be_configure_mac_from_list(adapter, mac);
+               else
+                       status = be_cmd_mac_addr_query(adapter, mac,
+                                       MAC_ADDRESS_TYPE_NETWORK, false,
+                                       adapter->if_handle, 0);
                if (!status) {
                        memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                        memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2675,21 @@ static int be_setup(struct be_adapter *adapter)
        be_set_rx_mode(adapter->netdev);
 
        status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
-       if (status)
+       /* For Lancer: It is legal for this cmd to fail on VF */
+       if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
                goto err;
+
        if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
                status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
-               if (status)
+               /* For Lancer: It is legal for this cmd to fail on VF */
+               if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
                        goto err;
        }
 
        pcie_set_readrq(adapter->pdev, 4096);
 
-       if (be_physfn(adapter) && adapter->sriov_enabled) {
+       if (sriov_enabled(adapter)) {
                status = be_vf_setup(adapter);
                if (status)
                        goto err;
@@ -2647,6 +2701,19 @@ err:
        return status;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_rx_obj *rxo;
+       int i;
+
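+       /* poll tx/rx event queues directly when interrupts are unavailable (netpoll) */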
+       event_handle(adapter, &adapter->tx_eq, false);
+       for_all_rx_queues(adapter, rxo, i)
+               event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
 #define FW_FILE_HDR_SIGN       "ServerEngines Corp. "
 static bool be_flash_redboot(struct be_adapter *adapter,
                        const u8 *p, u32 img_start, int image_size,
@@ -2995,7 +3062,10 @@ static struct net_device_ops be_netdev_ops = {
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
-       .ndo_get_vf_config      = be_get_vf_config
+       .ndo_get_vf_config      = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = be_netpoll,
+#endif
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -3267,7 +3337,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
 
 static int lancer_wait_ready(struct be_adapter *adapter)
 {
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
        u32 sliport_status;
        int status = 0, i;
 
@@ -3276,7 +3346,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;
 
-               msleep(20);
+               msleep(1000);
        }
 
        if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3383,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
        return status;
 }
 
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+       int status;
+       u32 sliport_status;
+
+       if (adapter->eeh_err || adapter->ue_detected)
+               return;
+
+       sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
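+       /* an error in SLIPORT_STATUS requires a function reset and a full re-setup */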
+       if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+               dev_err(&adapter->pdev->dev,
+                               "Adapter in error state. "
+                               "Trying to recover.\n");
+
+               status = lancer_test_and_set_rdy_state(adapter);
+               if (status)
+                       goto err;
+
+               netif_device_detach(adapter->netdev);
+
+               if (netif_running(adapter->netdev))
+                       be_close(adapter->netdev);
+
+               be_clear(adapter);
+
+               adapter->fw_timeout = false;
+
+               status = be_setup(adapter);
+               if (status)
+                       goto err;
+
+               if (netif_running(adapter->netdev)) {
+                       status = be_open(adapter->netdev);
+                       if (status)
+                               goto err;
+               }
+
+               netif_device_attach(adapter->netdev);
+
+               dev_err(&adapter->pdev->dev,
+                               "Adapter error recovery succeeded\n");
+       }
+       return;
+err:
+       dev_err(&adapter->pdev->dev,
+                       "Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+       struct be_adapter *adapter =
+               container_of(work, struct be_adapter, work.work);
+       struct be_rx_obj *rxo;
+       int i;
+
+       if (lancer_chip(adapter))
+               lancer_test_and_recover_fn_err(adapter);
+
+       be_detect_dump_ue(adapter);
+
+       /* when interrupts are not yet enabled, just reap any pending
+       * mcc completions */
+       if (!netif_running(adapter->netdev)) {
+               int mcc_compl, status = 0;
+
+               mcc_compl = be_process_mcc(adapter, &status);
+
+               if (mcc_compl) {
+                       struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+                       be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+               }
+
+               goto reschedule;
+       }
+
+       if (!adapter->stats_cmd_sent) {
+               if (lancer_chip(adapter))
+                       lancer_cmd_get_pport_stats(adapter,
+                                               &adapter->stats_cmd);
+               else
+                       be_cmd_get_stats(adapter, &adapter->stats_cmd);
+       }
+
+       for_all_rx_queues(adapter, rxo, i) {
+               be_rx_eqd_update(adapter, rxo);
+
+               if (rxo->rx_post_starved) {
+                       rxo->rx_post_starved = false;
+                       be_post_rx_frags(rxo, GFP_KERNEL);
+               }
+       }
+
+reschedule:
+       adapter->work_counter++;
+       schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
 {
@@ -3365,7 +3533,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
                goto disable_sriov;
 
        if (lancer_chip(adapter)) {
-               status = lancer_test_and_set_rdy_state(adapter);
+               status = lancer_wait_ready(adapter);
+               if (!status) {
+                       iowrite32(SLI_PORT_CONTROL_IP_MASK,
+                                       adapter->db + SLIPORT_CONTROL_OFFSET);
+                       status = lancer_test_and_set_rdy_state(adapter);
+               }
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non recoverable error\n");
                        goto ctrl_clean;
@@ -3559,6 +3732,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
 
        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;
+       adapter->ue_detected = false;
+       adapter->fw_timeout = false;
 
        status = pci_enable_device(pdev);
        if (status)
index 251b635..60f0e78 100644 (file)
@@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = {
        },
 };
 
-static int __init ethoc_init(void)
-{
-       return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
-       platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
 
 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
 MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
index 61d2bdd..c82d444 100644 (file)
@@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
 {
        struct netdev_private *np = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index 5272f9d..820de8b 100644 (file)
@@ -21,7 +21,7 @@ config NET_VENDOR_FREESCALE
 if NET_VENDOR_FREESCALE
 
 config FEC
-       bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+       tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
                   ARCH_MXC || ARCH_MXS)
        default ARCH_MXC || ARCH_MXS if ARM
index c136230..4ea2bdc 100644 (file)
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
 MODULE_DEVICE_TABLE(platform, fec_devtype);
 
 enum imx_fec_type {
-       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
+       IMX25_FEC = 1,  /* runs on i.mx25/50/53 */
        IMX27_FEC,      /* runs on i.mx27/35/51 */
        IMX28_FEC,
        IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #elif defined (CONFIG_M5272C3)
 #define        FEC_FLASHMAC    (0xffe04000 + 4)
 #elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC   0xffc0406b
+#define FEC_FLASHMAC   0xffc0406b
 #else
 #define        FEC_FLASHMAC    0
 #endif
@@ -260,6 +260,8 @@ struct fec_enet_private {
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
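+/* number of FEC instances using the MDIO bus; the bus is freed when the last one is removed */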
+static int mii_cnt;
+
 static void *swap_buffer(void *bufaddr, int len)
 {
        int i;
@@ -516,6 +518,7 @@ fec_stop(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
+       u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
 
        /* We cannot expect a graceful transmit stop without link !!! */
        if (fep->link) {
@@ -532,8 +535,10 @@ fec_stop(struct net_device *ndev)
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 
        /* We have to keep ENET enabled to have MII interrupt stay working */
-       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+       if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
                writel(2, fep->hwp + FEC_ECNTRL);
+               writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+       }
 }
 
 
@@ -819,7 +824,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
                        iap = (unsigned char *)FEC_FLASHMAC;
 #else
                if (pdata)
-                       memcpy(iap, pdata->mac, ETH_ALEN);
+                       iap = (unsigned char *)&pdata->mac;
 #endif
        }
 
@@ -866,6 +871,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
        if (phy_dev->link) {
                if (fep->full_duplex != phy_dev->duplex) {
                        fec_restart(ndev, phy_dev->duplex);
+                       /* prevent unnecessary second fec_restart() below */
+                       fep->link = phy_dev->link;
                        status_change = 1;
                }
        }
@@ -973,8 +980,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        }
 
        if (phy_id >= PHY_MAX_ADDR) {
-               printk(KERN_INFO "%s: no PHY, assuming direct connection "
-                       "to switch\n", ndev->name);
+               printk(KERN_INFO
+                       "%s: no PHY, assuming direct connection to switch\n",
+                       ndev->name);
                strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
                phy_id = 0;
        }
@@ -999,8 +1007,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        fep->link = 0;
        fep->full_duplex = 0;
 
-       printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-               "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+       printk(KERN_INFO
+               "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+               ndev->name,
                fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
                fep->phy_dev->irq);
 
@@ -1034,8 +1043,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         */
        if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
-               fep->mii_bus = fec0_mii_bus;
-               return 0;
+               if (mii_cnt && fec0_mii_bus) {
+                       fep->mii_bus = fec0_mii_bus;
+                       mii_cnt++;
+                       return 0;
+               }
+               return -ENOENT;
        }
 
        fep->mii_timeout = 0;
@@ -1080,6 +1093,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        if (mdiobus_register(fep->mii_bus))
                goto err_out_free_mdio_irq;
 
+       mii_cnt++;
+
        /* save fec0 mii_bus */
        if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
                fec0_mii_bus = fep->mii_bus;
@@ -1096,11 +1111,11 @@ err_out:
 
 static void fec_enet_mii_remove(struct fec_enet_private *fep)
 {
-       if (fep->phy_dev)
-               phy_disconnect(fep->phy_dev);
-       mdiobus_unregister(fep->mii_bus);
-       kfree(fep->mii_bus->irq);
-       mdiobus_free(fep->mii_bus);
+       if (--mii_cnt == 0) {
+               mdiobus_unregister(fep->mii_bus);
+               kfree(fep->mii_bus->irq);
+               mdiobus_free(fep->mii_bus);
+       }
 }
 
 static int fec_enet_get_settings(struct net_device *ndev,
@@ -1574,8 +1589,12 @@ fec_probe(struct platform_device *pdev)
 
        for (i = 0; i < FEC_IRQ_NUM; i++) {
                irq = platform_get_irq(pdev, i);
-               if (i && irq < 0)
-                       break;
+               if (irq < 0) {
+                       if (i)
+                               break;
+                       ret = irq;
+                       goto failed_irq;
+               }
                ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
                if (ret) {
                        while (--i >= 0) {
@@ -1586,7 +1605,7 @@ fec_probe(struct platform_device *pdev)
                }
        }
 
-       fep->clk = clk_get(&pdev->dev, "fec_clk");
+       fep->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(fep->clk)) {
                ret = PTR_ERR(fep->clk);
                goto failed_clk;
@@ -1638,13 +1657,18 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct resource *r;
+       int i;
 
-       fec_stop(ndev);
+       unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
+       for (i = 0; i < FEC_IRQ_NUM; i++) {
+               int irq = platform_get_irq(pdev, i);
+               if (irq > 0)
+                       free_irq(irq, ndev);
+       }
        clk_disable(fep->clk);
        clk_put(fep->clk);
        iounmap(fep->hwp);
-       unregister_netdev(ndev);
        free_netdev(ndev);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 5bf5471..910a8e1 100644 (file)
@@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = {
        .remove = fs_enet_remove,
 };
 
-static int __init fs_init(void)
-{
-       return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
-       platform_driver_unregister(&fs_enet_driver);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void fs_enet_netpoll(struct net_device *dev)
 {
@@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev)
 }
 #endif
 
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
index b09270b..0f2d1a7 100644 (file)
@@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
        .remove = fs_enet_mdio_remove,
 };
 
-static int fs_enet_mdio_bb_init(void)
-{
-       return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
-       platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
index e0e9d6c..55bb867 100644 (file)
@@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
        .remove = fs_enet_mdio_remove,
 };
 
-static int fs_enet_mdio_fec_init(void)
-{
-       return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
-       platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
index 4d9f84b..9eb8159 100644 (file)
@@ -360,12 +360,11 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 
        if (tbiaddr == -1) {
                err = -EBUSY;
-
                goto err_free_irqs;
+       } else {
+               out_be32(tbipa, tbiaddr);
        }
 
-       out_be32(tbipa, tbiaddr);
-
        err = of_mdiobus_register(new_bus, np);
        if (err) {
                printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -443,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = {
        .remove = fsl_pq_mdio_remove,
 };
 
-int __init fsl_pq_mdio_init(void)
-{
-       return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
 
-void fsl_pq_mdio_exit(void)
-{
-       platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
 MODULE_LICENSE("GPL");
index 83199fd..e01cdaa 100644 (file)
@@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 
        mac_addr = of_get_mac_address(np);
        if (mac_addr)
-               memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        if (model && !strcasecmp(model, "TSEC"))
                priv->device_flags =
@@ -2306,7 +2306,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
 }
 
 /* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = NULL;
@@ -3114,7 +3114,7 @@ static void gfar_set_multi(struct net_device *dev)
 static void gfar_clear_exact_match(struct net_device *dev)
 {
        int idx;
-       static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+       static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
        for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
                gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3137,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
 {
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
-       u32 result = ether_crc(MAC_ADDR_LEN, addr);
+       u32 result = ether_crc(ETH_ALEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3158,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int idx;
-       char tmpbuf[MAC_ADDR_LEN];
+       char tmpbuf[ETH_ALEN];
        u32 tempval;
        u32 __iomem *macptr = &regs->macstnaddr1;
 
@@ -3166,8 +3166,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
        /* Now copy it into the mac registers backwards, cuz */
        /* little endian is silly */
-       for (idx = 0; idx < MAC_ADDR_LEN; idx++)
-               tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+       for (idx = 0; idx < ETH_ALEN; idx++)
+               tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
 
        gfar_write(macptr, *((u32 *) (tmpbuf)));
 
@@ -3281,16 +3281,4 @@ static struct platform_driver gfar_driver = {
        .remove = gfar_remove,
 };
 
-static int __init gfar_init(void)
-{
-       return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
-       platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
index 9aa4377..fe7ac3a 100644 (file)
@@ -74,9 +74,6 @@ struct ethtool_rx_list {
  * will be the next highest multiple of 512 bytes. */
 #define INCREMENTAL_BUFFER_SIZE 512
 
-
-#define MAC_ADDR_LEN 6
-
 #define PHY_INIT_TIMEOUT 100000
 #define GFAR_PHY_CHANGE_TIME 2
 
@@ -1179,9 +1176,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
 extern void gfar_configure_coalescing(struct gfar_private *priv,
                unsigned long tx_mask, unsigned long rx_mask);
 void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
 extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 212736b..5890f4b 100644 (file)
@@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
        return err;
 }
 
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        int err = 0, i = 0;
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
                gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
 
        /* We need a copy of the filer table because
         * we want to change its order */
-       temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+       temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
        if (temp_table == NULL)
                return -ENOMEM;
-       memcpy(temp_table, tab, sizeof(*temp_table));
 
        mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
                        sizeof(struct gfar_mask_entry), GFP_KERNEL);
index f67b8ae..83e0ed7 100644 (file)
@@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = {
        .remove      = gianfar_ptp_remove,
 };
 
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
-       return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
-       platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
 
 MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
 MODULE_DESCRIPTION("PTP clock using the eTSEC");
index b5dc027..ba2dc08 100644 (file)
@@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
 
 static inline int compare_addr(u8 **addr1, u8 **addr2)
 {
-       return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+       return memcmp(addr1, addr2, ETH_ALEN);
 }
 
 #ifdef DEBUG
index d12fcad..2e395a2 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/if_ether.h>
 
 #include <asm/immap_qe.h>
 #include <asm/qe.h>
@@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics {
 #define TX_RING_MOD_MASK(size)                  (size-1)
 #define RX_RING_MOD_MASK(size)                  (size-1)
 
-#define ENET_NUM_OCTETS_PER_ADDRESS             6
 #define ENET_GROUP_ADDR                         0x01   /* Group address mask
                                                           for ethernet
                                                           addresses */
@@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses {
 
 /* UCC GETH 82xx Ethernet Address Container */
 struct enet_addr_container {
-       u8 address[ENET_NUM_OCTETS_PER_ADDRESS];        /* ethernet address */
+       u8 address[ETH_ALEN];   /* ethernet address */
        enum ucc_geth_enet_address_recognition_location location;       /* location in
                                                                   82xx address
                                                                   recognition
@@ -1194,7 +1194,7 @@ struct ucc_geth_private {
        u16 cpucount[NUM_TX_QUEUES];
        u16 __iomem *p_cpucount[NUM_TX_QUEUES];
        int indAddrRegUsed[NUM_OF_PADDRS];
-       u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS];   /* ethernet address */
+       u8 paddr[NUM_OF_PADDRS][ETH_ALEN];      /* ethernet address */
        u8 numGroupAddrInHash;
        u8 numIndAddrInHash;
        u8 numIndAddrInReg;
index 1541675..ee84b47 100644 (file)
@@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       snprintf(info->bus_info, sizeof(info->bus_info),
+               "PCMCIA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
index 067c460..114cda7 100644 (file)
@@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
 static void eepro_ethtool_get_drvinfo(struct net_device *dev,
                                        struct ethtool_drvinfo *drvinfo)
 {
-       strcpy(drvinfo->driver, DRV_NAME);
-       strcpy(drvinfo->version, DRV_VERSION);
-       sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+       snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+               "ISA 0x%lx", dev->base_addr);
 }
 
 static const struct ethtool_ops eepro_ethtool_ops = {
index bfeccbf..3554414 100644 (file)
@@ -2114,17 +2114,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2132,6 +2134,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2140,24 +2143,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
-       return;
+       return err;
 }
 
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
        struct hcp_ehea_port_cb1 *cb1;
        int index;
        u64 hret;
+       int err = 0;
 
        cb1 = (void *)get_zeroed_page(GFP_KERNEL);
        if (!cb1) {
                pr_err("no mem for cb1\n");
+               err = -ENOMEM;
                goto out;
        }
 
@@ -2165,6 +2172,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                                      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
        if (hret != H_SUCCESS) {
                pr_err("query_ehea_port failed\n");
+               err = -EINVAL;
                goto out;
        }
 
@@ -2173,10 +2181,13 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
        hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
-       if (hret != H_SUCCESS)
+       if (hret != H_SUCCESS) {
                pr_err("modify_ehea_port failed\n");
+               err = -EINVAL;
+       }
 out:
        free_page((unsigned long)cb1);
+       return err;
 }
 
 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
index ed79b2d..2abce96 100644 (file)
@@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
        if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
                zmii_detach(dev->zmii_dev, dev->zmii_port);
 
+       busy_phy_map &= ~(1 << dev->phy.address);
+       DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
        mal_unregister_commac(dev->mal, &dev->commac);
        emac_put_deps(dev);
 
index b1cd41b..e877371 100644 (file)
@@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
                sizeof(info->version) - 1);
 }
 
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /*
         * Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        return rc1 ? rc1 : rc2;
 }
 
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
index 8fd80a0..075451d 100644 (file)
@@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
        }
 
        /* The last cycle is a tri-state, so read from the PHY. */
-       for (j = 7; j < 8; j++) {
-               for (i = 0; i < p[j].len; i++) {
-                       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
-                       p[j].field |= ((ipg_r8(PHY_CTRL) &
-                               IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
-                       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
-               }
-       }
+       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+       ipg_r8(PHY_CTRL);
+       ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
 }
 
 static void ipg_set_led_mode(struct net_device *dev)
index 5a2fdf7..9436397 100644 (file)
@@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev,
        struct ethtool_drvinfo *info)
 {
        struct nic *nic = netdev_priv(netdev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(nic->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(nic->pdev),
+               sizeof(info->bus_info));
 }
 
 #define E100_PHY_REGS 0x1C
index 2b223ac..3103f0b 100644 (file)
@@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *drvinfo)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
 
-       strncpy(drvinfo->driver,  e1000_driver_name, 32);
-       strncpy(drvinfo->version, e1000_driver_version, 32);
+       strlcpy(drvinfo->driver,  e1000_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, e1000_driver_version,
+               sizeof(drvinfo->version));
 
-       sprintf(firmware_version, "N/A");
-       strncpy(drvinfo->fw_version, firmware_version, 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = e1000_get_regs_len(netdev);
        drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
index 5c9a840..cf7e3c0 100644 (file)
@@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
 #define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
 
 /* MAC decode size is 128K - This is the size of BAR0 */
 #define MAC_DECODE_SIZE (128 * 1024)
index cf480b5..053f012 100644 (file)
@@ -167,9 +167,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
 
 static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+                           netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
 #ifdef CONFIG_PM
@@ -806,7 +807,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
        }
 }
 
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -820,10 +822,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                e1000_vlan_mode(netdev, features);
@@ -4577,7 +4580,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
                e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4604,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
                e1000_irq_enable(adapter);
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4613,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if ((hw->mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
            (vid == adapter->mng_vlan_id))
-               return;
+               return 0;
 
        if (!e1000_vlan_used(adapter))
                e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4625,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        e1000_write_vfta(hw, index, vfta);
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4650,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 
        if (!e1000_vlan_used(adapter))
                e1000_vlan_filter_on_off(adapter, false);
+
+       return 0;
 }
 
 static void e1000_restore_vlan(struct e1000_adapter *adapter)
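
Editorial aside: the hunks above widen the feature mask from u32 to netdev_features_t and route requested features through a fixup hook that keeps dependent bits consistent (here, Rx and Tx VLAN acceleration cannot be toggled separately). A hedged sketch of that fixup idea is below; the flag values and the 64-bit mask typedef are stand-ins, not the kernel's netdev_features_t definitions.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t features_t;

    #define F_HW_VLAN_RX (1ULL << 0)
    #define F_HW_VLAN_TX (1ULL << 1)

    /* Rx/Tx VLAN acceleration cannot be toggled independently on this model
     * device, so mirror the Rx setting onto the Tx bit before applying. */
    static features_t fix_features(features_t requested)
    {
            if (requested & F_HW_VLAN_RX)
                    requested |= F_HW_VLAN_TX;
            else
                    requested &= ~F_HW_VLAN_TX;
            return requested;
    }

    int main(void)
    {
            features_t req = F_HW_VLAN_TX;          /* Tx on, Rx off */

            printf("requested %#llx -> applied %#llx\n",
                   (unsigned long long)req,
                   (unsigned long long)fix_features(req));
            return 0;
    }
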
index 9fe18d1..f478a22 100644 (file)
@@ -309,6 +309,7 @@ struct e1000_adapter {
        u32 txd_cmd;
 
        bool detect_tx_hung;
+       bool tx_hang_recheck;
        u8 tx_timeout_factor;
 
        u32 tx_int_delay;
index 69c9d21..fb2c28e 100644 (file)
@@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *drvinfo)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
 
-       strncpy(drvinfo->driver,  e1000e_driver_name,
-               sizeof(drvinfo->driver) - 1);
-       strncpy(drvinfo->version, e1000e_driver_version,
-               sizeof(drvinfo->version) - 1);
+       strlcpy(drvinfo->driver,  e1000e_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, e1000e_driver_version,
+               sizeof(drvinfo->version));
 
        /*
         * EEPROM image version # is reported as firmware version # for
         * PCI-E controllers
         */
-       snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d-%d",
                (adapter->eeprom_vers & 0xF000) >> 12,
                (adapter->eeprom_vers & 0x0FF0) >> 4,
                (adapter->eeprom_vers & 0x000F));
 
-       strncpy(drvinfo->fw_version, firmware_version,
-               sizeof(drvinfo->fw_version) - 1);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-               sizeof(drvinfo->bus_info) - 1);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = e1000_get_regs_len(netdev);
        drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
index a855db1..90953b4 100644 (file)
@@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
                        regs[n] = __er32(hw, E1000_TARC(n));
                break;
        default:
-               printk(KERN_INFO "%-15s %08x\n",
-                      reginfo->name, __er32(hw, reginfo->ofs));
+               pr_info("%-15s %08x\n",
+                       reginfo->name, __er32(hw, reginfo->ofs));
                return;
        }
 
        snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
-       printk(KERN_INFO "%-15s ", rname);
-       for (n = 0; n < 2; n++)
-               printk(KERN_CONT "%08x ", regs[n]);
-       printk(KERN_CONT "\n");
+       pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 }
 
 /*
@@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               printk(KERN_INFO "Device Name     state            "
-                      "trans_start      last_rx\n");
-               printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-                      netdev->name, netdev->state, netdev->trans_start,
-                      netdev->last_rx);
+               pr_info("Device Name     state            trans_start      last_rx\n");
+               pr_info("%-15s %016lX %016lX %016lX\n",
+                       netdev->name, netdev->state, netdev->trans_start,
+                       netdev->last_rx);
        }
 
        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
-       printk(KERN_INFO " Register Name   Value\n");
+       pr_info(" Register Name   Value\n");
        for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
             reginfo->name; reginfo++) {
                e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter)
                goto exit;
 
        dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-              " leng ntw timestamp\n");
+       pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
        buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
-       printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
-              0, tx_ring->next_to_use, tx_ring->next_to_clean,
-              (unsigned long long)buffer_info->dma,
-              buffer_info->length,
-              buffer_info->next_to_watch,
-              (unsigned long long)buffer_info->time_stamp);
+       pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+               0, tx_ring->next_to_use, tx_ring->next_to_clean,
+               (unsigned long long)buffer_info->dma,
+               buffer_info->length,
+               buffer_info->next_to_watch,
+               (unsigned long long)buffer_info->time_stamp);
 
        /* Print Tx Ring */
        if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter)
         *   +----------------------------------------------------------------+
         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
         */
-       printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
-              " [bi->dma       ] leng  ntw timestamp        bi->skb "
-              "<-- Legacy format\n");
-       printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
-              " [bi->dma       ] leng  ntw timestamp        bi->skb "
-              "<-- Ext Context format\n");
-       printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
-              " [bi->dma       ] leng  ntw timestamp        bi->skb "
-              "<-- Ext Data format\n");
+       pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
+       pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
+       pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+               const char *next_desc;
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                buffer_info = &tx_ring->buffer_info[i];
                u0 = (struct my_u0 *)tx_desc;
-               printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
-                      "%04X  %3X %016llX %p",
-                      (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
-                       ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
-                      (unsigned long long)le64_to_cpu(u0->a),
-                      (unsigned long long)le64_to_cpu(u0->b),
-                      (unsigned long long)buffer_info->dma,
-                      buffer_info->length, buffer_info->next_to_watch,
-                      (unsigned long long)buffer_info->time_stamp,
-                      buffer_info->skb);
                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
-                       printk(KERN_CONT " NTC/U\n");
+                       next_desc = " NTC/U";
                else if (i == tx_ring->next_to_use)
-                       printk(KERN_CONT " NTU\n");
+                       next_desc = " NTU";
                else if (i == tx_ring->next_to_clean)
-                       printk(KERN_CONT " NTC\n");
+                       next_desc = " NTC";
                else
-                       printk(KERN_CONT "\n");
+                       next_desc = "";
+               pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
+                       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+                        ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+                       i,
+                       (unsigned long long)le64_to_cpu(u0->a),
+                       (unsigned long long)le64_to_cpu(u0->b),
+                       (unsigned long long)buffer_info->dma,
+                       buffer_info->length, buffer_info->next_to_watch,
+                       (unsigned long long)buffer_info->time_stamp,
+                       buffer_info->skb, next_desc);
 
                if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
        /* Print Rx Ring Summary */
 rx_ring_summary:
        dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC]\n");
-       printk(KERN_INFO " %5d %5X %5X\n", 0,
-              rx_ring->next_to_use, rx_ring->next_to_clean);
+       pr_info("Queue [NTU] [NTC]\n");
+       pr_info(" %5d %5X %5X\n",
+               0, rx_ring->next_to_use, rx_ring->next_to_clean);
 
        /* Print Rx Ring */
        if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@ rx_ring_summary:
                 * 24 |                Buffer Address 3 [63:0]              |
                 *    +-----------------------------------------------------+
                 */
-               printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
-                      "[buffer 1 63:0 ] "
-                      "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
-                      "[bi->skb] <-- Ext Pkt Split format\n");
+               pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
                /* [Extended] Receive Descriptor (Write-Back) Format
                 *
                 *   63       48 47    32 31     13 12    8 7    4 3        0
@@ -352,35 +339,40 @@ rx_ring_summary:
                 *   +------------------------------------------------------+
                 *   63       48 47    32 31            20 19               0
                 */
-               printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
-                      "[vl   l0 ee  es] "
-                      "[ l3  l2  l1 hs] [reserved      ] ---------------- "
-                      "[bi->skb] <-- Ext Rx Write-Back format\n");
+               pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
                for (i = 0; i < rx_ring->count; i++) {
+                       const char *next_desc;
                        buffer_info = &rx_ring->buffer_info[i];
                        rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
                        u1 = (struct my_u1 *)rx_desc_ps;
                        staterr =
                            le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+                       if (i == rx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == rx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
-                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
-                                      "%016llX %016llX %016llX "
-                                      "---------------- %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      (unsigned long long)le64_to_cpu(u1->c),
-                                      (unsigned long long)le64_to_cpu(u1->d),
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+                                       "RWB", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       (unsigned long long)le64_to_cpu(u1->c),
+                                       (unsigned long long)le64_to_cpu(u1->d),
+                                       buffer_info->skb, next_desc);
                        } else {
-                               printk(KERN_INFO "R  [0x%03X]     %016llX "
-                                      "%016llX %016llX %016llX %016llX %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      (unsigned long long)le64_to_cpu(u1->c),
-                                      (unsigned long long)le64_to_cpu(u1->d),
-                                      (unsigned long long)buffer_info->dma,
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
+                                       "R  ", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       (unsigned long long)le64_to_cpu(u1->c),
+                                       (unsigned long long)le64_to_cpu(u1->d),
+                                       (unsigned long long)buffer_info->dma,
+                                       buffer_info->skb, next_desc);
 
                                if (netif_msg_pktdata(adapter))
                                        print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@ rx_ring_summary:
                                                phys_to_virt(buffer_info->dma),
                                                adapter->rx_ps_bsize0, true);
                        }
-
-                       if (i == rx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
                }
                break;
        default:
@@ -407,9 +392,7 @@ rx_ring_summary:
                 * 8 |                      Reserved                       |
                 *   +-----------------------------------------------------+
                 */
-               printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
-                      "[reserved 63:0 ] [bi->dma       ] "
-                      "[bi->skb] <-- Ext (Read) format\n");
+               pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
                /* Extended Receive Descriptor (Write-Back) Format
                 *
                 *   63       48 47    32 31    24 23            4 3        0
@@ -423,29 +406,37 @@ rx_ring_summary:
                 *   +------------------------------------------------------+
                 *   63       48 47    32 31            20 19               0
                 */
-               printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
-                      "[vt   ln xe  xs] "
-                      "[bi->skb] <-- Ext (Write-Back) format\n");
+               pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
 
                for (i = 0; i < rx_ring->count; i++) {
+                       const char *next_desc;
+
                        buffer_info = &rx_ring->buffer_info[i];
                        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                        u1 = (struct my_u1 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+                       if (i == rx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == rx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
-                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
-                                      "%016llX ---------------- %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
+                                       "RWB", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       buffer_info->skb, next_desc);
                        } else {
-                               printk(KERN_INFO "R  [0x%03X]     %016llX "
-                                      "%016llX %016llX %p", i,
-                                      (unsigned long long)le64_to_cpu(u1->a),
-                                      (unsigned long long)le64_to_cpu(u1->b),
-                                      (unsigned long long)buffer_info->dma,
-                                      buffer_info->skb);
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
+                                       "R  ", i,
+                                       (unsigned long long)le64_to_cpu(u1->a),
+                                       (unsigned long long)le64_to_cpu(u1->b),
+                                       (unsigned long long)buffer_info->dma,
+                                       buffer_info->skb, next_desc);
 
                                if (netif_msg_pktdata(adapter))
                                        print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@ rx_ring_summary:
                                                       adapter->rx_buffer_len,
                                                       true);
                        }
-
-                       if (i == rx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
                }
        }
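
Editorial aside: the dump-path hunks above replace printk(KERN_INFO ...) plus KERN_CONT continuations with a single pr_info() per logical line: the per-descriptor annotation (" NTU", " NTC", " NTC/U") is chosen first and appended through one format string, so concurrent printk output can no longer split a line. A small stand-alone illustration of that restructuring, with printf standing in for pr_info:

    #include <stdio.h>

    /* Pick the annotation first, then emit the whole line in one call instead
     * of printing a prefix and appending the suffix with a continuation. */
    static void print_desc(unsigned int i, unsigned int ntu, unsigned int ntc)
    {
            const char *next_desc;

            if (i == ntu && i == ntc)
                    next_desc = " NTC/U";
            else if (i == ntu)
                    next_desc = " NTU";
            else if (i == ntc)
                    next_desc = " NTC";
            else
                    next_desc = "";

            printf("R[0x%03X]%s\n", i, next_desc);
    }

    int main(void)
    {
            for (unsigned int i = 0; i < 4; i++)
                    print_desc(i, 1, 2);
            return 0;
    }
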
 
@@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
                                                     print_hang_task);
+       struct net_device *netdev = adapter->netdev;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
        if (test_bit(__E1000_DOWN, &adapter->state))
                return;
 
+       if (!adapter->tx_hang_recheck &&
+           (adapter->flags2 & FLAG2_DMA_BURST)) {
+               /* May be blocked on write-back; flush pending descriptor
+                * writebacks to memory and detect again on the next pass.
+                */
+               ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+               /* execute the writes immediately */
+               e1e_flush();
+               adapter->tx_hang_recheck = true;
+               return;
+       }
+       /* Real hang detected */
+       adapter->tx_hang_recheck = false;
+       netif_stop_queue(netdev);
+
        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
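
Editorial aside: the hunk above turns the hang report into a two-pass check. On the first pass with DMA burst enabled it only forces a flush of pending descriptor write-backs (TIDV with the FPD bit) and sets tx_hang_recheck; only if the descriptor is still stuck on the next pass is the queue stopped and the hang reported. A toy model of that recheck state machine, with stand-in fields and no hardware access:

    #include <stdbool.h>
    #include <stdio.h>

    struct tx_state {
            bool hang_recheck;      /* set after the first suspicious pass */
            bool writeback_pending; /* stand-in for a delayed descriptor WB */
    };

    /* Returns true only when a hang is declared on the second pass. */
    static bool check_hang(struct tx_state *s)
    {
            if (!s->hang_recheck) {
                    /* first pass: flush write-backs and ask to be re-run */
                    s->writeback_pending = false;
                    s->hang_recheck = true;
                    return false;
            }
            s->hang_recheck = false;
            return true;            /* still stuck after the flush: real hang */
    }

    int main(void)
    {
            struct tx_state s = { .writeback_pending = true };

            printf("pass 1: hang=%d\n", check_hang(&s)); /* 0: flush + recheck */
            printf("pass 2: hang=%d\n", check_hang(&s)); /* 1: declared hang   */
            return 0;
    }
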
@@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                        if (cleaned) {
                                total_tx_packets += buffer_info->segs;
                                total_tx_bytes += buffer_info->bytecount;
+                               if (buffer_info->skb) {
+                                       bytes_compl += buffer_info->skb->len;
+                                       pkts_compl++;
+                               }
                        }
 
                        e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 
        tx_ring->next_to_clean = i;
 
+       netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
 #define TX_WAKE_THRESHOLD 32
        if (count && netif_carrier_ok(netdev) &&
            e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
@@ -1154,10 +1161,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                               + (adapter->tx_timeout_factor * HZ)) &&
-                   !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+                   !(er32(STATUS) & E1000_STATUS_TXOFF))
                        schedule_work(&adapter->print_hang_task);
-                       netif_stop_queue(netdev);
-               }
+               else
+                       adapter->tx_hang_recheck = false;
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
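
Editorial aside: the netdev_completed_queue() and netdev_reset_queue() calls added in this file, together with the netdev_sent_queue() call added later in the xmit path, feed byte-queue-limits (BQL) accounting: bytes are recorded when queued and released when their descriptors complete, so the stack can bound how much data sits in the Tx ring. The toy model below shows only the basic bookkeeping; the limit handling is a simplification, not the kernel's dynamic BQL algorithm.

    #include <stdbool.h>
    #include <stdio.h>

    struct queue_limits {
            unsigned int inflight;  /* bytes queued but not yet completed */
            unsigned int limit;     /* stop queueing above this many bytes */
            bool stopped;
    };

    static void sent(struct queue_limits *q, unsigned int bytes)
    {
            q->inflight += bytes;
            if (q->inflight >= q->limit)
                    q->stopped = true;      /* analogous to stopping the txq */
    }

    static void completed(struct queue_limits *q, unsigned int bytes)
    {
            q->inflight -= bytes;
            if (q->inflight < q->limit)
                    q->stopped = false;     /* analogous to waking the txq */
    }

    int main(void)
    {
            struct queue_limits q = { .limit = 3000 };

            sent(&q, 1500);
            sent(&q, 1500);
            printf("inflight=%u stopped=%d\n", q.inflight, q.stopped);
            completed(&q, 1500);
            printf("inflight=%u stopped=%d\n", q.inflight, q.stopped);
            return 0;
    }
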
@@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                        adapter->flags2 |= FLAG2_IS_DISCARDING;
 
                if (adapter->flags2 & FLAG2_IS_DISCARDING) {
-                       e_dbg("Packet Split buffers didn't pick up the full "
-                             "packet\n");
+                       e_dbg("Packet Split buffers didn't pick up the full packet\n");
                        dev_kfree_skb_irq(skb);
                        if (staterr & E1000_RXD_STAT_EOP)
                                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                length = le16_to_cpu(rx_desc->wb.middle.length0);
 
                if (!length) {
-                       e_dbg("Last part of the packet spanning multiple "
-                             "descriptors\n");
+                       e_dbg("Last part of the packet spanning multiple descriptors\n");
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }
@@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                                        return;
                        }
                        /* MSI-X failed, so fall through and try MSI */
-                       e_err("Failed to initialize MSI-X interrupts.  "
-                             "Falling back to MSI interrupts.\n");
+                       e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
                        e1000e_reset_interrupt_capability(adapter);
                }
                adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                        adapter->flags |= FLAG_MSI_ENABLED;
                } else {
                        adapter->int_mode = E1000E_INT_MODE_LEGACY;
-                       e_err("Failed to initialize MSI interrupts.  Falling "
-                             "back to legacy interrupts.\n");
+                       e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
                }
                /* Fall through */
        case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
                e1000_put_txbuf(adapter, buffer_info);
        }
 
+       netdev_reset_queue(adapter->netdev);
        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);
 
@@ -2518,7 +2522,7 @@ clean_rx:
        return work_done;
 }
 
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if ((adapter->hw.mng_cookie.status &
             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
            (vid == adapter->mng_vlan_id))
-               return;
+               return 0;
 
        /* add VID to filter table */
        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        }
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
            (vid == adapter->mng_vlan_id)) {
                /* release control to f/w */
                e1000e_release_hw_control(adapter);
-               return;
+               return 0;
        }
 
        /* remove VID from filter table */
@@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 /**
@@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 }
 
 /**
- *  e1000_update_mc_addr_list - Update Multicast addresses
- *  @hw: pointer to the HW structure
- *  @mc_addr_list: array of multicast addresses to program
- *  @mc_addr_count: number of multicast addresses to program
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *                0 on no addresses written
+ *                X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       struct netdev_hw_addr *ha;
+       u8 *mta_list;
+       int i;
+
+       if (netdev_mc_empty(netdev)) {
+               /* nothing to program, so clear mc list */
+               hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+               return 0;
+       }
+
+       mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+       if (!mta_list)
+               return -ENOMEM;
+
+       /* update_mc_addr_list expects a packed array of only addresses. */
+       i = 0;
+       netdev_for_each_mc_addr(ha, netdev)
+               memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+       hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+       kfree(mta_list);
+
+       return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
  *
- *  Updates the Multicast Table Array.
- *  The caller must have a packed mc_addr_list of multicast addresses.
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *                0 on no addresses written
+ *                X on writing X addresses to the RAR table
  **/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
-                                     u32 mc_addr_count)
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
 {
-       hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int rar_entries = hw->mac.rar_entry_count;
+       int count = 0;
+
+       /* save a rar entry for our hardware address */
+       rar_entries--;
+
+       /* save a rar entry for the LAA workaround */
+       if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+               rar_entries--;
+
+       /* return -ENOMEM to indicate insufficient space for the addresses */
+       if (netdev_uc_count(netdev) > rar_entries)
+               return -ENOMEM;
+
+       if (!netdev_uc_empty(netdev) && rar_entries) {
+               struct netdev_hw_addr *ha;
+
+               /*
+                * write the addresses in reverse order to avoid write
+                * combining
+                */
+               netdev_for_each_uc_addr(ha, netdev) {
+                       if (!rar_entries)
+                               break;
+                       e1000e_rar_set(hw, ha->addr, rar_entries--);
+                       count++;
+               }
+       }
+
+       /* zero out the remaining RAR entries not used above */
+       for (; rar_entries > 0; rar_entries--) {
+               ew32(RAH(rar_entries), 0);
+               ew32(RAL(rar_entries), 0);
+       }
+       e1e_flush();
+
+       return count;
 }
 
 /**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
  * @netdev: network interface device structure
  *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated.  This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
  * promiscuous mode, and all-multi behavior.
  **/
-static void e1000_set_multi(struct net_device *netdev)
+static void e1000e_set_rx_mode(struct net_device *netdev)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       struct netdev_hw_addr *ha;
-       u8  *mta_list;
        u32 rctl;
 
        /* Check for Promiscuous and All Multicast modes */
-
        rctl = er32(RCTL);
 
+       /* clear the affected bits */
+       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
        if (netdev->flags & IFF_PROMISC) {
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-               rctl &= ~E1000_RCTL_VFE;
                /* Do not hardware filter VLANs in promisc mode */
                e1000e_vlan_filter_disable(adapter);
        } else {
+               int count;
                if (netdev->flags & IFF_ALLMULTI) {
                        rctl |= E1000_RCTL_MPE;
-                       rctl &= ~E1000_RCTL_UPE;
                } else {
-                       rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+                       /*
+                        * Write the addresses to the MTA; if the attempt
+                        * fails, fall back to multicast promiscuous mode so
+                        * that we can at least receive multicast traffic.
+                        */
+                       count = e1000e_write_mc_addr_list(netdev);
+                       if (count < 0)
+                               rctl |= E1000_RCTL_MPE;
                }
                e1000e_vlan_filter_enable(adapter);
-       }
-
-       ew32(RCTL, rctl);
-
-       if (!netdev_mc_empty(netdev)) {
-               int i = 0;
-
-               mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
-               if (!mta_list)
-                       return;
-
-               /* prepare a packed array of only addresses. */
-               netdev_for_each_mc_addr(ha, netdev)
-                       memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
-               e1000_update_mc_addr_list(hw, mta_list, i);
-               kfree(mta_list);
-       } else {
                /*
-                * if we're called from probe, we might not have
-                * anything to do here, so clear out the list
+                * Write addresses to the available RAR registers; if there is
+                * not enough space to store all of them, enable unicast
+                * promiscuous mode instead.
                 */
-               e1000_update_mc_addr_list(hw, NULL, 0);
+               count = e1000e_write_uc_addr_list(netdev);
+               if (count < 0)
+                       rctl |= E1000_RCTL_UPE;
        }
 
+       ew32(RCTL, rctl);
+
        if (netdev->features & NETIF_F_HW_VLAN_RX)
                e1000e_vlan_strip_enable(adapter);
        else
@@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev)
  **/
 static void e1000_configure(struct e1000_adapter *adapter)
 {
-       e1000_set_multi(adapter->netdev);
+       e1000e_set_rx_mode(adapter->netdev);
 
        e1000_restore_vlan(adapter);
        e1000_init_manageability_pt(adapter);
@@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
 
        clear_bit(__E1000_DOWN, &adapter->state);
 
-       napi_enable(&adapter->napi);
        if (adapter->msix_entries)
                e1000_configure_msix(adapter);
        e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
        e1e_flush();
        usleep_range(10000, 20000);
 
-       napi_disable(&adapter->napi);
        e1000_irq_disable(adapter);
 
        del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
 
        e1000_irq_enable(adapter);
 
+       adapter->tx_hang_recheck = false;
        netif_start_queue(netdev);
 
        adapter->idle_check = true;
@@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
 
        pm_runtime_get_sync(&pdev->dev);
 
+       napi_disable(&adapter->napi);
+
        if (!test_bit(__E1000_DOWN, &adapter->state)) {
                e1000e_down(adapter);
                e1000_free_irq(adapter);
@@ -4168,16 +4245,13 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
        u32 ctrl = er32(CTRL);
 
        /* Link status message must follow this format for user tools */
-       printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
-              "Flow Control: %s\n",
-              adapter->netdev->name,
-              adapter->link_speed,
-              (adapter->link_duplex == FULL_DUPLEX) ?
-              "Full Duplex" : "Half Duplex",
-              ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-              "Rx/Tx" :
-              ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
-               ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+       printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+               adapter->netdev->name,
+               adapter->link_speed,
+               adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+               (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+               (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+               (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4323,10 +4397,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                                e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
 
                                if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
-                                       e_info("Autonegotiated half duplex but"
-                                              " link partner cannot autoneg. "
-                                              " Try forcing full duplex if "
-                                              "link gets many collisions.\n");
+                                       e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
                        }
 
                        /* adjust timeout factor according to speed/duplex */
@@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
        /* if count is 0 then mapping error has occurred */
        count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
        if (count) {
+               netdev_sent_queue(netdev, skb->len);
                e1000_tx_queue(adapter, tx_flags, count);
                /* Make sure there is space in the ring for the next send. */
                e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        if ((adapter->hw.mac.type == e1000_pch2lan) &&
            !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
            (new_mtu > ETH_DATA_LEN)) {
-               e_err("Jumbo Frames not supported on 82579 when CRC "
-                     "stripping is disabled.\n");
+               e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
                return -EINVAL;
        }
 
@@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 
        if (wufc) {
                e1000_setup_rctl(adapter);
-               e1000_set_multi(netdev);
+               e1000e_set_rx_mode(netdev);
 
                /* turn on all-multi mode if wake on multicast is enabled */
                if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev)
                                phy_data & E1000_WUS_MC ? "Multicast Packet" :
                                phy_data & E1000_WUS_BC ? "Broadcast Packet" :
                                phy_data & E1000_WUS_MAG ? "Magic Packet" :
-                               phy_data & E1000_WUS_LNKC ? "Link Status "
-                               " Change" : "other");
+                               phy_data & E1000_WUS_LNKC ?
+                               "Link Status Change" : "other");
                }
                e1e_wphy(&adapter->hw, BM_WUS, ~0);
        } else {
@@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
        }
 }
 
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
                adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
        .ndo_stop               = e1000_close,
        .ndo_start_xmit         = e1000_xmit_frame,
        .ndo_get_stats64        = e1000e_get_stats64,
-       .ndo_set_rx_mode        = e1000_set_multi,
+       .ndo_set_rx_mode        = e1000e_set_rx_mode,
        .ndo_set_mac_address    = e1000_set_mac,
        .ndo_change_mtu         = e1000_change_mtu,
        .ndo_do_ioctl           = e1000_ioctl,
@@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
                        if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
+                               dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                                goto err_dma;
                        }
                }
@@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                                  NETIF_F_TSO6 |
                                  NETIF_F_HW_CSUM);
 
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+
        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
index 7881fb9..b8e20f0 100644 (file)
@@ -29,6 +29,8 @@
  * e1000_82576
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
@@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
         * Check for invalid size
         */
        if ((hw->mac.type == e1000_82576) && (size > 15)) {
-               printk("igb: The NVM size is not valid, "
-                       "defaulting to 32K.\n");
+               pr_notice("The NVM size is not valid, defaulting to 32K\n");
                size = 15;
        }
        nvm->word_size = 1 << size;
index 43873eb..e9335ef 100644 (file)
@@ -673,25 +673,22 @@ static void igb_get_drvinfo(struct net_device *netdev,
                            struct ethtool_drvinfo *drvinfo)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
        u16 eeprom_data;
 
-       strncpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver) - 1);
-       strncpy(drvinfo->version, igb_driver_version,
-               sizeof(drvinfo->version) - 1);
+       strlcpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
 
        /* EEPROM image version # is reported as firmware version # for
         * 82575 controllers */
        adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
-       sprintf(firmware_version, "%d.%d-%d",
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d-%d",
                (eeprom_data & 0xF000) >> 12,
                (eeprom_data & 0x0FF0) >> 4,
                eeprom_data & 0x000F);
 
-       strncpy(drvinfo->fw_version, firmware_version,
-               sizeof(drvinfo->fw_version) - 1);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-               sizeof(drvinfo->bus_info) - 1);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = IGB_STATS_LEN;
        drvinfo->testinfo_len = IGB_TEST_LEN;
        drvinfo->regdump_len = igb_get_regs_len(netdev);
index ced5444..89d576c 100644 (file)
@@ -25,6 +25,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -145,9 +147,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
 static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
@@ -325,16 +327,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
                        regs[n] = rd32(E1000_TXDCTL(n));
                break;
        default:
-               printk(KERN_INFO "%-15s %08x\n",
-                       reginfo->name, rd32(reginfo->ofs));
+               pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
                return;
        }
 
        snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
-       printk(KERN_INFO "%-15s ", rname);
-       for (n = 0; n < 4; n++)
-               printk(KERN_CONT "%08x ", regs[n]);
-       printk(KERN_CONT "\n");
+       pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+               regs[2], regs[3]);
 }
 
 /*
@@ -359,18 +358,15 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               printk(KERN_INFO "Device Name     state            "
-                       "trans_start      last_rx\n");
-               printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-               netdev->name,
-               netdev->state,
-               netdev->trans_start,
-               netdev->last_rx);
+               pr_info("Device Name     state            trans_start      "
+                       "last_rx\n");
+               pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+                       netdev->state, netdev->trans_start, netdev->last_rx);
        }
 
        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
-       printk(KERN_INFO " Register Name   Value\n");
+       pr_info(" Register Name   Value\n");
        for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
             reginfo->name; reginfo++) {
                igb_regdump(hw, reginfo);
@@ -381,18 +377,17 @@ static void igb_dump(struct igb_adapter *adapter)
                goto exit;
 
        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-               " leng ntw timestamp\n");
+       pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
        for (n = 0; n < adapter->num_tx_queues; n++) {
                struct igb_tx_buffer *buffer_info;
                tx_ring = adapter->tx_ring[n];
                buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-               printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
-                          n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                          (u64)buffer_info->dma,
-                          buffer_info->length,
-                          buffer_info->next_to_watch,
-                          (u64)buffer_info->time_stamp);
+               pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+                       n, tx_ring->next_to_use, tx_ring->next_to_clean,
+                       (u64)buffer_info->dma,
+                       buffer_info->length,
+                       buffer_info->next_to_watch,
+                       (u64)buffer_info->time_stamp);
        }
 
        /* Print TX Rings */
@@ -414,36 +409,38 @@ static void igb_dump(struct igb_adapter *adapter)
 
        for (n = 0; n < adapter->num_tx_queues; n++) {
                tx_ring = adapter->tx_ring[n];
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "T [desc]     [address 63:0  ] "
-                       "[PlPOCIStDDM Ln] [bi->dma       ] "
-                       "leng  ntw timestamp        bi->skb\n");
+               pr_info("------------------------------------\n");
+               pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+               pr_info("------------------------------------\n");
+               pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
+                       "[bi->dma       ] leng  ntw timestamp        "
+                       "bi->skb\n");
 
                for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+                       const char *next_desc;
                        struct igb_tx_buffer *buffer_info;
                        tx_desc = IGB_TX_DESC(tx_ring, i);
                        buffer_info = &tx_ring->tx_buffer_info[i];
                        u0 = (struct my_u0 *)tx_desc;
-                       printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
-                               " %04X  %p %016llX %p", i,
+                       if (i == tx_ring->next_to_use &&
+                           i == tx_ring->next_to_clean)
+                               next_desc = " NTC/U";
+                       else if (i == tx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == tx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
+                       pr_info("T [0x%03X]    %016llX %016llX %016llX"
+                               " %04X  %p %016llX %p%s\n", i,
                                le64_to_cpu(u0->a),
                                le64_to_cpu(u0->b),
                                (u64)buffer_info->dma,
                                buffer_info->length,
                                buffer_info->next_to_watch,
                                (u64)buffer_info->time_stamp,
-                               buffer_info->skb);
-                       if (i == tx_ring->next_to_use &&
-                               i == tx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC/U\n");
-                       else if (i == tx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == tx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
+                               buffer_info->skb, next_desc);
 
                        if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
                                print_hex_dump(KERN_INFO, "",
@@ -456,11 +453,11 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print RX Rings Summary */
 rx_ring_summary:
        dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
-       printk(KERN_INFO "Queue [NTU] [NTC]\n");
+       pr_info("Queue [NTU] [NTC]\n");
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
-               printk(KERN_INFO " %5d %5X %5X\n", n,
-                          rx_ring->next_to_use, rx_ring->next_to_clean);
+               pr_info(" %5d %5X %5X\n",
+                       n, rx_ring->next_to_use, rx_ring->next_to_clean);
        }
 
        /* Print RX Rings */
@@ -492,36 +489,43 @@ rx_ring_summary:
 
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
-               printk(KERN_INFO "------------------------------------\n");
-               printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
-                       "[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
-                       "<-- Adv Rx Read format\n");
-               printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
-                       "[vl er S cks ln] ---------------- [bi->skb] "
-                       "<-- Adv Rx Write-Back format\n");
+               pr_info("------------------------------------\n");
+               pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+               pr_info("------------------------------------\n");
+               pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
+                       "[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+               pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+                       "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
 
                for (i = 0; i < rx_ring->count; i++) {
+                       const char *next_desc;
                        struct igb_rx_buffer *buffer_info;
                        buffer_info = &rx_ring->rx_buffer_info[i];
                        rx_desc = IGB_RX_DESC(rx_ring, i);
                        u0 = (struct my_u0 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+                       if (i == rx_ring->next_to_use)
+                               next_desc = " NTU";
+                       else if (i == rx_ring->next_to_clean)
+                               next_desc = " NTC";
+                       else
+                               next_desc = "";
+
                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
-                               printk(KERN_INFO "RWB[0x%03X]     %016llX "
-                                       "%016llX ---------------- %p", i,
+                               pr_info("%s[0x%03X]     %016llX %016llX -------"
+                                       "--------- %p%s\n", "RWB", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
-                                       buffer_info->skb);
+                                       buffer_info->skb, next_desc);
                        } else {
-                               printk(KERN_INFO "R  [0x%03X]     %016llX "
-                                       "%016llX %016llX %p", i,
+                               pr_info("%s[0x%03X]     %016llX %016llX %016llX"
+                                       " %p%s\n", "R  ", i,
                                        le64_to_cpu(u0->a),
                                        le64_to_cpu(u0->b),
                                        (u64)buffer_info->dma,
-                                       buffer_info->skb);
+                                       buffer_info->skb, next_desc);
 
                                if (netif_msg_pktdata(adapter)) {
                                        print_hex_dump(KERN_INFO, "",
@@ -538,14 +542,6 @@ rx_ring_summary:
                                          PAGE_SIZE/2, true);
                                }
                        }
-
-                       if (i == rx_ring->next_to_use)
-                               printk(KERN_CONT " NTU\n");
-                       else if (i == rx_ring->next_to_clean)
-                               printk(KERN_CONT " NTC\n");
-                       else
-                               printk(KERN_CONT "\n");
-
                }
        }
 
@@ -599,10 +595,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
 static int __init igb_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "%s - version %s\n",
+       pr_info("%s - version %s\n",
               igb_driver_string, igb_driver_version);
 
-       printk(KERN_INFO "%s\n", igb_copyright);
+       pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
        dca_register_notify(&dca_notifier);
@@ -1742,7 +1738,8 @@ void igb_reset(struct igb_adapter *adapter)
        igb_get_phy_info(hw);
 }
 
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1753,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
        return features;
 }
 
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                igb_vlan_mode(netdev, features);
@@ -3640,23 +3638,23 @@ static void igb_watchdog_task(struct work_struct *work)
 
                        ctrl = rd32(E1000_CTRL);
                        /* Links status message must follow this format */
-                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
-                                "Flow Control: %s\n",
+                       printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+                              "Duplex, Flow Control: %s\n",
                               netdev->name,
                               adapter->link_speed,
                               adapter->link_duplex == FULL_DUPLEX ?
-                                "Full Duplex" : "Half Duplex",
-                              ((ctrl & E1000_CTRL_TFCE) &&
-                               (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
-                              ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
-                              ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
+                              "Full" : "Half",
+                              (ctrl & E1000_CTRL_TFCE) &&
+                              (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+                              (ctrl & E1000_CTRL_RFCE) ?  "RX" :
+                              (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
 
                        /* check for thermal sensor event */
-                       if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
-                               printk(KERN_INFO "igb: %s The network adapter "
-                                                "link speed was downshifted "
-                                                "because it overheated.\n",
-                                                netdev->name);
+                       if (igb_thermal_sensor_event(hw,
+                           E1000_THSTAT_LINK_THROTTLE)) {
+                               netdev_info(netdev, "The network adapter link "
+                                           "speed was downshifted because it "
+                                           "overheated\n");
                        }
 
                        /* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3684,10 @@ static void igb_watchdog_task(struct work_struct *work)
                        adapter->link_duplex = 0;
 
                        /* check for thermal sensor event */
-                       if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
-                               printk(KERN_ERR "igb: %s The network adapter "
-                                               "was stopped because it "
-                                               "overheated.\n",
-                                               netdev->name);
+                       if (igb_thermal_sensor_event(hw,
+                           E1000_THSTAT_PWR_DOWN)) {
+                               netdev_err(netdev, "The network adapter was "
+                                          "stopped because it overheated\n");
                        }
 
                        /* Links status message must follow this format */
@@ -6138,7 +6135,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                return true;
 
        if (!page) {
-               page = netdev_alloc_page(rx_ring->netdev);
+               page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                bi->page = page;
                if (unlikely(!page)) {
                        rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6464,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
        return 0;
 }
 
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6491,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
        igb_rlpml_set(adapter);
 }
 
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6504,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        igb_vfta_set(hw, vid, true);
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6523,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
                igb_vfta_set(hw, vid, false);
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -7064,15 +7065,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                        wr32(E1000_DMCTXTH, 0);
 
                        /*
-                        * DMA Coalescing high water mark needs to be higher
-                        * than the RX threshold. set hwm to PBA -  2 * max
-                        * frame size
+                        * DMA Coalescing high water mark needs to be greater
+                        * than the Rx threshold. Set hwm to PBA - max frame
+                        * size in 16B units, capping it at PBA - 6KB.
                         */
-                       hwm = pba - (2 * adapter->max_frame_size);
+                       hwm = 64 * pba - adapter->max_frame_size / 16;
+                       if (hwm < 64 * (pba - 6))
+                               hwm = 64 * (pba - 6);
+                       reg = rd32(E1000_FCRTC);
+                       reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+                       reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+                               & E1000_FCRTC_RTH_COAL_MASK);
+                       wr32(E1000_FCRTC, reg);
+
+                       /*
+                        * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+                        * frame size, capping it at PBA - 10KB.
+                        */
+                       dmac_thr = pba - adapter->max_frame_size / 512;
+                       if (dmac_thr < pba - 10)
+                               dmac_thr = pba - 10;
                        reg = rd32(E1000_DMACR);
                        reg &= ~E1000_DMACR_DMACTHR_MASK;
-                       dmac_thr = pba - 4;
-
                        reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
                                & E1000_DMACR_DMACTHR_MASK);
 
@@ -7088,7 +7102,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
                         * coalescing(smart fifb)-UTRESH=0
                         */
                        wr32(E1000_DMCRTRH, 0);
-                       wr32(E1000_FCRTC, hwm);
 
                        reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
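For reference, the reworked DMA-coalescing arithmetic above keeps hwm in 16-byte units (1 KB = 64 units) and the Rx threshold in KB, and the two "capping" checks actually act as lower bounds. A minimal user-space sketch of the same calculation, using assumed example values for pba (in KB) and max_frame_size (in bytes) that are not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;              /* assumed packet buffer size, KB */
	unsigned int max_frame_size = 1522; /* assumed max frame, bytes */

	/* hwm: PBA minus one max frame, in 16-byte units, never below PBA - 6KB */
	unsigned int hwm = 64 * pba - max_frame_size / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);

	/* Rx threshold: PBA minus two max frames, in KB, never below PBA - 10KB */
	unsigned int dmac_thr = pba - max_frame_size / 512;
	if (dmac_thr < pba - 10)
		dmac_thr = pba - 10;

	printf("hwm = %u (16B units), dmac_thr = %u KB\n", hwm, dmac_thr);
	return 0;
}

With these example values the floors never trigger; they only matter for jumbo frames (above roughly 6 KB for hwm and roughly 5.5 KB for the threshold).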
 
index 2c25858..7b600a1 100644 (file)
@@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32] = "N/A";
 
-       strncpy(drvinfo->driver,  igbvf_driver_name, 32);
-       strncpy(drvinfo->version, igbvf_driver_version, 32);
-       strncpy(drvinfo->fw_version, firmware_version, 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  igbvf_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, igbvf_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = igbvf_get_regs_len(netdev);
        drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
 }
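The strncpy-to-strlcpy conversions here (and the matching ones for ixgb, ixgbe, jme, mv643xx_eth, skge and sky2 later in this merge) all close the same hole: strncpy() does not NUL-terminate when the source fills the buffer, and the hard-coded 32 ignores the real field size. A hedged user-space illustration, with a simplified strlcpy reimplementation since the real helper lives in the kernel's string library:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';	/* always NUL-terminated */
	}
	return len;
}

int main(void)
{
	char a[8], b[8];
	const char *src = "0123456789";	/* longer than either buffer */

	strncpy(a, src, sizeof(a));	/* a is filled but NOT NUL-terminated */
	my_strlcpy(b, src, sizeof(b));	/* b is "0123456" plus '\0' */

	printf("strlcpy result: %s\n", b);	/* a is deliberately not printed */
	return 0;
}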
index cca7812..fd3da30 100644 (file)
@@ -25,6 +25,8 @@
 
 *******************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -1174,18 +1176,20 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
        e1000_rlpml_set_vf(hw, max_frame_size);
 }
 
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       if (hw->mac.ops.set_vfta(hw, vid, true))
+       if (hw->mac.ops.set_vfta(hw, vid, true)) {
                dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
-       else
-               set_bit(vid, adapter->active_vlans);
+               return -EINVAL;
+       }
+       set_bit(vid, adapter->active_vlans);
+       return 0;
 }
 
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
@@ -1195,11 +1199,13 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                igbvf_irq_enable(adapter);
 
-       if (hw->mac.ops.set_vfta(hw, vid, false))
+       if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
                        "Failed to remove vlan id %d\n", vid);
-       else
-               clear_bit(vid, adapter->active_vlans);
+               return -EINVAL;
+       }
+       clear_bit(vid, adapter->active_vlans);
+       return 0;
 }
 
 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1752,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
 
 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
 {
-       dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
-                adapter->link_speed,
-                ((adapter->link_duplex == FULL_DUPLEX) ?
-                 "Full Duplex" : "Half Duplex"));
+       dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+                adapter->link_speed,
+                adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
 }
 
 static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2537,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
        dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
 }
 
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
@@ -2842,9 +2848,8 @@ static struct pci_driver igbvf_driver = {
 static int __init igbvf_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "%s - version %s\n",
-              igbvf_driver_string, igbvf_driver_version);
-       printk(KERN_INFO "%s\n", igbvf_copyright);
+       pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+       pr_info("%s\n", igbvf_copyright);
 
        ret = pci_register_driver(&igbvf_driver);
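The pr_info()/pr_err() conversions in this file (and in the ixgbevf files further down) rely on the pr_fmt definition added at the top of each file: every pr_* call gets the module name prepended at compile time, which is why the explicit "ixgbevf: "-style prefixes can be dropped from the format strings. A rough user-space approximation of that mechanism (the real macros live in the kernel's printk header; the strings below are placeholders, not the driver's):

#include <stdio.h>

#define KBUILD_MODNAME "igbvf"			/* normally supplied by Kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints "igbvf: <driver string> - version <x.y.z>" with the prefix added automatically */
	pr_info("%s - version %s\n", "<driver string>", "<x.y.z>");
	return 0;
}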
 
index 9dfce7d..dbb7dd2 100644 (file)
@@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev,
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-       strncpy(drvinfo->driver,  ixgb_driver_name, 32);
-       strncpy(drvinfo->version, ixgb_driver_version, 32);
-       strncpy(drvinfo->fw_version, "N/A", 32);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver,  ixgb_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ixgb_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = IXGB_STATS_LEN;
        drvinfo->regdump_len = ixgb_get_regs_len(netdev);
        drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
index e21148f..c573655 100644 (file)
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
 
 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter)
        }
 }
 
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        /*
         * Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features)
 }
 
 static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
                return 0;
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
 }
 
-static void
+static int
 ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        vfta |= (1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void
+static int
 ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        vfta &= ~(1 << (vid & 0x1F));
        ixgb_write_vfta(&adapter->hw, index, vfta);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void
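The ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid handlers across igb, igbvf, ixgb, ixgbe and ixgbevf are all converted from void to int in this merge so that filter-programming failures can propagate to the VLAN core instead of only being logged. A minimal sketch of the before/after shape, using a hypothetical hw_set_vfta() stand-in rather than any real driver call:

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in for the driver-specific VLAN filter update */
static int hw_set_vfta(unsigned short vid, int enable)
{
	(void)vid;
	(void)enable;
	return 0;		/* pretend the hardware accepted it */
}

/* old shape: errors could only be logged, never reported */
static void vlan_rx_add_vid_old(unsigned short vid)
{
	hw_set_vfta(vid, 1);
}

/* new shape: 0 on success, negative errno on failure */
static int vlan_rx_add_vid_new(unsigned short vid)
{
	if (hw_set_vfta(vid, 1))
		return -EINVAL;
	return 0;
}

int main(void)
{
	vlan_rx_add_vid_old(100);
	printf("add vid 100: %d\n", vlan_rx_add_vid_new(100));
	return 0;
}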
index 33b93ff..da31735 100644 (file)
@@ -158,10 +158,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       /* Abort a bad configuration */
-       if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-               return;
-
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +181,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 
        if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
             adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
-               adapter->dcb_set_bitmap |= BIT_PFC;
+               adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +202,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       /* Abort bad configurations */
-       if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
-               return;
-
        if (prio != DCB_ATTR_VALUE_UNDEFINED)
                adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
        if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +301,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
        *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
 }
 
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               usleep_range(1000, 2000);
+
+       if (netif_running(dev))
+               dev->netdev_ops->ndo_stop(dev);
+
+       ixgbe_clear_interrupt_scheme(adapter);
+       ixgbe_init_interrupt_scheme(adapter);
+
+       if (netif_running(dev))
+               dev->netdev_ops->ndo_open(dev);
+
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
 static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +351,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        if (ret)
                return DCB_NO_HW_CHG;
 
-#ifdef IXGBE_FCOE
-       if (up && !(up & (1 << adapter->fcoe.up)))
-               adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
-       /*
-        * Only take down the adapter if an app change occurred. FCoE
-        * may shuffle tx rings in this case and this can not be done
-        * without a reset currently.
-        */
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-               while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
-                       usleep_range(1000, 2000);
-
-               adapter->fcoe.up = ffs(up) - 1;
-
-               if (netif_running(netdev))
-                       netdev->netdev_ops->ndo_stop(netdev);
-               ixgbe_clear_interrupt_scheme(adapter);
-       }
-#endif
-
        if (adapter->dcb_cfg.pfc_mode_enable) {
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82599EB:
@@ -385,15 +377,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
                }
        }
 
-#ifdef IXGBE_FCOE
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-               ixgbe_init_interrupt_scheme(adapter);
-               if (netif_running(netdev))
-                       netdev->netdev_ops->ndo_open(netdev);
-               ret = DCB_HW_CHG_RST;
-       }
-#endif
-
        if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
                u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
                u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +425,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
        if (adapter->dcb_cfg.pfc_mode_enable)
                adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-       if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
-               clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+       /* Reprogram FCoE hardware offloads when the traffic class
+        * FCoE is using changes. This happens if the APP info
+        * changes or the up2tc mapping is updated.
+        */
+       if ((up && !(up & (1 << adapter->fcoe.up))) ||
+           (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+               adapter->fcoe.up = ffs(up) - 1;
+               ixgbe_dcbnl_devreset(netdev);
+               ret = DCB_HW_CHG_RST;
+       }
+#endif
+
        adapter->dcb_set_bitmap = 0x00;
        return ret;
 }
@@ -661,22 +655,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
        return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
 }
 
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-
-       if (netif_running(dev))
-               dev->netdev_ops->ndo_stop(dev);
-
-       ixgbe_clear_interrupt_scheme(adapter);
-       ixgbe_init_interrupt_scheme(adapter);
-
-       if (netif_running(dev))
-               dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
 static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
                                   struct dcb_app *app)
 {
@@ -761,7 +739,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                ixgbe_dcbnl_ieee_setets(dev, &ets);
                ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
        } else if (mode & DCB_CAP_DCBX_VER_CEE) {
-               adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+               u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+               adapter->dcb_set_bitmap |= mask;
                ixgbe_dcbnl_set_all(dev);
        } else {
                /* Drop into single TC mode strict priority as this
index 70d58c3..91f871b 100644 (file)
@@ -888,23 +888,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       char firmware_version[32];
        u32 nvm_track_id;
 
-       strncpy(drvinfo->driver, ixgbe_driver_name,
-               sizeof(drvinfo->driver) - 1);
-       strncpy(drvinfo->version, ixgbe_driver_version,
-               sizeof(drvinfo->version) - 1);
+       strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ixgbe_driver_version,
+               sizeof(drvinfo->version));
 
        nvm_track_id = (adapter->eeprom_verh << 16) |
                        adapter->eeprom_verl;
-       snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
                 nvm_track_id);
 
-       strncpy(drvinfo->fw_version, firmware_version,
-               sizeof(drvinfo->fw_version) - 1);
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
-               sizeof(drvinfo->bus_info) - 1);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = IXGBE_STATS_LEN;
        drvinfo->testinfo_len = IXGBE_TEST_LEN;
        drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
index 8ef92d1..5d94ce1 100644 (file)
@@ -1140,7 +1140,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 
                if (ring_is_ps_enabled(rx_ring)) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(rx_ring->netdev);
+                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                                if (!bi->page) {
                                        rx_ring->rx_stats.alloc_rx_page_failed++;
                                        goto no_buffers;
@@ -3044,7 +3044,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        hw->mac.ops.enable_rx_dma(hw, rxctrl);
 }
 
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3053,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        /* add VID to filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3066,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        /* remove VID from filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 /**
@@ -7174,7 +7178,8 @@ void ixgbe_do_reset(struct net_device *netdev)
                ixgbe_reset(adapter);
 }
 
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+       netdev_features_t data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
@@ -7204,7 +7209,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
        return data;
 }
 
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+       netdev_features_t data)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        bool need_reset = false;
index 00fcd39..cf6812d 100644 (file)
@@ -572,7 +572,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
                /* reply to reset with ack and vf mac address */
                msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-               memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+               memcpy(new_mac, vf_mac, ETH_ALEN);
                /*
                 * Piggyback the multicast filter type so VF can compute the
                 * correct vectors
index df04f1a..e8badab 100644 (file)
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
 void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
 int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
 int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
                           u8 qos);
index 6c5cca8..242643a 100644 (file)
@@ -1710,8 +1710,6 @@ enum {
 #define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
 #define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
 
-#define IXGBE_ETH_LENGTH_OF_ADDRESS   6
-
 #define IXGBE_EEPROM_PAGE_SIZE_MAX       128
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2802,9 +2800,9 @@ struct ixgbe_eeprom_info {
 struct ixgbe_mac_info {
        struct ixgbe_mac_operations     ops;
        enum ixgbe_mac_type             type;
-       u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-       u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-       u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8                              addr[ETH_ALEN];
+       u8                              perm_addr[ETH_ALEN];
+       u8                              san_addr[ETH_ALEN];
        /* prefix for World Wide Node Name (WWNN) */
        u16                             wwnn_prefix;
        /* prefix for World Wide Port Name (WWPN) */
index e5101e9..8cc5ecc 100644 (file)
@@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
 {
        u32 macc_reg;
        u32 ledctl_reg;
+       ixgbe_link_speed speed;
+       bool link_up;
 
        /*
-        * In order for the blink bit in the LED control register
-        * to work, link and speed must be forced in the MAC. We
-        * will reverse this when we stop the blinking.
+        * Link should be up in order for the blink bit in the LED control
+        * register to work. Force link and speed in the MAC if link is down.
+        * This will be reversed when we stop the blinking.
         */
-       macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
-       macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
-       IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+       hw->mac.ops.check_link(hw, &speed, &link_up, false);
+       if (link_up == false) {
+               macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+               macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+               IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+       }
        /* Set the LED to LINK_UP + BLINK. */
        ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
index 78abb6f..2eb89cb 100644 (file)
@@ -35,7 +35,6 @@
 #define IXGBE_VF_IRQ_CLEAR_MASK         7
 #define IXGBE_VF_MAX_TX_QUEUES          1
 #define IXGBE_VF_MAX_RX_QUEUES          1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS     6
 
 /* Link speed */
 typedef u32 ixgbe_link_speed;
index e29ba45..dc8e651 100644 (file)
@@ -27,6 +27,8 @@
 
 /* ethtool support for ixgbevf */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -265,11 +267,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
-       strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
-       strlcpy(drvinfo->fw_version, "N/A", 4);
-       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ixgbevf_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
 }
 
 static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +551,8 @@ static const u32 register_test_patterns[] = {
        writel((W & M), (adapter->hw.hw_addr + R));                           \
        val = readl(adapter->hw.hw_addr + R);                                 \
        if ((W & M) != (val & M)) {                                           \
-               printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
-                                "expected 0x%08X\n", R, (val & M), (W & M)); \
+               pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+                      "0x%08X\n", R, (val & M), (W & M));                    \
                *data = R;                                                    \
                writel(before, (adapter->hw.hw_addr + R));                    \
                return 1;                                                     \
index 4c8e199..891162d 100644 (file)
@@ -29,6 +29,9 @@
 /******************************************************************************
  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
 ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
-                               bi->page = netdev_alloc_page(adapter->netdev);
+                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
@@ -1400,7 +1403,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        }
 }
 
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1412,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1425,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, false);
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1444,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
        int count = 0;
 
        if ((netdev_uc_count(netdev)) > 10) {
-               printk(KERN_ERR "Too many unicast filters - No Space\n");
+               pr_err("Too many unicast filters - No Space\n");
                return -ENOSPC;
        }
 
@@ -2135,7 +2142,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
 
        err = ixgbevf_alloc_queues(adapter);
        if (err) {
-               printk(KERN_ERR "Unable to allocate memory for queues\n");
+               pr_err("Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }
 
@@ -2189,7 +2196,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        } else {
                err = hw->mac.ops.init_hw(hw);
                if (err) {
-                       printk(KERN_ERR "init_shared_code failed: %d\n", err);
+                       pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
        }
@@ -2630,8 +2637,8 @@ static int ixgbevf_open(struct net_device *netdev)
                 * the vf can't start. */
                if (hw->adapter_stopped) {
                        err = IXGBE_ERR_MBX;
-                       printk(KERN_ERR "Unable to start - perhaps the PF"
-                              " Driver isn't up yet\n");
+                       pr_err("Unable to start - perhaps the PF Driver isn't "
+                              "up yet\n");
                        goto err_setup_reset;
                }
        }
@@ -2842,10 +2849,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
                                break;
                        default:
                                if (unlikely(net_ratelimit())) {
-                                       printk(KERN_WARNING
-                                              "partial checksum but "
-                                              "proto=%x!\n",
-                                              skb->protocol);
+                                       pr_warn("partial checksum but "
+                                               "proto=%x!\n", skb->protocol);
                                }
                                break;
                        }
@@ -3249,7 +3254,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        return stats;
 }
 
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
@@ -3414,7 +3420,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
 
        if (!is_valid_ether_addr(netdev->dev_addr)) {
-               printk(KERN_ERR "invalid MAC address\n");
+               pr_err("invalid MAC address\n");
                err = -EIO;
                goto err_sw_init;
        }
@@ -3535,10 +3541,10 @@ static struct pci_driver ixgbevf_driver = {
 static int __init ixgbevf_init_module(void)
 {
        int ret;
-       printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
-              ixgbevf_driver_version);
+       pr_info("%s - version %s\n", ixgbevf_driver_string,
+               ixgbevf_driver_version);
 
-       printk(KERN_INFO "%s\n", ixgbevf_copyright);
+       pr_info("%s\n", ixgbevf_copyright);
 
        ret = pci_register_driver(&ixgbevf_driver);
        return ret;
index aa3682e..21533e3 100644 (file)
@@ -108,7 +108,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
        if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
                return IXGBE_ERR_INVALID_MAC_ADDR;
 
-       memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+       memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
        hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
 
        return 0;
@@ -211,7 +211,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
  **/
 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
 {
-       memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+       memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
 
        return 0;
 }
index 76b8457..27d651a 100644 (file)
@@ -1990,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
                struct page *page,
                u32 page_offset,
                u32 len,
-               u8 hidma)
+               bool hidma)
 {
        dma_addr_t dmaaddr;
 
@@ -2024,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        struct jme_ring *txring = &(jme->txring[0]);
        struct txdesc *txdesc = txring->desc, *ctxdesc;
        struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
-       u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+       bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
        const struct skb_frag_struct *frag;
@@ -2399,9 +2399,9 @@ jme_get_drvinfo(struct net_device *netdev,
 {
        struct jme_adapter *jme = netdev_priv(netdev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(jme->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
 }
 
 static int
@@ -2727,8 +2727,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
        jme->msg_enable = value;
 }
 
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
 {
        if (netdev->mtu > 1900)
                features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2736,7 +2736,7 @@ jme_fix_features(struct net_device *netdev, u32 features)
 }
 
 static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
 
index d8430f4..6ad094f 100644 (file)
@@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = {
        .remove = korina_remove,
 };
 
-static int __init korina_init_module(void)
-{
-       return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
-       return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
 
 MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
 MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
index 194a031..e87847e 100644 (file)
@@ -1502,10 +1502,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 static void mv643xx_eth_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
 {
-       strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
-       strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
-       strncpy(drvinfo->fw_version, "N/A", 32);
-       strncpy(drvinfo->bus_info, "platform", 32);
+       strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+       strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
        drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
@@ -1578,10 +1580,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
 
 
 static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct mv643xx_eth_private *mp = netdev_priv(dev);
-       u32 rx_csum = features & NETIF_F_RXCSUM;
+       bool rx_csum = features & NETIF_F_RXCSUM;
 
        wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
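The u32-to-netdev_features_t and u32-to-bool conversions repeated through igb, ixgb, ixgbe, jme, mv643xx_eth and sky2 in this merge prepare for feature flags beyond bit 31: netdev_features_t is a 64-bit type, so masking it into a u32 local can silently drop a high bit, while a bool keeps only the truth value. A hedged stand-alone illustration with a made-up high feature bit (not an actual NETIF_F_* flag):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdev_features_t;

#define SOME_HIGH_FEATURE ((netdev_features_t)1 << 35)	/* hypothetical flag above bit 31 */

int main(void)
{
	netdev_features_t features = SOME_HIGH_FEATURE;

	uint32_t as_u32 = features & SOME_HIGH_FEATURE;	/* truncates to 0 */
	_Bool as_bool = features & SOME_HIGH_FEATURE;	/* correctly true */

	printf("u32: %u, bool: %d\n", as_u32, (int)as_bool);
	return 0;
}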
 
index d17d062..5ec409e 100644 (file)
@@ -1645,18 +1645,7 @@ static struct platform_driver pxa168_eth_driver = {
                   },
 };
 
-static int __init pxa168_init_module(void)
-{
-       return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
-       platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
index c7b6083..d957b2c 100644 (file)
@@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev,
 {
        struct skge_port *skge = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(skge->hw->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+               sizeof(info->bus_info));
 }
 
 static const struct skge_stat {
index 7803efa..760c2b1 100644 (file)
@@ -1110,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2)
        sky2->tx_prod = sky2->tx_cons = 0;
        sky2->tx_tcpsum = 0;
        sky2->tx_last_mss = 0;
+       netdev_reset_queue(sky2->netdev);
 
        le = get_tx_le(sky2, &sky2->tx_prod);
        le->addr = 0;
@@ -1284,7 +1285,7 @@ static const uint32_t rss_init_key[10] = {
 };
 
 /* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -1402,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 #define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
 
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -1971,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
        if (tx_avail(sky2) <= MAX_SKB_TX_LE)
                netif_stop_queue(dev);
 
+       netdev_sent_queue(dev, skb->len);
        sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
 
        return NETDEV_TX_OK;
@@ -2002,7 +2004,8 @@ mapping_error:
 static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 {
        struct net_device *dev = sky2->netdev;
-       unsigned idx;
+       u16 idx;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        BUG_ON(done >= sky2->tx_ring_size);
 
@@ -2017,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
                        netif_printk(sky2, tx_done, KERN_DEBUG, dev,
                                     "tx done %u\n", idx);
 
-                       u64_stats_update_begin(&sky2->tx_stats.syncp);
-                       ++sky2->tx_stats.packets;
-                       sky2->tx_stats.bytes += skb->len;
-                       u64_stats_update_end(&sky2->tx_stats.syncp);
+                       pkts_compl++;
+                       bytes_compl += skb->len;
 
                        re->skb = NULL;
                        dev_kfree_skb_any(skb);
@@ -2031,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 
        sky2->tx_cons = idx;
        smp_mb();
+
+       netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+       u64_stats_update_begin(&sky2->tx_stats.syncp);
+       sky2->tx_stats.packets += pkts_compl;
+       sky2->tx_stats.bytes += bytes_compl;
+       u64_stats_update_end(&sky2->tx_stats.syncp);
 }
 
 static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -3643,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev,
 {
        struct sky2_port *sky2 = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
-       strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+               sizeof(info->bus_info));
 }
 
 static const struct sky2_stat {
@@ -4311,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
        return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
 }
 
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        const struct sky2_port *sky2 = netdev_priv(dev);
        const struct sky2_hw *hw = sky2->hw;
@@ -4335,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (changed & NETIF_F_RXCSUM) {
-               u32 on = features & NETIF_F_RXCSUM;
+               bool on = features & NETIF_F_RXCSUM;
                sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
                             on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
        }
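The sky2 hunks above also wire the Tx path into byte queue limits: netdev_sent_queue() on transmit, netdev_completed_queue() from the completion handler, and netdev_reset_queue() when the ring is reinitialised. A hedged user-space model of just the bookkeeping those helpers perform (the real ones live in the kernel's netdevice header and additionally drive the dynamic queue limit algorithm):

#include <stdio.h>

static unsigned long inflight_bytes;

static void sent_queue(unsigned int bytes)	/* called from the xmit path */
{
	inflight_bytes += bytes;
}

static void completed_queue(unsigned int pkts, unsigned int bytes)	/* from tx completion */
{
	(void)pkts;
	inflight_bytes -= bytes;
}

static void reset_queue(void)	/* called when the Tx ring is reinitialised */
{
	inflight_bytes = 0;
}

int main(void)
{
	sent_queue(1514);
	sent_queue(60);
	completed_queue(1, 1514);
	printf("in flight: %lu bytes\n", inflight_bytes);	/* 60 */
	reset_queue();
	return 0;
}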
index d1aa45a..4a40ab9 100644 (file)
@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
 
 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
index 45aea9c..915e947 100644 (file)
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
 static int internal_err_reset = 1;
 module_param(internal_err_reset, int, 0644);
 MODULE_PARM_DESC(internal_err_reset,
-                "Reset device on internal errors if non-zero (default 1)");
+                "Reset device on internal errors if non-zero"
+                " (default 1, in SRIOV mode default is 0)");
 
 static void dump_err_buf(struct mlx4_dev *dev)
 {
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        phys_addr_t addr;
 
+       /* If we are in SRIOV the default of the module param must be 0 */
+       if (mlx4_is_mfunc(dev))
+               internal_err_reset = 0;
+
        INIT_LIST_HEAD(&priv->catas_err.list);
        init_timer(&priv->catas_err.timer);
        priv->catas_err.map = NULL;
index 78f5a1a..c4fef83 100644 (file)
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
 
 #include <asm/io.h>
 
 #include "mlx4.h"
+#include "fw.h"
 
 #define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK     0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
 
 enum {
        /* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
        int                     next;
        u64                     out_param;
        u16                     token;
+       u8                      fw_status;
 };
 
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+                                   struct mlx4_vhcr_cmd *in_vhcr);
+
 static int mlx4_status_to_errno(u8 status)
 {
        static const int trans_table[] = {
@@ -142,6 +152,119 @@ static int mlx4_status_to_errno(u8 status)
        return trans_table[status];
 }
 
+static int comm_pending(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 status = readl(&priv->mfunc.comm->slave_read);
+
+       return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 val;
+
+       priv->cmd.comm_toggle ^= 1;
+       val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+       __raw_writel((__force u32) cpu_to_be32(val),
+                    &priv->mfunc.comm->slave_write);
+       mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+                      unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       unsigned long end;
+       int err = 0;
+       int ret_from_pending = 0;
+
+       /* First, verify that the master reports correct status */
+       if (comm_pending(dev)) {
+               mlx4_warn(dev, "Communication channel is not idle. "
+                         "my toggle is %d (cmd:0x%x)\n",
+                         priv->cmd.comm_toggle, cmd);
+               return -EAGAIN;
+       }
+
+       /* Write command */
+       down(&priv->cmd.poll_sem);
+       mlx4_comm_cmd_post(dev, cmd, param);
+
+       end = msecs_to_jiffies(timeout) + jiffies;
+       while (comm_pending(dev) && time_before(jiffies, end))
+               cond_resched();
+       ret_from_pending = comm_pending(dev);
+       if (ret_from_pending) {
+               /* check if the slave is trying to boot in the middle of
+                * FLR process. The only non-zero result in the RESET command
+                * is MLX4_DELAY_RESET_SLAVE*/
+               if ((MLX4_COMM_CMD_RESET == cmd)) {
+                       mlx4_warn(dev, "Got slave FLRed from Communication"
+                                 " channel (ret:0x%x)\n", ret_from_pending);
+                       err = MLX4_DELAY_RESET_SLAVE;
+               } else {
+                       mlx4_warn(dev, "Communication channel timed out\n");
+                       err = -ETIMEDOUT;
+               }
+       }
+
+       up(&priv->cmd.poll_sem);
+       return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+                             u16 param, unsigned long timeout)
+{
+       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+       struct mlx4_cmd_context *context;
+       int err = 0;
+
+       down(&cmd->event_sem);
+
+       spin_lock(&cmd->context_lock);
+       BUG_ON(cmd->free_head < 0);
+       context = &cmd->context[cmd->free_head];
+       context->token += cmd->token_mask + 1;
+       cmd->free_head = context->next;
+       spin_unlock(&cmd->context_lock);
+
+       init_completion(&context->done);
+
+       mlx4_comm_cmd_post(dev, op, param);
+
+       if (!wait_for_completion_timeout(&context->done,
+                                        msecs_to_jiffies(timeout))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = context->result;
+       if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, context->fw_status);
+               goto out;
+       }
+
+out:
+       spin_lock(&cmd->context_lock);
+       context->next = cmd->free_head;
+       cmd->free_head = context - cmd->context;
+       spin_unlock(&cmd->context_lock);
+
+       up(&cmd->event_sem);
+       return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+                 unsigned long timeout)
+{
+       if (mlx4_priv(dev)->cmd.use_events)
+               return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+       return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
 static int cmd_pending(struct mlx4_dev *dev)
 {
        u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +290,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 
        while (cmd_pending(dev)) {
-               if (time_after_eq(jiffies, end))
+               if (time_after_eq(jiffies, end)) {
+                       mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
                        goto out;
+               }
                cond_resched();
        }
 
@@ -192,7 +317,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
-                                              op),                       hcr + 6);
+                                              op), hcr + 6);
 
        /*
         * Make sure that our HCR writes don't get mixed in with
@@ -209,6 +334,62 @@ out:
        return ret;
 }
 
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+                         int out_is_imm, u32 in_modifier, u8 op_modifier,
+                         u16 op, unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+       int ret;
+
+       down(&priv->cmd.slave_sem);
+       vhcr->in_param = cpu_to_be64(in_param);
+       vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+       vhcr->in_modifier = cpu_to_be32(in_modifier);
+       vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+       vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+       vhcr->status = 0;
+       vhcr->flags = !!(priv->cmd.use_events) << 6;
+       if (mlx4_is_master(dev)) {
+               ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+               if (!ret) {
+                       if (out_is_imm) {
+                               if (out_param)
+                                       *out_param =
+                                               be64_to_cpu(vhcr->out_param);
+                               else {
+                                       mlx4_err(dev, "response expected while "
+                                                "output mailbox is NULL for "
+                                                "command 0x%x\n", op);
+                                       vhcr->status = -EINVAL;
+                               }
+                       }
+                       ret = vhcr->status;
+               }
+       } else {
+               ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+                                   MLX4_COMM_TIME + timeout);
+               if (!ret) {
+                       if (out_is_imm) {
+                               if (out_param)
+                                       *out_param =
+                                               be64_to_cpu(vhcr->out_param);
+                               else {
+                                       mlx4_err(dev, "response expected while "
+                                                "output mailbox is NULL for "
+                                                "command 0x%x\n", op);
+                                       vhcr->status = -EINVAL;
+                               }
+                       }
+                       ret = vhcr->status;
+               } else
+                       mlx4_err(dev, "failed execution of VHCR_POST command "
+                                "opcode 0x%x\n", op);
+       }
+       up(&priv->cmd.slave_sem);
+       return ret;
+}
+
 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
@@ -217,6 +398,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;
+       u32 stat;
 
        down(&priv->cmd.poll_sem);
 
@@ -240,9 +422,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
-       err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
-                                              __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+       stat = be32_to_cpu((__force __be32)
+                          __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+       err = mlx4_status_to_errno(stat);
+       if (err)
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, stat);
 
 out:
        up(&priv->cmd.poll_sem);
@@ -259,6 +444,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
        if (token != context->token)
                return;
 
+       context->fw_status = status;
        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;
 
@@ -287,14 +473,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                      in_modifier, op_modifier, op, context->token, 1);
 
-       if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+       if (!wait_for_completion_timeout(&context->done,
+                                        msecs_to_jiffies(timeout))) {
                err = -EBUSY;
                goto out;
        }
 
        err = context->result;
-       if (err)
+       if (err) {
+               mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+                        op, context->fw_status);
                goto out;
+       }
 
        if (out_is_imm)
                *out_param = context->out_param;
@@ -311,17 +501,1042 @@ out:
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout)
+              u16 op, unsigned long timeout, int native)
 {
-       if (mlx4_priv(dev)->cmd.use_events)
-               return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
-       else
-               return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
+       if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+               if (mlx4_priv(dev)->cmd.use_events)
+                       return mlx4_cmd_wait(dev, in_param, out_param,
+                                            out_is_imm, in_modifier,
+                                            op_modifier, op, timeout);
+               else
+                       return mlx4_cmd_poll(dev, in_param, out_param,
+                                            out_is_imm, in_modifier,
+                                            op_modifier, op, timeout);
+       }
+       return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+                             in_modifier, op_modifier, op, timeout);
 }
 EXPORT_SYMBOL_GPL(__mlx4_cmd);
 
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+                          int slave, u64 slave_addr,
+                          int size, int is_read)
+{
+       u64 in_param;
+       u64 out_param;
+
+       if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+           (slave & ~0x7f) | (size & 0xff)) {
+               mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+                             "master_addr:0x%llx slave_id:%d size:%d\n",
+                             slave_addr, master_addr, slave, size);
+               return -EINVAL;
+       }
+
+       if (is_read) {
+               in_param = (u64) slave | slave_addr;
+               out_param = (u64) dev->caps.function | master_addr;
+       } else {
+               in_param = (u64) dev->caps.function | master_addr;
+               out_param = (u64) slave | slave_addr;
+       }
+
+       return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+                           MLX4_CMD_ACCESS_MEM,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+       out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+       if (cmd->encode_slave_id) {
+               in_param &= 0xffffffffffffff00ll;
+               in_param |= slave;
+       }
+
+       err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+                        vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+       if (cmd->out_is_imm)
+               vhcr->out_param = out_param;
+
+       return err;
+}
+
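+/*
+ * Table of commands that slaves may issue over the comm channel, describing
+ * each command's mailbox usage, immediate output and the optional
+ * verify/wrapper handlers run by the master.
+ */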
+static struct mlx4_cmd_info cmd_info[] = {
+       {
+               .opcode = MLX4_CMD_QUERY_FW,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_HCA,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_DEV_CAP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_ADAPTER,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_INIT_PORT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_INIT_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_CLOSE_PORT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm  = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CLOSE_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_PORT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SET_PORT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_PORT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_MAP_EQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MAP_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_EQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_NOP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_ALLOC_RES,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = true,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_ALLOC_RES_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_FREE_RES,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_FREE_RES_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_MPT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_MPT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_MPT,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_MPT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_READ_MTT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_WRITE_MTT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_WRITE_MTT_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SYNC_TPT,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_EQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_EQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_EQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_CQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_CQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_CQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_MODIFY_CQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = true,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_MODIFY_CQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SW2HW_SRQ,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_SW2HW_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_HW2SW_SRQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_HW2SW_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_SRQ,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_ARM_SRQ,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_ARM_SRQ_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RST2INIT_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = true,
+               .verify = NULL,
+               .wrapper = mlx4_RST2INIT_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INIT2INIT_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INIT2RTR_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_INIT2RTR_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTR2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTS2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQERR2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_2ERR_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_RTS2SQD_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQD2SQD_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SQD2RTS_QP,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_2RST_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_2RST_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_QP,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SUSPEND_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_UNSUSPEND_QP,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_GEN_QP_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_QUERY_IF_STAT,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QUERY_IF_STAT_wrapper
+       },
+       /* Native multicast commands are not available for guests */
+       {
+               .opcode = MLX4_CMD_QP_ATTACH,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_QP_ATTACH_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_PROMISC,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_PROMISC_wrapper
+       },
+       /* Ethernet specific commands */
+       {
+               .opcode = MLX4_CMD_SET_VLAN_FLTR,
+               .has_inbox = true,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_SET_MCAST_FLTR,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_DUMP_ETH_STATS,
+               .has_inbox = false,
+               .has_outbox = true,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+       },
+       {
+               .opcode = MLX4_CMD_INFORM_FLR_DONE,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = NULL
+       },
+};
+
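+/*
+ * Execute a slave's virtual HCR command on the master: DMA in the slave's
+ * VHCR (unless one is passed in), look the opcode up in cmd_info, copy in
+ * the inbox, run the verify and wrapper handlers (or the raw HCA command),
+ * then DMA the outbox and the VHCR status back to the slave.
+ */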
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+                                   struct mlx4_vhcr_cmd *in_vhcr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_info *cmd = NULL;
+       struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+       struct mlx4_vhcr *vhcr;
+       struct mlx4_cmd_mailbox *inbox = NULL;
+       struct mlx4_cmd_mailbox *outbox = NULL;
+       u64 in_param;
+       u64 out_param;
+       int ret = 0;
+       int i;
+
+       /* Create sw representation of Virtual HCR */
+       vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+       if (!vhcr)
+               return -ENOMEM;
+
+       /* DMA in the vHCR */
+       if (!in_vhcr) {
+               ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+                                     priv->mfunc.master.slave_state[slave].vhcr_dma,
+                                     ALIGN(sizeof(struct mlx4_vhcr_cmd),
+                                           MLX4_ACCESS_MEM_ALIGN), 1);
+               if (ret) {
+                       mlx4_err(dev, "%s: Failed reading vhcr "
+                                "ret: 0x%x\n", __func__, ret);
+                       kfree(vhcr);
+                       return ret;
+               }
+       }
+
+       /* Fill SW VHCR fields */
+       vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+       vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+       vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+       vhcr->token = be16_to_cpu(vhcr_cmd->token);
+       vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+       vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+       vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+       /* Lookup command */
+       for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+               if (vhcr->op == cmd_info[i].opcode) {
+                       cmd = &cmd_info[i];
+                       break;
+               }
+       }
+       if (!cmd) {
+               mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+                        vhcr->op, slave);
+               vhcr_cmd->status = -EINVAL;
+               goto out_status;
+       }
+
+       /* Read inbox */
+       if (cmd->has_inbox) {
+               vhcr->in_param &= INBOX_MASK;
+               inbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(inbox)) {
+                       ret = PTR_ERR(inbox);
+                       inbox = NULL;
+                       goto out;
+               }
+
+               ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+                                     vhcr->in_param,
+                                     MLX4_MAILBOX_SIZE, 1);
+               if (ret) {
+                       mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+                                __func__, cmd->opcode);
+                       goto out;
+               }
+       }
+
+       /* Apply permission and bound checks if applicable */
+       if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+               mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+                         "checks for resource_id:%d\n", vhcr->op, slave,
+                         vhcr->in_modifier);
+               vhcr_cmd->status = -EPERM;
+               goto out_status;
+       }
+
+       /* Allocate outbox */
+       if (cmd->has_outbox) {
+               outbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(outbox)) {
+                       ret = PTR_ERR(outbox);
+                       outbox = NULL;
+                       goto out;
+               }
+       }
+
+       /* Execute the command! */
+       if (cmd->wrapper) {
+               vhcr_cmd->status = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+                                          cmd);
+               if (cmd->out_is_imm)
+                       vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+       } else {
+               in_param = cmd->has_inbox ? (u64) inbox->dma :
+                       vhcr->in_param;
+               out_param = cmd->has_outbox ? (u64) outbox->dma :
+                       vhcr->out_param;
+               vhcr_cmd->status = __mlx4_cmd(dev, in_param, &out_param,
+                                        cmd->out_is_imm, vhcr->in_modifier,
+                                        vhcr->op_modifier, vhcr->op,
+                                        MLX4_CMD_TIME_CLASS_A,
+                                        MLX4_CMD_NATIVE);
+
+               if (vhcr_cmd->status) {
+                       mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+                                 " error:%d, status %d\n",
+                                 vhcr->op, slave, vhcr->errno,
+                                 vhcr_cmd->status);
+                       ret = vhcr_cmd->status;
+                       goto out;
+               }
+
+               if (cmd->out_is_imm) {
+                       vhcr->out_param = out_param;
+                       vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+               }
+       }
+
+       /* Write outbox if command completed successfully */
+       if (cmd->has_outbox && !vhcr->errno) {
+               ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+                                     vhcr->out_param,
+                                     MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+               if (ret) {
+                       mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+                       goto out;
+               }
+       }
+
+out_status:
+       /* DMA back vhcr result */
+       if (!in_vhcr) {
+               ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+                                     priv->mfunc.master.slave_state[slave].vhcr_dma,
+                                     ALIGN(sizeof(struct mlx4_vhcr),
+                                           MLX4_ACCESS_MEM_ALIGN),
+                                     MLX4_CMD_WRAPPED);
+               if (ret)
+                       mlx4_err(dev, "%s:Failed writing vhcr result\n",
+                                __func__);
+               else if (vhcr->e_bit &&
+                        mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+                               mlx4_warn(dev, "Failed to generate command completion "
+                                         "eqe for slave %d\n", slave);
+       }
+
+out:
+       kfree(vhcr);
+       mlx4_free_cmd_mailbox(dev, inbox);
+       mlx4_free_cmd_mailbox(dev, outbox);
+       return ret;
+}
+
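+/*
+ * Handle a single comm-channel command word from a slave: validate the
+ * toggle bit, deal with RESET/FLR, accumulate the slave's VHCR DMA address
+ * through the VHCR0..VHCR_EN stages and execute VHCR_POST. Any protocol
+ * violation resets the slave's communication state.
+ */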
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+                              u16 param, u8 toggle)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+       u32 reply;
+       u32 slave_status = 0;
+       u8 is_going_down = 0;
+
+       slave_state[slave].comm_toggle ^= 1;
+       reply = (u32) slave_state[slave].comm_toggle << 31;
+       if (toggle != slave_state[slave].comm_toggle) {
+               mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
+                         "STATE COMPROMISED ***\n", toggle, slave);
+               goto reset_slave;
+       }
+       if (cmd == MLX4_COMM_CMD_RESET) {
+               mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+               slave_state[slave].active = false;
+               /* check if we are in the middle of FLR process,
+                * if so return "retry" status to the slave */
+               if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+                       slave_status = MLX4_DELAY_RESET_SLAVE;
+                       goto inform_slave_state;
+               }
+
+               /* write the version in the event field */
+               reply |= mlx4_comm_get_version();
+
+               goto reset_slave;
+       }
+       /* command from slave in the middle of FLR */
+       if (cmd != MLX4_COMM_CMD_RESET &&
+           MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+               mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) "
+                         "in the middle of FLR\n", slave, cmd);
+               return;
+       }
+
+       switch (cmd) {
+       case MLX4_COMM_CMD_VHCR0:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma = ((u64) param) << 48;
+               priv->mfunc.master.slave_state[slave].cookie = 0;
+               mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+               break;
+       case MLX4_COMM_CMD_VHCR1:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+               break;
+       case MLX4_COMM_CMD_VHCR2:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+               break;
+       case MLX4_COMM_CMD_VHCR_EN:
+               if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+                       goto reset_slave;
+               slave_state[slave].vhcr_dma |= param;
+               slave_state[slave].active = true;
+               break;
+       case MLX4_COMM_CMD_VHCR_POST:
+               if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+                   (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+                       goto reset_slave;
+               down(&priv->cmd.slave_sem);
+               if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+                       mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+                                " resetting slave.\n", slave);
+                       up(&priv->cmd.slave_sem);
+                       goto reset_slave;
+               }
+               up(&priv->cmd.slave_sem);
+               break;
+       default:
+               mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+               goto reset_slave;
+       }
+       spin_lock(&priv->mfunc.master.slave_state_lock);
+       if (!slave_state[slave].is_slave_going_down)
+               slave_state[slave].last_cmd = cmd;
+       else
+               is_going_down = 1;
+       spin_unlock(&priv->mfunc.master.slave_state_lock);
+       if (is_going_down) {
+               mlx4_warn(dev, "Slave is going down, aborting command(%d)"
+                         " executing from slave:%d\n",
+                         cmd, slave);
+               return;
+       }
+       __raw_writel((__force u32) cpu_to_be32(reply),
+                    &priv->mfunc.comm[slave].slave_read);
+       mmiowb();
+
+       return;
+
+reset_slave:
+       /* cleanup any slave resources */
+       mlx4_delete_all_resources_for_slave(dev, slave);
+       spin_lock(&priv->mfunc.master.slave_state_lock);
+       if (!slave_state[slave].is_slave_going_down)
+               slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+       spin_unlock(&priv->mfunc.master.slave_state_lock);
+       /* with slave in the middle of FLR, no need to clean resources again */
+inform_slave_state:
+       memset(&slave_state[slave].event_eq, 0,
+              sizeof(struct mlx4_slave_event_eq_info));
+       __raw_writel((__force u32) cpu_to_be32(reply),
+                    &priv->mfunc.comm[slave].slave_read);
+       wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work,
+                            struct mlx4_mfunc_master_ctx,
+                            comm_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv =
+               container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       __be32 *bit_vec;
+       u32 comm_cmd;
+       u32 vec;
+       int i, j, slave;
+       int toggle;
+       int served = 0;
+       int reported = 0;
+       u32 slt;
+
+       bit_vec = master->comm_arm_bit_vector;
+       for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+               vec = be32_to_cpu(bit_vec[i]);
+               for (j = 0; j < 32; j++) {
+                       if (!(vec & (1 << j)))
+                               continue;
+                       ++reported;
+                       slave = (i * 32) + j;
+                       comm_cmd = swab32(readl(
+                                         &mfunc->comm[slave].slave_write));
+                       slt = swab32(readl(&mfunc->comm[slave].slave_read))
+                                    >> 31;
+                       toggle = comm_cmd >> 31;
+                       if (toggle != slt) {
+                               if (master->slave_state[slave].comm_toggle
+                                   != slt) {
+                                       printk(KERN_INFO "slave %d out of sync."
+                                              " read toggle %d, state toggle %d. "
+                                              "Resyncing.\n", slave, slt,
+                                              master->slave_state[slave].comm_toggle);
+                                       master->slave_state[slave].comm_toggle =
+                                               slt;
+                               }
+                               mlx4_master_do_cmd(dev, slave,
+                                                  comm_cmd >> 16 & 0xff,
+                                                  comm_cmd & 0xffff, toggle);
+                               ++served;
+                       }
+               }
+       }
+
+       if (reported && reported != served)
+               mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+                         " but %d were served\n",
+                         reported, served);
+
+       if (mlx4_ARM_COMM_CHANNEL(dev))
+               mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
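+/*
+ * Slave side: wait for the comm-channel read and write toggle bits to match
+ * and adopt that value, or zero both to recover the channel from a previous
+ * user that left it out of sync.
+ */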
+static int sync_toggles(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int wr_toggle;
+       int rd_toggle;
+       unsigned long end;
+
+       wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+       end = jiffies + msecs_to_jiffies(5000);
+
+       while (time_before(jiffies, end)) {
+               rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+               if (rd_toggle == wr_toggle) {
+                       priv->cmd.comm_toggle = rd_toggle;
+                       return 0;
+               }
+
+               cond_resched();
+       }
+
+       /*
+        * we could reach here if for example the previous VM using this
+        * function misbehaved and left the channel with unsynced state. We
+        * should fix this here and give this VM a chance to use a properly
+        * synced channel
+        */
+       mlx4_warn(dev, "recovering from a previously misbehaving VM\n");
+       __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+       __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+       priv->cmd.comm_toggle = 0;
+
+       return 0;
+}
+
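+/*
+ * Set up multi-function (SR-IOV) command support: allocate the VHCR page
+ * and map the comm channel; on the master also allocate per-slave state,
+ * the comm workqueue and the resource tracker, and arm the comm channel.
+ */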
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state;
+       int i, err, port;
+
+       priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                           &priv->mfunc.vhcr_dma,
+                                           GFP_KERNEL);
+       if (!priv->mfunc.vhcr) {
+               mlx4_err(dev, "Couldn't allocate vhcr.\n");
+               return -ENOMEM;
+       }
+
+       if (mlx4_is_master(dev))
+               priv->mfunc.comm =
+               ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+                       priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+       else
+               priv->mfunc.comm =
+               ioremap(pci_resource_start(dev->pdev, 2) +
+                       MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+       if (!priv->mfunc.comm) {
+               mlx4_err(dev, "Couldn't map communication vector.\n");
+               goto err_vhcr;
+       }
+
+       if (mlx4_is_master(dev)) {
+               priv->mfunc.master.slave_state =
+                       kzalloc(dev->num_slaves *
+                               sizeof(struct mlx4_slave_state), GFP_KERNEL);
+               if (!priv->mfunc.master.slave_state)
+                       goto err_comm;
+
+               for (i = 0; i < dev->num_slaves; ++i) {
+                       s_state = &priv->mfunc.master.slave_state[i];
+                       s_state->last_cmd = MLX4_COMM_CMD_RESET;
+                       __raw_writel((__force u32) 0,
+                                    &priv->mfunc.comm[i].slave_write);
+                       __raw_writel((__force u32) 0,
+                                    &priv->mfunc.comm[i].slave_read);
+                       mmiowb();
+                       for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+                               s_state->vlan_filter[port] =
+                                       kzalloc(sizeof(struct mlx4_vlan_fltr),
+                                               GFP_KERNEL);
+                               if (!s_state->vlan_filter[port]) {
+                                       if (--port)
+                                               kfree(s_state->vlan_filter[port]);
+                                       goto err_slaves;
+                               }
+                               INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+                       }
+                       spin_lock_init(&s_state->lock);
+               }
+
+               memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+               priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+               INIT_WORK(&priv->mfunc.master.comm_work,
+                         mlx4_master_comm_channel);
+               INIT_WORK(&priv->mfunc.master.slave_event_work,
+                         mlx4_gen_slave_eqe);
+               INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+                         mlx4_master_handle_slave_flr);
+               spin_lock_init(&priv->mfunc.master.slave_state_lock);
+               priv->mfunc.master.comm_wq =
+                       create_singlethread_workqueue("mlx4_comm");
+               if (!priv->mfunc.master.comm_wq)
+                       goto err_slaves;
+
+               if (mlx4_init_resource_tracker(dev))
+                       goto err_thread;
+
+               sema_init(&priv->cmd.slave_sem, 1);
+               err = mlx4_ARM_COMM_CHANNEL(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
+                                err);
+                       goto err_resource;
+               }
+
+       } else {
+               err = sync_toggles(dev);
+               if (err) {
+                       mlx4_err(dev, "Couldn't sync toggles\n");
+                       goto err_comm;
+               }
+
+               sema_init(&priv->cmd.slave_sem, 1);
+       }
+       return 0;
+
+err_resource:
+       mlx4_free_resource_tracker(dev);
+err_thread:
+       flush_workqueue(priv->mfunc.master.comm_wq);
+       destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+       while (--i) {
+               for (port = 1; port <= MLX4_MAX_PORTS; port++)
+                       kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+       }
+       kfree(priv->mfunc.master.slave_state);
+err_comm:
+       iounmap(priv->mfunc.comm);
+err_vhcr:
+       dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                            priv->mfunc.vhcr,
+                                            priv->mfunc.vhcr_dma);
+       priv->mfunc.vhcr = NULL;
+       return -ENOMEM;
+}
+
 int mlx4_cmd_init(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1546,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;
 
-       priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
-                               MLX4_HCR_SIZE);
-       if (!priv->cmd.hcr) {
-               mlx4_err(dev, "Couldn't map command register.");
-               return -ENOMEM;
+       priv->cmd.hcr = NULL;
+       priv->mfunc.vhcr = NULL;
+
+       if (!mlx4_is_slave(dev)) {
+               priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+                                       MLX4_HCR_BASE, MLX4_HCR_SIZE);
+               if (!priv->cmd.hcr) {
+                       mlx4_err(dev, "Couldn't map command register.\n");
+                       return -ENOMEM;
+               }
        }
 
        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
-       if (!priv->cmd.pool) {
-               iounmap(priv->cmd.hcr);
-               return -ENOMEM;
-       }
+       if (!priv->cmd.pool)
+               goto err_hcr;
 
        return 0;
+
+err_hcr:
+       if (!mlx4_is_slave(dev))
+               iounmap(priv->cmd.hcr);
+       return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i, port;
+
+       if (mlx4_is_master(dev)) {
+               flush_workqueue(priv->mfunc.master.comm_wq);
+               destroy_workqueue(priv->mfunc.master.comm_wq);
+               for (i = 0; i < dev->num_slaves; i++) {
+                       for (port = 1; port <= MLX4_MAX_PORTS; port++)
+                               kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+               }
+               kfree(priv->mfunc.master.slave_state);
+               iounmap(priv->mfunc.comm);
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                                    priv->mfunc.vhcr,
+                                                    priv->mfunc.vhcr_dma);
+               priv->mfunc.vhcr = NULL;
+       }
 }
 
 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1598,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        pci_pool_destroy(priv->cmd.pool);
-       iounmap(priv->cmd.hcr);
+
+       if (!mlx4_is_slave(dev))
+               iounmap(priv->cmd.hcr);
 }
 
 /*
@@ -365,6 +1611,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
+       int err = 0;
 
        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
                                   sizeof (struct mlx4_cmd_context),
@@ -389,11 +1636,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
                ; /* nothing */
        --priv->cmd.token_mask;
 
-       priv->cmd.use_events = 1;
-
        down(&priv->cmd.poll_sem);
+       priv->cmd.use_events = 1;
 
-       return 0;
+       return err;
 }
 
 /*
@@ -433,7 +1679,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
 }
 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
 
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+                          struct mlx4_cmd_mailbox *mailbox)
 {
        if (!mailbox)
                return;
@@ -442,3 +1689,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
        kfree(mailbox);
 }
 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
index 499a516..475f9d6 100644 (file)
@@ -34,9 +34,9 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
-#include <linux/gfp.h>
 
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/cq.h>
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_cq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       __be32                  logsize_usrpage;
-       __be16                  cq_period;
-       __be16                  cq_max_count;
-       u8                      reserved2[3];
-       u8                      comp_eqn;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  last_notified_index;
-       __be32                  solicit_producer_index;
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved4[2];
-       __be64                  db_rec_addr;
-};
-
 #define MLX4_CQ_STATUS_OK              ( 0 << 28)
 #define MLX4_CQ_STATUS_OVERFLOW                ( 9 << 28)
 #define MLX4_CQ_STATUS_WRITE_FAIL      (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
        cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                               cqn & (dev->caps.num_cqs - 1));
        if (!cq) {
-               mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+               mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }
 
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+                       MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num, u32 opmod)
 {
        return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
-                           MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+                           cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
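+/*
+ * Native allocation of a CQ number and its ICM/cMPT entries; the
+ * mlx4_cq_alloc_icm() wrapper below routes through MLX4_CMD_ALLOC_RES
+ * instead on multi-function devices.
+ */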
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+       int err;
+
+       *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+       if (*cqn == -1)
+               return -ENOMEM;
+
+       err = mlx4_table_get(dev, &cq_table->table, *cqn);
+       if (err)
+               goto err_out;
+
+       err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+       if (err)
+               goto err_put;
+       return 0;
+
+err_put:
+       mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+       mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+       return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
+               else {
+                       *cqn = get_param_l(&out_param);
+                       return 0;
+               }
+       }
+       return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+       mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+       mlx4_table_put(dev, &cq_table->table, cqn);
+       mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, cqn);
+               err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+       } else
+               __mlx4_cq_free_icm(dev, cqn);
+}
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
                  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
                  unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 
        cq->vector = vector;
 
-       cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-       if (cq->cqn == -1)
-               return -ENOMEM;
-
-       err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
-       if (err)
-               goto err_out;
-
-       err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+       err = mlx4_cq_alloc_icm(dev, &cq->cqn);
        if (err)
-               goto err_put;
+               return err;
 
        spin_lock_irq(&cq_table->lock);
        err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
        spin_unlock_irq(&cq_table->lock);
        if (err)
-               goto err_cmpt_put;
+               goto err_icm;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
        radix_tree_delete(&cq_table->tree, cq->cqn);
        spin_unlock_irq(&cq_table->lock);
 
-err_cmpt_put:
-       mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+       mlx4_cq_free_icm(dev, cq->cqn);
 
        return err;
 }
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
                complete(&cq->free);
        wait_for_completion(&cq->free);
 
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+       mlx4_cq_free_icm(dev, cq->cqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_cq_free);
 
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
                               dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
        /* Nothing to do to clean up radix_tree */
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
 }
index 227997d..2d1a342 100644 (file)
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
        int err;
 
        cq->size = entries;
-       if (mode == RX)
-               cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-       else
-               cq->buf_size = sizeof(struct mlx4_cqe);
+       cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
 
        cq->ring = ring;
        cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                cq->size = priv->rx_ring[cq->ring].actual_size;
 
        err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
-                           cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+                           cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
        if (err)
                return err;
 
index 74e2a2a..7dbc6a2 100644 (file)
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
 
-       strncpy(drvinfo->driver, DRV_NAME, 32);
-       strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
-       sprintf(drvinfo->fw_version, "%d.%d.%d",
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d",
                (u16) (mdev->dev->caps.fw_ver >> 32),
                (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
                (u16) (mdev->dev->caps.fw_ver & 0xffff));
-       strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        int err = 0;
        u64 config = 0;
+       u64 mask;
 
-       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+       if ((priv->port < 1) || (priv->port > 2)) {
+               en_err(priv, "Failed to get WoL information\n");
+               return;
+       }
+
+       mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+               MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+       if (!(priv->mdev->dev->caps.flags & mask)) {
                wol->supported = 0;
                wol->wolopts = 0;
                return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
        struct mlx4_en_priv *priv = netdev_priv(netdev);
        u64 config = 0;
        int err = 0;
+       u64 mask;
+
+       if ((priv->port < 1) || (priv->port > 2))
+               return -EOPNOTSUPP;
+
+       mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+               MLX4_DEV_CAP_FLAG_WOL_PORT2;
 
-       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+       if (!(priv->mdev->dev->caps.flags & mask))
                return -EOPNOTSUPP;
 
        if (wol->supported & ~WAKE_MAGIC)
index 78d776b..1db6fea 100644 (file)
@@ -45,7 +45,7 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                en_err(priv, "failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);
 
+       return 0;
 }
 
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        mutex_unlock(&mdev->state_lock);
+
+       return 0;
 }
 
 u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_replace_mac(mdev->dev, priv->port,
-                                      priv->base_qpn, priv->mac, 0);
+                                      priv->base_qpn, priv->mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
@@ -204,6 +207,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
                goto out;
        }
 
+       if (!netif_carrier_ok(dev)) {
+               if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+                       if (priv->port_state.link_state) {
+                               priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+                               netif_carrier_on(dev);
+                               en_dbg(LINK, priv, "Link Up\n");
+                       }
+               }
+       }
+
        /*
         * Promiscuous mode: disable all filters
         */
@@ -599,12 +612,12 @@ int mlx4_en_start_port(struct net_device *dev)
                ++rx_index;
        }
 
-       /* Set port mac number */
-       en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-       err = mlx4_register_mac(mdev->dev, priv->port,
-                               priv->mac, &priv->base_qpn, 0);
+       /* Set qp number */
+       en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+       err = mlx4_get_eth_qp(mdev->dev, priv->port,
+                               priv->mac, &priv->base_qpn);
        if (err) {
-               en_err(priv, "Failed setting port mac\n");
+               en_err(priv, "Failed getting eth qp\n");
                goto cq_err;
        }
        mdev->mac_removed[priv->port] = 0;
@@ -699,7 +712,7 @@ tx_err:
 
        mlx4_en_release_rss_steer(priv);
 mac_err:
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
 cq_err:
        while (rx_index--)
                mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +758,6 @@ void mlx4_en_stop_port(struct net_device *dev)
        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
-       /* Unregister Mac address for the port */
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
-       mdev->mac_removed[priv->port] = 1;
-
        /* Free TX Rings */
        for (i = 0; i < priv->tx_ring_num; i++) {
                mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +771,10 @@ void mlx4_en_stop_port(struct net_device *dev)
        /* Free RSS qps */
        mlx4_en_release_rss_steer(priv);
 
+       /* Unregister Mac address for the port */
+       mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+       mdev->mac_removed[priv->port] = 1;
+
        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -974,6 +987,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+static int mlx4_en_set_features(struct net_device *netdev,
+               netdev_features_t features)
+{
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+       if (features & NETIF_F_LOOPBACK)
+               priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+       else
+               priv->ctrl_flags &=
+                       cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+       return 0;
+
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
@@ -990,6 +1018,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
+       .ndo_set_features       = mlx4_en_set_features,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1051,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->port = port;
        priv->port_up = false;
        priv->flags = prof->flags;
+       priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+                       MLX4_WQE_CTRL_SOLICITED);
        priv->tx_ring_num = prof->tx_ring_num;
        priv->rx_ring_num = prof->rx_ring_num;
        priv->mac_index = -1;
@@ -1088,6 +1119,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->features = dev->hw_features | NETIF_F_HIGHDMA |
                        NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                        NETIF_F_HW_VLAN_FILTER;
+       dev->hw_features |= NETIF_F_LOOPBACK;
 
        mdev->pndev[port] = dev;
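
In mlx4_en_set_features() above, ctrl_flags is a big-endian (__be32) word, so the loopback bit is cleared by masking with cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK). That is the same as masking with the complement of the byte-swapped flag, because byte swapping commutes with bitwise NOT. A standalone check, with htonl() standing in for cpu_to_be32() and a stand-in flag value:

    /* Standalone check (not driver code): with a big-endian field,
     *     x &= cpu_to_be32(~FLAG)   and   x &= ~cpu_to_be32(FLAG)
     * are the same operation, because byte swapping commutes with
     * bitwise NOT.  FORCE_LOOPBACK below is a stand-in value, not the
     * real MLX4_WQE_CTRL_FORCE_LOOPBACK.
     */
    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FORCE_LOOPBACK  (1u << 10)

    int main(void)
    {
            uint32_t a = htonl(0xffffffffu);
            uint32_t b = a;

            a &= htonl(~FORCE_LOOPBACK);
            b &= ~htonl(FORCE_LOOPBACK);

            assert(a == b);
            printf("0x%08x == 0x%08x\n", (unsigned)a, (unsigned)b);
            return 0;
    }
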
 
index 03c84cd..3317914 100644 (file)
 #include "mlx4_en.h"
 
 
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
-                       u64 mac, u64 clear, u8 mode)
-{
-       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
-                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
                filter->entry[i] = cpu_to_be32(entry);
        }
        err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
-                      MLX4_CMD_TIME_CLASS_B);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
-                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_general_context *context;
-       int err;
-       u32 in_mod;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->flags = SET_PORT_GEN_ALL_VALID;
-       context->mtu = cpu_to_be16(mtu);
-       context->pptx = (pptx * (!pfctx)) << 7;
-       context->pfctx = pfctx;
-       context->pprx = (pprx * (!pfcrx)) << 7;
-       context->pfcrx = pfcrx;
-
-       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
-                          u8 promisc)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_rqp_calc_context *context;
-       int err;
-       u32 in_mod;
-       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
-                                               MCAST_DIRECT : MCAST_DEFAULT;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
-                       dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
-               return 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->base_qpn = cpu_to_be32(base_qpn);
-       context->n_mac = dev->caps.log_num_macs;
-       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
-                                      base_qpn);
-       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
-                                    base_qpn);
-       context->intra_no_vlan = 0;
-       context->no_vlan = MLX4_NO_VLAN_IDX;
-       context->intra_vlan_miss = 0;
-       context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
-       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
                return PTR_ERR(mailbox);
        memset(mailbox->buf, 0, sizeof(*qport_context));
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
-                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err)
                goto out;
        qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                return PTR_ERR(mailbox);
        memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
-                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err)
                goto out;
 
index 19eb244..6934fd7 100644 (file)
 #define SET_PORT_PROMISC_SHIFT 31
 #define SET_PORT_MC_PROMISC_SHIFT      30
 
-enum {
-       MLX4_CMD_SET_VLAN_FLTR  = 0x47,
-       MLX4_CMD_SET_MCAST_FLTR = 0x48,
-       MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
-       MCAST_DIRECT_ONLY       = 0,
-       MCAST_DIRECT            = 1,
-       MCAST_DEFAULT           = 2
-};
-
-struct mlx4_set_port_general_context {
-       u8 reserved[3];
-       u8 flags;
-       u16 reserved2;
-       __be16 mtu;
-       u8 pptx;
-       u8 pfctx;
-       u16 reserved3;
-       u8 pprx;
-       u8 pfcrx;
-       u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
-       __be32 base_qpn;
-       u8 rererved;
-       u8 n_mac;
-       u8 n_vlan;
-       u8 n_prio;
-       u8 reserved2[3];
-       u8 mac_miss;
-       u8 intra_no_vlan;
-       u8 no_vlan;
-       u8 intra_vlan_miss;
-       u8 vlan_miss;
-       u8 reserved3[3];
-       u8 no_vlan_prio;
-       __be32 promisc;
-       __be32 mcast;
-};
-
 #define VLAN_FLTR_SIZE 128
 struct mlx4_set_vlan_fltr_mbox {
        __be32 entry[VLAN_FLTR_SIZE];
index 0dfb4ec..bcbc54c 100644 (file)
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
        struct mlx4_en_dev *mdev = priv->mdev;
 
        memset(context, 0, sizeof *context);
-       context->flags = cpu_to_be32(7 << 16 | rss << 13);
+       context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
        context->pd = cpu_to_be32(mdev->priv_pdn);
        context->mtu_msgmax = 0xff;
        if (!is_tx && !rss)
index c2df6c3..630a7c1 100644 (file)
@@ -541,6 +541,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        unsigned int length;
        int polled = 0;
        int ip_summed;
+       struct ethhdr *ethh;
+       u64 s_mac;
 
        if (!priv->port_up)
                return 0;
@@ -577,6 +579,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        goto next;
                }
 
+               /* Get a pointer to the first fragment, since we don't have an
+                * skb yet, and cast it to an ethhdr struct */
+               ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+                                        skb_frags[0].offset);
+               s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+               /* If the source MAC is our own MAC, drop the packet unless
+                * loopback (flb) is enabled and the selftest is running */
+               if (s_mac == priv->mac &&
+                       (!(dev->features & NETIF_F_LOOPBACK) ||
+                        !priv->validate_loopback))
+                       goto next;
+
                /*
                 * Packet is OK - process it.
                 */
@@ -837,9 +852,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        struct mlx4_qp_context context;
-       struct mlx4_en_rss_context *rss_context;
+       struct mlx4_rss_context *rss_context;
        void *ptr;
-       u8 rss_mask = 0x3f;
+       u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+                       MLX4_RSS_TCP_IPV6);
        int i, qpn;
        int err = 0;
        int good_qps = 0;
@@ -877,18 +893,21 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
                                priv->rx_ring[0].cqn, &context);
 
-       ptr = ((void *) &context) + 0x3c;
+       ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+                                       + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
        rss_context = ptr;
        rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
                                            (rss_map->base_qpn));
+       if (priv->mdev->profile.udp_rss) {
+               rss_mask |=  MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+               rss_context->base_qpn_udp = rss_context->default_qpn;
+       }
        rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        rss_context->flags = rss_mask;
-       rss_context->hash_fn = 1;
+       rss_context->hash_fn = MLX4_RSS_HASH_TOP;
        for (i = 0; i < 10; i++)
                rss_context->rss_key[i] = rsskey[i];
 
-       if (priv->mdev->profile.udp_rss)
-               rss_context->base_qpn_udp = rss_context->default_qpn;
        err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
                               &rss_map->indir_qp, &rss_map->indir_state);
        if (err)
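
The new RX-path check compares the packet's source MAC against priv->mac as 64-bit integers via mlx4_en_mac_to_u64(), whose body is not shown in this hunk. A standalone sketch of what it is assumed to do (pack the six MAC bytes, most significant first, into a u64):

    /* Standalone sketch (assumption) of mlx4_en_mac_to_u64(): packing the
     * MAC into an integer lets the RX path compare a source MAC against
     * priv->mac with a single comparison.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mac_to_u64(const uint8_t *addr)
    {
            uint64_t mac = 0;
            int i;

            for (i = 0; i < 6; i++) {
                    mac <<= 8;
                    mac |= addr[i];
            }
            return mac;
    }

    int main(void)
    {
            uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };

            printf("0x%012llx\n", (unsigned long long)mac_to_u64(mac));
            return 0;
    }
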
index 9fdbcec..bf2e5d3 100644 (file)
@@ -43,7 +43,7 @@
 static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
 {
        return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
index d901b42..9ef9038 100644 (file)
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        return cnt;
 }
 
-
 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-       struct mlx4_cqe *cqe = cq->buf;
+       struct mlx4_cqe *cqe;
        u16 index;
-       u16 new_index;
+       u16 new_index, ring_index;
        u32 txbbs_skipped = 0;
-       u32 cq_last_sav;
-
-       /* index always points to the first TXBB of the last polled descriptor */
-       index = ring->cons & ring->size_mask;
-       new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-       if (index == new_index)
-               return;
+       u32 cons_index = mcq->cons_index;
+       int size = cq->size;
+       u32 size_mask = ring->size_mask;
+       struct mlx4_cqe *buf = cq->buf;
 
        if (!priv->port_up)
                return;
 
-       /*
-        * We use a two-stage loop:
-        * - the first samples the HW-updated CQE
-        * - the second frees TXBBs until the last sample
-        * This lets us amortize CQE cache misses, while still polling the CQ
-        * until is quiescent.
-        */
-       cq_last_sav = mcq->cons_index;
-       do {
+       index = cons_index & size_mask;
+       cqe = &buf[index];
+       ring_index = ring->cons & size_mask;
+
+       /* Process all completed CQEs */
+       while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+                       cons_index & size)) {
+               /*
+                * make sure we read the CQE after we read the
+                * ownership bit
+                */
+               rmb();
+
+               /* Skip over last polled CQE */
+               new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
                do {
-                       /* Skip over last polled CQE */
-                       index = (index + ring->last_nr_txbb) & ring->size_mask;
                        txbbs_skipped += ring->last_nr_txbb;
-
-                       /* Poll next CQE */
+                       ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+                       /* free next descriptor */
                        ring->last_nr_txbb = mlx4_en_free_tx_desc(
-                                               priv, ring, index,
-                                               !!((ring->cons + txbbs_skipped) &
-                                                  ring->size));
-                       ++mcq->cons_index;
-
-               } while (index != new_index);
+                                       priv, ring, ring_index,
+                                       !!((ring->cons + txbbs_skipped) &
+                                                       ring->size));
+               } while (ring_index != new_index);
+
+               ++cons_index;
+               index = cons_index & size_mask;
+               cqe = &buf[index];
+       }
 
-               new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-       } while (index != new_index);
-       AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
-                        (u32) (mcq->cons_index - cq_last_sav));
 
        /*
         * To prevent CQ overflow we first update CQ consumer and only then
         * the ring consumer.
         */
+       mcq->cons_index = cons_index;
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
                inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
        }
        tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
-       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+               (!!vlan_tx_tag_present(skb));
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Prepare ctrl segment apart opcode+ownership, which depends on
         * whether LSO is used */
        tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+               !!vlan_tx_tag_present(skb);
        tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
-       tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
-                                               MLX4_WQE_CTRL_SOLICITED);
+       tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
                                                         MLX4_WQE_CTRL_TCP_UDP_CSUM);
                ring->tx_csum++;
        }
 
-       if (unlikely(priv->validate_loopback)) {
-               /* Copy dst mac address to wqe */
-               skb_reset_mac_header(skb);
-               ethh = eth_hdr(skb);
-               if (ethh && ethh->h_dest) {
-                       mac = mlx4_en_mac_to_u64(ethh->h_dest);
-                       mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
-                       mac_l = (u32) (mac & 0xffffffff);
-                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
-                       tx_desc->ctrl.imm = cpu_to_be32(mac_l);
-               }
+       /* Copy dst mac address to wqe */
+       skb_reset_mac_header(skb);
+       ethh = eth_hdr(skb);
+       if (ethh && ethh->h_dest) {
+               mac = mlx4_en_mac_to_u64(ethh->h_dest);
+               mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+               mac_l = (u32) (mac & 0xffffffff);
+               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+               tx_desc->ctrl.imm = cpu_to_be32(mac_l);
        }
 
        /* Handle LSO (TSO) packets */
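
The reworked TX completion loop keys off the CQE ownership bit instead of the last reported wqe_index: XNOR(x, y) is assumed to expand to (!(x) == !(y)), so a CQE is software-owned when its owner bit matches the parity of the current pass over the ring (cons_index & size). A standalone illustration of that convention (ring size and bit values are stand-ins):

    /* Standalone illustration of the ownership convention used by the new
     * TX completion loop; not the real mlx4 definitions.
     */
    #include <stdio.h>

    #define XNOR(x, y)      (!(x) == !(y))
    #define RING_SIZE       8               /* power of two */
    #define OWNER_BIT       0x80            /* stands in for MLX4_CQE_OWNER_MASK */

    int main(void)
    {
            unsigned char owner[RING_SIZE];
            unsigned int cons = 0;
            int i;

            /* Even pass: (cons & RING_SIZE) == 0, so entries written by
             * "hardware" on this pass carry owner bit 0.  Pretend entries
             * 0..4 have completed; 5..7 still hold the opposite bit.
             */
            for (i = 0; i < RING_SIZE; i++)
                    owner[i] = (i < 5) ? 0 : OWNER_BIT;

            while (XNOR(owner[cons & (RING_SIZE - 1)] & OWNER_BIT,
                        cons & RING_SIZE))
                    printf("consume CQE %u\n", cons++ & (RING_SIZE - 1));

            printf("stopped at cons_index %u\n", cons);
            return 0;
    }
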
index 24ee967..1e9b55e 100644 (file)
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       u8                      log_eq_size;
-       u8                      reserved2[4];
-       u8                      eq_period;
-       u8                      reserved3;
-       u8                      eq_max_count;
-       u8                      reserved4[3];
-       u8                      intr;
-       u8                      log_page_size;
-       u8                      reserved5[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       u32                     reserved6[2];
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved7[4];
-};
-
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
-                              (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
-       u8                      reserved1;
-       u8                      type;
-       u8                      reserved2;
-       u8                      subtype;
-       union {
-               u32             raw[6];
-               struct {
-                       __be32  cqn;
-               } __packed comp;
-               struct {
-                       u16     reserved1;
-                       __be16  token;
-                       u32     reserved2;
-                       u8      reserved3[3];
-                       u8      status;
-                       __be64  out_param;
-               } __packed cmd;
-               struct {
-                       __be32  qpn;
-               } __packed qp;
-               struct {
-                       __be32  srqn;
-               } __packed srq;
-               struct {
-                       __be32  cqn;
-                       u32     reserved1;
-                       u8      reserved2[3];
-                       u8      syndrome;
-               } __packed cq_err;
-               struct {
-                       u32     reserved1[2];
-                       __be32  port;
-               } __packed port_change;
-       }                       event;
-       u8                      reserved3[3];
-       u8                      owner;
-} __packed;
+                              (1ull << MLX4_EVENT_TYPE_CMD)                | \
+                              (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
+                              (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+       struct mlx4_eqe *eqe =
+               &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+       return (!!(eqe->owner & 0x80) ^
+               !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+               eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work, struct mlx4_mfunc_master_ctx,
+                            slave_event_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+       struct mlx4_eqe *eqe;
+       u8 slave;
+       int i;
+
+       for (eqe = next_slave_event_eqe(slave_eq); eqe;
+             eqe = next_slave_event_eqe(slave_eq)) {
+               slave = eqe->slave_id;
+
+               /* All active slaves need to receive the event */
+               if (slave == ALL_SLAVES) {
+                       for (i = 0; i < dev->num_slaves; i++) {
+                               if (i != dev->caps.function &&
+                                   master->slave_state[i].active)
+                                       if (mlx4_GEN_EQE(dev, i, eqe))
+                                               mlx4_warn(dev, "Failed to "
+                                                         "generate event "
+                                                         "for slave %d\n", i);
+                       }
+               } else {
+                       if (mlx4_GEN_EQE(dev, slave, eqe))
+                               mlx4_warn(dev, "Failed to generate event "
+                                              "for slave %d\n", slave);
+               }
+               ++slave_eq->cons;
+       }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+       struct mlx4_eqe *s_eqe =
+               &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+       if ((!!(s_eqe->owner & 0x80)) ^
+           (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+               mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+                         "No free EQE on slave events queue\n", slave);
+               return;
+       }
+
+       memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+       s_eqe->slave_id = slave;
+       /* ensure all information is written before setting the ownership bit */
+       wmb();
+       s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+       ++slave_eq->prod;
+
+       queue_work(priv->mfunc.master.comm_wq,
+                  &priv->mfunc.master.slave_event_work);
+}
+
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+                            struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave =
+               &priv->mfunc.master.slave_state[slave];
+
+       if (!s_slave->active) {
+               /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+               return;
+       }
+
+       slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+       struct mlx4_mfunc_master_ctx *master =
+               container_of(work, struct mlx4_mfunc_master_ctx,
+                            slave_flr_event_work);
+       struct mlx4_mfunc *mfunc =
+               container_of(master, struct mlx4_mfunc, master);
+       struct mlx4_priv *priv =
+               container_of(mfunc, struct mlx4_priv, mfunc);
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+       int i;
+       int err;
+
+       mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+       for (i = 0 ; i < dev->num_slaves; i++) {
+
+               if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+                       mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+                                "clean slave: %d\n", i);
+
+                       mlx4_delete_all_resources_for_slave(dev, i);
+                       /*return the slave to running mode*/
+                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+                       slave_state[i].is_slave_going_down = 0;
+                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       /*notify the FW:*/
+                       err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+                                      MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+                       if (err)
+                               mlx4_warn(dev, "Failed to notify FW on "
+                                         "FLR done (slave:%d)\n", i);
+               }
+       }
+}
+
 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
+       struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
+       int slave = 0;
+       int ret;
+       u32 flr_slave;
+       u8 update_slave_state;
+       int i;
 
        while ((eqe = next_eqe_sw(eq))) {
                /*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-                                     eqe->type);
+                       mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+                       if (mlx4_is_master(dev)) {
+                               /* forward only to slave owning the QP */
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                               RES_QP,
+                                               be32_to_cpu(eqe->event.qp.qpn)
+                                               & 0xffffff, &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_dbg(dev, "QP event %02x(%02x) on "
+                                                "EQ %d at index %u: could "
+                                                "not get slave id (%d)\n",
+                                                eqe->type, eqe->subtype,
+                                                eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+
+                       }
+                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+                                     0xffffff, eqe->type);
                        break;
 
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
+                       mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+                                 __func__);
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-                                     eqe->type);
+                       if (mlx4_is_master(dev)) {
+                               /* forward only to slave owning the SRQ */
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                               RES_SRQ,
+                                               be32_to_cpu(eqe->event.srq.srqn)
+                                               & 0xffffff,
+                                               &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_warn(dev, "SRQ event %02x(%02x) "
+                                                 "on EQ %d at index %u: could"
+                                                 " not get slave id (%d)\n",
+                                                 eqe->type, eqe->subtype,
+                                                 eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+                                         " event: %02x(%02x)\n", __func__,
+                                         slave,
+                                         be32_to_cpu(eqe->event.srq.srqn),
+                                         eqe->type, eqe->subtype);
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_warn(dev, "%s: sending event "
+                                                 "%02x(%02x) to slave:%d\n",
+                                                  __func__, eqe->type,
+                                                 eqe->subtype, slave);
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+                       }
+                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+                                      0xffffff, eqe->type);
                        break;
 
                case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+                               mlx4_dispatch_event(dev,
+                                                   MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+                               if (mlx4_is_master(dev))
+                                       /* change the state of all slaves'
+                                        * ports to down: */
+                                       for (i = 0; i < dev->num_slaves; i++) {
+                                               mlx4_dbg(dev, "%s: Sending "
+                                                        "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+                                                        " to slave: %d, port:%d\n",
+                                                        __func__, i, port);
+                                               if (i == dev->caps.function)
+                                                       continue;
+                                               mlx4_slave_event(dev, i, eqe);
+                                       }
                        } else {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+                               mlx4_dispatch_event(dev,
+                                                   MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+                               if (mlx4_is_master(dev)) {
+                                       for (i = 0; i < dev->num_slaves; i++) {
+                                               if (i == dev->caps.function)
+                                                       continue;
+                                               mlx4_slave_event(dev, i, eqe);
+                                       }
+                               }
                        }
                        break;
 
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-                       mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+                       if (mlx4_is_master(dev)) {
+                               ret = mlx4_get_slave_from_resource_id(dev,
+                                       RES_CQ,
+                                       be32_to_cpu(eqe->event.cq_err.cqn)
+                                       & 0xffffff, &slave);
+                               if (ret && ret != -ENOENT) {
+                                       mlx4_dbg(dev, "CQ event %02x(%02x) on "
+                                                "EQ %d at index %u: could "
+                                                 "not get slave id (%d)\n",
+                                                 eqe->type, eqe->subtype,
+                                                 eq->eqn, eq->cons_index, ret);
+                                       break;
+                               }
+
+                               if (!ret && slave != dev->caps.function) {
+                                       mlx4_slave_event(dev, slave, eqe);
+                                       break;
+                               }
+                       }
+                       mlx4_cq_event(dev,
+                                     be32_to_cpu(eqe->event.cq_err.cqn)
+                                     & 0xffffff,
                                      eqe->type);
                        break;
 
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;
 
+               case MLX4_EVENT_TYPE_COMM_CHANNEL:
+                       if (!mlx4_is_master(dev)) {
+                               mlx4_warn(dev, "Received comm channel event "
+                                              "for non master device\n");
+                               break;
+                       }
+                       memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+                              eqe->event.comm_channel_arm.bit_vec,
+                              sizeof eqe->event.comm_channel_arm.bit_vec);
+                       queue_work(priv->mfunc.master.comm_wq,
+                                  &priv->mfunc.master.comm_work);
+                       break;
+
+               case MLX4_EVENT_TYPE_FLR_EVENT:
+                       flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+                       if (!mlx4_is_master(dev)) {
+                               mlx4_warn(dev, "Non-master function received "
+                                              "FLR event\n");
+                               break;
+                       }
+
+                       mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+                       if (flr_slave > dev->num_slaves) {
+                               mlx4_warn(dev,
+                                         "Got FLR for unknown function: %d\n",
+                                         flr_slave);
+                               update_slave_state = 0;
+                       } else
+                               update_slave_state = 1;
+
+                       spin_lock(&priv->mfunc.master.slave_state_lock);
+                       if (update_slave_state) {
+                               priv->mfunc.master.slave_state[flr_slave].active = false;
+                               priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+                               priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+                       }
+                       spin_unlock(&priv->mfunc.master.slave_state_lock);
+                       queue_work(priv->mfunc.master.comm_wq,
+                                  &priv->mfunc.master.slave_flr_event_work);
+                       break;
                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
-                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
-                                 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+                                 "index %u. owner=%x, nent=0x%x, slave=%x, "
+                                 "ownership=%s\n",
+                                 eqe->type, eqe->subtype, eq->eqn,
+                                 eq->cons_index, eqe->owner, eq->nent,
+                                 eqe->slave_id,
+                                 !!(eqe->owner & 0x80) ^
+                                 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                        break;
-               }
+               };
 
                ++eq->cons_index;
                eqes_found = 1;
@@ -290,25 +505,58 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
        return IRQ_HANDLED;
 }
 
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq_info *event_eq =
+               &priv->mfunc.master.slave_state[slave].event_eq;
+       u32 in_modifier = vhcr->in_modifier;
+       u32 eqn = in_modifier & 0x1FF;
+       u64 in_param =  vhcr->in_param;
+       int err = 0;
+
+       if (slave == dev->caps.function)
+               err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+                              0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+                              MLX4_CMD_NATIVE);
+       if (!err) {
+               if (in_modifier >> 31) {
+                       /* unmap */
+                       event_eq->event_type &= ~in_param;
+               } else {
+                       event_eq->eqn = eqn;
+                       event_eq->event_type = in_param;
+               }
+       }
+       return err;
+}
+
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                        int eq_num)
 {
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
-                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+                       MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
-                           MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+                           0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+                           MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -585,14 +833,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;
 
-       err = mlx4_map_clr_int(dev);
-       if (err)
-               goto err_out_bitmap;
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_map_clr_int(dev);
+               if (err)
+                       goto err_out_bitmap;
 
-       priv->eq_table.clr_mask =
-               swab32(1 << (priv->eq_table.inta_pin & 31));
-       priv->eq_table.clr_int  = priv->clr_base +
-               (priv->eq_table.inta_pin < 32 ? 4 : 0);
+               priv->eq_table.clr_mask =
+                       swab32(1 << (priv->eq_table.inta_pin & 31));
+               priv->eq_table.clr_int  = priv->clr_base +
+                       (priv->eq_table.inta_pin < 32 ? 4 : 0);
+       }
 
        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +950,8 @@ err_out_unmap:
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
-       mlx4_unmap_clr_int(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);
 
 err_out_bitmap:
@@ -725,7 +976,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
        for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
-       mlx4_unmap_clr_int(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_unmap_clr_int(dev);
 
        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
@@ -748,7 +1000,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 
        err = mlx4_NOP(dev);
        /* When not in MSI_X, there is only one irq to check */
-       if (!(dev->flags & MLX4_FLAG_MSI_X))
+       if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
                return err;
 
        /* A loop over all completion vectors, for each vector we will check
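
mlx4_gen_slave_eqe() and mlx4_master_handle_slave_flr() above recover their context purely through container_of() chains: embedded work_struct -> master context -> mfunc -> priv -> dev. A standalone illustration of the pattern with stand-in types (not the real mlx4 structures):

    /* Standalone illustration of the container_of() pattern used by the
     * new work handlers: from a pointer to the embedded work item, walk
     * back to the enclosing context.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct master_ctx {
            int num_slaves;
            struct work_struct slave_event_work;
    };

    static void slave_event_handler(struct work_struct *work)
    {
            struct master_ctx *master =
                    container_of(work, struct master_ctx, slave_event_work);

            printf("handling events for %d slaves\n", master->num_slaves);
    }

    int main(void)
    {
            struct master_ctx ctx = { .num_slaves = 4 };

            slave_event_handler(&ctx.slave_event_work);
            return 0;
    }
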
index 435ca6e..f03b54e 100644 (file)
@@ -32,6 +32,7 @@
  * SOFTWARE.
  */
 
+#include <linux/etherdevice.h>
 #include <linux/mlx4/cmd.h>
 #include <linux/module.h>
 #include <linux/cache.h>
@@ -139,12 +140,185 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
        MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
 
        err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
 
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       u8      field;
+       u32     size;
+       int     err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET            0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET                0x1
+#define QUERY_FUNC_CAP_FUNCTION_OFFSET         0x3
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET          0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET         0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET         0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET                0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET                0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET                0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET                0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET           0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET      0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET                0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET                0xc
+
+       if (vhcr->op_modifier == 1) {
+               field = vhcr->in_modifier;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+               field = 0; /* ensure fvl bit is not set */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+       } else if (vhcr->op_modifier == 0) {
+               field = 1 << 7; /* enable only ethernet interface */
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+               field = slave;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+
+               field = dev->caps.num_ports;
+               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+               size = 0; /* no PF behaviour is set for now */
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+               size = dev->caps.num_qps;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+               size = dev->caps.num_srqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+               size = dev->caps.num_cqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+               size = dev->caps.num_eqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+               size = dev->caps.reserved_eqs;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+               size = dev->caps.num_mpts;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+               size = dev->caps.num_mtts;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+               size = dev->caps.num_mgms + dev->caps.num_amgms;
+               MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+       } else
+               err = -EINVAL;
+
+       return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32                     *outbox;
+       u8                      field;
+       u32                     size;
+       int                     i;
+       int                     err = 0;
+
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+       if (err)
+               goto out;
+
+       outbox = mailbox->buf;
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+       if (!(field & (1 << 7))) {
+               mlx4_err(dev, "The host doesn't support eth interface\n");
+               err = -EPROTONOSUPPORT;
+               goto out;
+       }
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
+       func_cap->function = field;
+
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+       func_cap->num_ports = field;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+       func_cap->pf_context_behaviour = size;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+       func_cap->qp_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+       func_cap->srq_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+       func_cap->cq_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+       func_cap->max_eq = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+       func_cap->reserved_eq = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+       func_cap->mpt_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+       func_cap->mtt_quota = size & 0xFFFFFF;
+
+       MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+       func_cap->mcg_quota = size & 0xFFFFFF;
+
+       for (i = 1; i <= func_cap->num_ports; ++i) {
+               err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+                                  MLX4_CMD_QUERY_FUNC_CAP,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       goto out;
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+               if (field & (1 << 7)) {
+                       mlx4_err(dev, "VLAN is enforced on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+
+               if (field & (1 << 6)) {
+                       mlx4_err(dev, "Force mac is enabled on this port\n");
+                       err = -EPROTONOSUPPORT;
+                       goto out;
+               }
+
+               MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+               func_cap->physical_port[i] = field;
+       }
+
+       /* All other resources are allocated by the master, but we still report
+        * 'num' and 'reserved' capabilities as follows:
+        * - num remains the maximum resource index
+        * - 'num - reserved' is the total available objects of a resource, but
+        *   resource indices may be less than 'reserved'
+        * TODO: set per-resource quotas */
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
        struct mlx4_cmd_mailbox *mailbox;
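
The QUERY_FUNC_CAP wrapper above fills the outbox with MLX4_PUT() and the caller reads it back with MLX4_GET(), i.e. fixed byte offsets into a big-endian mailbox buffer. A rough standalone sketch of the 32-bit case (an assumption about the macros' behaviour; the real ones also handle 8-, 16- and 64-bit fields based on the size of the destination):

    /* Standalone sketch (assumption): a 32-bit MLX4_GET() is essentially a
     * memcpy from a fixed offset plus a big-endian-to-CPU byte swap.
     */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QP_QUOTA_OFFSET 0x10    /* QUERY_FUNC_CAP_QP_QUOTA_OFFSET */

    static uint32_t get_u32(const void *outbox, size_t offset)
    {
            uint32_t v;

            memcpy(&v, (const uint8_t *)outbox + offset, sizeof(v));
            return ntohl(v);        /* mailbox fields are big-endian */
    }

    int main(void)
    {
            uint8_t outbox[256] = { 0 };
            uint32_t qp_quota = htonl(0x20000);     /* pretend value from FW */

            memcpy(outbox + QP_QUOTA_OFFSET, &qp_quota, sizeof(qp_quota));
            printf("qp_quota = %u\n", get_u32(outbox, QP_QUOTA_OFFSET) & 0xFFFFFF);
            return 0;
    }
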
@@ -229,7 +403,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-                          MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
        if (err)
                goto out;
 
@@ -396,7 +570,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
                for (i = 1; i <= dev_cap->num_ports; ++i) {
                        err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-                                          MLX4_CMD_TIME_CLASS_B);
+                                          MLX4_CMD_TIME_CLASS_B,
+                                          !mlx4_is_slave(dev));
                        if (err)
                                goto out;
 
@@ -470,6 +645,54 @@ out:
        return err;
 }
 
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       u64 def_mac;
+       u8 port_type;
+       int err;
+
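+       /* Run the real QUERY_PORT, then adjust the slave-visible fields below */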
+       err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_NATIVE);
+
+       if (!err && dev->caps.function != slave) {
+               /* set slave default_mac address */
+               MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+               def_mac += slave << 8;
+               MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+               /* get port type - currently only eth is enabled */
+               MLX4_GET(port_type, outbox->buf,
+                        QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+               /* disable ib */
+               port_type &= 0xFE;
+
+               /* check eth is enabled for this port */
+               if (!(port_type & 2))
+                       mlx4_dbg(dev, "QUERY PORT: eth not supported by host\n");
+
+               MLX4_PUT(outbox->buf, port_type,
+                        QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+       }
+
+       return err;
+}
+
+static int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+       struct mlx4_cmd_mailbox *outbox = ptr;
+
+       return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+                           MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +742,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
 
                        if (++nent == MLX4_MAILBOX_SIZE / 16) {
                                err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
-                                               MLX4_CMD_TIME_CLASS_B);
+                                               MLX4_CMD_TIME_CLASS_B,
+                                               MLX4_CMD_NATIVE);
                                if (err)
                                        goto out;
                                nent = 0;
@@ -528,7 +752,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
        }
 
        if (nent)
-               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+                              MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -557,13 +782,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
 
 int mlx4_UNMAP_FA(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 
 int mlx4_RUN_FW(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +806,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 
 #define QUERY_FW_OUT_SIZE             0x100
 #define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_PPF_ID                       0x09
 #define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
 #define QUERY_FW_MAX_CMD_OFFSET        0x0f
 #define QUERY_FW_ERR_START_OFFSET      0x30
@@ -589,13 +817,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 #define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
 #define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
 
+#define QUERY_FW_COMM_BASE_OFFSET      0x40
+#define QUERY_FW_COMM_BAR_OFFSET       0x48
+
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -608,6 +839,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
                ((fw_ver & 0xffff0000ull) >> 16) |
                ((fw_ver & 0x0000ffffull) << 16);
 
+       MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+       dev->caps.function = lg;
+
        MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
        if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
            cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +883,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
        MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
        fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
 
+       MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+       MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
+       fw->comm_bar = (fw->comm_bar >> 6) * 2;
+       mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+                fw->comm_bar, fw->comm_base);
        mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
 
        /*
@@ -711,7 +950,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
        outbox = mailbox->buf;
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
-                          MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (err)
                goto out;
 
@@ -743,6 +982,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define         INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
 #define         INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
 #define         INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
+#define         INIT_HCA_EQE_CQE_OFFSETS        (INIT_HCA_QPC_OFFSET + 0x38)
 #define         INIT_HCA_ALTC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
 #define         INIT_HCA_AUXC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
 #define         INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1071,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 
        /* UAR attributes */
 
-       MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_PUT(inbox, param->uar_page_sz,     INIT_HCA_UAR_PAGE_SZ_OFFSET);
        MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
 
-       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+                      MLX4_CMD_NATIVE);
 
        if (err)
                mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1084,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
        return err;
 }
 
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+                  struct mlx4_init_hca_param *param)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *outbox;
+       int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET   0x04
+
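+       /* QUERY_HCA output mirrors the INIT_HCA mailbox layout, so the
+        * INIT_HCA_* offsets are reused to parse it below */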
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+                          MLX4_CMD_QUERY_HCA,
+                          MLX4_CMD_TIME_CLASS_B,
+                          !mlx4_is_slave(dev));
+       if (err)
+               goto out;
+
+       MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+       /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+       MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
+       MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
+       MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
+       MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
+       MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
+       MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
+       MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
+       MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
+       MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+       MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+       /* multicast attributes */
+
+       MLX4_GET(param->mc_base,         outbox, INIT_HCA_MC_BASE_OFFSET);
+       MLX4_GET(param->log_mc_entry_sz, outbox,
+                INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+       MLX4_GET(param->log_mc_hash_sz,  outbox,
+                INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+       MLX4_GET(param->log_mc_table_sz, outbox,
+                INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+       /* TPT attributes */
+
+       MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
+       MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
+       MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+       /* UAR attributes */
+
+       MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port = vhcr->in_modifier;
+       int err;
+
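+       /* INIT_PORT is reference-counted across slaves: the command is issued
+        * to the HW only on the first reference for this port */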
+       if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+               return 0;
+
+       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+               return -ENODEV;
+
+       /* Enable port only if it was previously disabled */
+       if (!priv->mfunc.master.init_port_ref[port]) {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+               if (err)
+                       return err;
+               priv->mfunc.master.slave_state[slave].init_port_mask |=
+                       (1 << port);
+       }
+       ++priv->mfunc.master.init_port_ref[port];
+       return 0;
+}
+
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1222,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
                MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
 
                err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 
                mlx4_free_cmd_mailbox(dev, mailbox);
        } else
                err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
 
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int port = vhcr->in_modifier;
+       int err;
+
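+       /* Counterpart of the INIT_PORT wrapper: the HW CLOSE_PORT is issued
+        * only when the last reference to this port is dropped */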
+       if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+           (1 << port)))
+               return 0;
+
+       if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+               return -ENODEV;
+       if (priv->mfunc.master.init_port_ref[port] == 1) {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+                              MLX4_CMD_NATIVE);
+               if (err)
+                       return err;
+       }
+       priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+       --priv->mfunc.master.init_port_ref[port];
+       return 0;
+}
+
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
 {
-       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+                       MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
 
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
 {
-       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+                       MLX4_CMD_NATIVE);
 }
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
        int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
                               MLX4_CMD_SET_ICM_SIZE,
-                              MLX4_CMD_TIME_CLASS_A);
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (ret)
                return ret;
 
@@ -929,7 +1294,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 int mlx4_NOP(struct mlx4_dev *dev)
 {
        /* Input modifier of 0x1f means "finish as soon as possible." */
-       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
 }
 
 #define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1303,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
        return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
-                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+                           MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_read);
 
@@ -947,6 +1313,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
        u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
 
        return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
-                                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 EXPORT_SYMBOL_GPL(mlx4_wol_write);
index bf5ec22..3368363 100644 (file)
@@ -116,6 +116,23 @@ struct mlx4_dev_cap {
        u32 max_counters;
 };
 
+struct mlx4_func_cap {
+       u8      function;
+       u8      num_ports;
+       u8      flags;
+       u32     pf_context_behaviour;
+       int     qp_quota;
+       int     cq_quota;
+       int     srq_quota;
+       int     mpt_quota;
+       int     mtt_quota;
+       int     max_eq;
+       int     reserved_eq;
+       int     mcg_quota;
+       u8      physical_port[MLX4_MAX_PORTS + 1];
+       u8      port_flags[MLX4_MAX_PORTS + 1];
+};
+
 struct mlx4_adapter {
        char board_id[MLX4_BOARD_ID_LEN];
        u8   inta_pin;
@@ -133,6 +150,7 @@ struct mlx4_init_hca_param {
        u64 dmpt_base;
        u64 cmpt_base;
        u64 mtt_base;
+       u64 global_caps;
        u16 log_mc_entry_sz;
        u16 log_mc_hash_sz;
        u8  log_num_qps;
@@ -143,6 +161,7 @@ struct mlx4_init_hca_param {
        u8  log_mc_table_sz;
        u8  log_mpt_sz;
        u8  log_uar_sz;
+       u8  uar_page_sz; /* log pg sz in 4k chunks */
 };
 
 struct mlx4_init_ib_param {
@@ -167,12 +186,19 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
 int mlx4_UNMAP_FA(struct mlx4_dev *dev);
 int mlx4_RUN_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_FW(struct mlx4_dev *dev);
 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
 int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
index 02393fd..a9ade1c 100644 (file)
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
 static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
 {
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
 
 int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
index ca6feb5..b4e9f6f 100644 (file)
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
                mlx4_add_device(intf, priv);
 
        mutex_unlock(&intf_mutex);
-       mlx4_start_catas_poll(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_start_catas_poll(dev);
 
        return 0;
 }
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_interface *intf;
 
-       mlx4_stop_catas_poll(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_stop_catas_poll(dev);
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
index 94bbc85..b969bfb 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/delay.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 
 #endif /* CONFIG_PCI_MSI */
 
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+                       mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log of the mgm entry size, which"
+                                        " defines the num of qp per mcg, e.g."
+                                        " 10 gives 248. Valid range:"
+                                        " 9 <= log_num_mgm_entry_size <= 12");
+
+#define MLX4_VF                                        (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK            0
+#define PF_CONTEXT_BEHAVIOUR_MASK      0
+
 static char mlx4_version[] __devinitdata =
        DRV_NAME ": Mellanox ConnectX core driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
 
 static struct mlx4_profile default_profile = {
-       .num_qp         = 1 << 17,
+       .num_qp         = 1 << 18,
        .num_srq        = 1 << 16,
        .rdmarc_per_qp  = 1 << 4,
        .num_cq         = 1 << 16,
        .num_mcg        = 1 << 13,
-       .num_mpt        = 1 << 17,
+       .num_mpt        = 1 << 19,
        .num_mtt        = 1 << 20,
 };
 
-static int log_num_mac = 2;
+static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
 
@@ -104,10 +126,27 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                  "(0/1, default 0)");
 
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
 
+static int port_type_array[2] = {1, 1};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: IB by default");
+
+struct mlx4_port_config {
+       struct list_head list;
+       enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+       struct pci_dev *pdev;
+};
+
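+/* The master needs MLX4_MFUNC_EQ_NUM EQs for itself and for each slave,
+ * on top of the EQs reserved by firmware */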
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+       return dev->caps.reserved_eqs +
+               MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
@@ -140,10 +179,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
 {
        int i;
 
-       dev->caps.port_mask = 0;
        for (i = 1; i <= dev->caps.num_ports; ++i)
-               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
-                       dev->caps.port_mask |= 1 << (i - 1);
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
 }
 
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -194,6 +231,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev->caps.trans_code[i]     = dev_cap->trans_code[i];
        }
 
+       dev->caps.uar_page_size      = PAGE_SIZE;
        dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
        dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
        dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
@@ -207,7 +245,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
        dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
        dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
-       dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
+       dev->caps.num_qp_per_mgm     = mlx4_get_qp_per_mgm(dev);
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +254,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
-       dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
-       dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                                   dev->caps.mtts_per_seg);
+       dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
        dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
-       dev->caps.reserved_uars      = dev_cap->reserved_uars;
+
+       /* The first 128 UARs are used for EQ doorbells */
+       dev->caps.reserved_uars      = max_t(int, 128, dev_cap->reserved_uars);
        dev->caps.reserved_pds       = dev_cap->reserved_pds;
        dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->reserved_xrcds : 0;
        dev->caps.max_xrcds          = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
                                        dev_cap->max_xrcds : 0;
-       dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+       dev->caps.mtt_entry_sz       = dev_cap->mtt_entry_sz;
+
        dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
@@ -240,10 +279,36 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.log_num_prios = use_prio ? 3 : 0;
 
        for (i = 1; i <= dev->caps.num_ports; ++i) {
-               if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
-               else
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+               dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+               if (dev->caps.supported_type[i]) {
+                       /* if only ETH is supported - assign ETH */
+                       if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+                               dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+                       /* if only IB is supported,
+                        * assign IB only if SRIOV is off*/
+                       else if (dev->caps.supported_type[i] ==
+                                MLX4_PORT_TYPE_IB) {
+                               if (dev->flags & MLX4_FLAG_SRIOV)
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_NONE;
+                               else
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_IB;
+                       /* if IB and ETH are supported,
+                        * first of all check if SRIOV is on */
+                       } else if (dev->flags & MLX4_FLAG_SRIOV)
+                               dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+                       /* if IB and ETH are supported and SRIOV is off
+                        * use module parameters */
+                       else {
+                               if (port_type_array[i-1])
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_IB;
+                               else
+                                       dev->caps.port_type[i] =
+                                               MLX4_PORT_TYPE_ETH;
+                       }
+               }
                dev->caps.possible_type[i] = dev->caps.port_type[i];
                mlx4_priv(dev)->sense.sense_allowed[i] =
                        dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
@@ -262,8 +327,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
-       mlx4_set_port_mask(dev);
-
        dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
 
        dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +345,149 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
        return 0;
 }
+/* Check whether there are live VFs and return how many there are */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_state;
+       int i;
+       int ret = 0;
+
+       for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+               s_state = &priv->mfunc.master.slave_state[i];
+               if (s_state->active && s_state->last_cmd !=
+                   MLX4_COMM_CMD_RESET) {
+                       mlx4_warn(dev, "%s: slave: %d is still active\n",
+                                 __func__, i);
+                       ret++;
+               }
+       }
+       return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_state *s_slave;
+
+       if (!mlx4_is_master(dev))
+               return 0;
+
+       s_slave = &priv->mfunc.master.slave_state[slave];
+       return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+       int                        err;
+       u32                        page_size;
+       struct mlx4_dev_cap        dev_cap;
+       struct mlx4_func_cap       func_cap;
+       struct mlx4_init_hca_param hca_param;
+       int                        i;
+
+       memset(&hca_param, 0, sizeof(hca_param));
+       err = mlx4_QUERY_HCA(dev, &hca_param);
+       if (err) {
+               mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+               return err;
+       }
+
+       /* Fail if the HCA has an unknown capability */
+       if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+           HCA_GLOBAL_CAP_MASK) {
+               mlx4_err(dev, "Unknown hca global capabilities\n");
+               return -ENOSYS;
+       }
+
+       mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+       memset(&dev_cap, 0, sizeof(dev_cap));
+       err = mlx4_dev_cap(dev, &dev_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       page_size = ~dev->caps.page_size_cap + 1;
+       mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+       if (page_size > PAGE_SIZE) {
+               mlx4_err(dev, "HCA minimum page size of %d bigger than "
+                        "kernel PAGE_SIZE of %ld, aborting.\n",
+                        page_size, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       /* slave gets uar page size from QUERY_HCA fw command */
+       dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+       /* TODO: relax this assumption */
+       if (dev->caps.uar_page_size != PAGE_SIZE) {
+               mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+                        dev->caps.uar_page_size, PAGE_SIZE);
+               return -ENODEV;
+       }
+
+       memset(&func_cap, 0, sizeof(func_cap));
+       err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+           PF_CONTEXT_BEHAVIOUR_MASK) {
+               mlx4_err(dev, "Unknown pf context behaviour\n");
+               return -ENOSYS;
+       }
+
+       dev->caps.function              = func_cap.function;
+       dev->caps.num_ports             = func_cap.num_ports;
+       dev->caps.num_qps               = func_cap.qp_quota;
+       dev->caps.num_srqs              = func_cap.srq_quota;
+       dev->caps.num_cqs               = func_cap.cq_quota;
+       dev->caps.num_eqs               = func_cap.max_eq;
+       dev->caps.reserved_eqs          = func_cap.reserved_eq;
+       dev->caps.num_mpts              = func_cap.mpt_quota;
+       dev->caps.num_mtts              = func_cap.mtt_quota;
+       dev->caps.num_pds               = MLX4_NUM_PDS;
+       dev->caps.num_mgms              = 0;
+       dev->caps.num_amgms             = 0;
+
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+       if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+                        "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+               return -ENODEV;
+       }
+
+       if (dev->caps.uar_page_size * (dev->caps.num_uars -
+                                      dev->caps.reserved_uars) >
+                                      pci_resource_len(dev->pdev, 2)) {
+               mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+                        "PCI resource 2 size of 0x%llx, aborting.\n",
+                        dev->caps.uar_page_size * dev->caps.num_uars,
+                        (unsigned long long) pci_resource_len(dev->pdev, 2));
+               return -ENODEV;
+       }
+
+#if 0
+       mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+       mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+                 dev->caps.num_uars, dev->caps.reserved_uars,
+                 dev->caps.uar_page_size * dev->caps.num_uars,
+                 pci_resource_len(dev->pdev, 2));
+       mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+                 dev->caps.reserved_eqs);
+       mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+                 dev->caps.num_pds, dev->caps.reserved_pds,
+                 dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+       return 0;
+}
 
 /*
  * Change the port configuration of the device.
@@ -451,6 +657,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
+       int num_eqs;
 
        err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
                                  cmpt_base +
@@ -480,12 +687,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
        if (err)
                goto err_srq;
 
+       num_eqs = (mlx4_is_master(dev)) ?
+               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+               dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
                                  cmpt_base +
                                  ((u64) (MLX4_CMPT_TYPE_EQ *
                                          cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+                                 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
        if (err)
                goto err_cq;
 
@@ -509,6 +718,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 aux_pages;
+       int num_eqs;
        int err;
 
        err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +750,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                goto err_unmap_aux;
        }
 
+
+       num_eqs = (mlx4_is_master(dev)) ?
+               roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+               dev->caps.num_eqs;
        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs,
-                                 0, 0);
+                                 num_eqs, num_eqs, 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_cmpt;
@@ -563,7 +776,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
        err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
                                  init_hca->mtt_base,
                                  dev->caps.mtt_entry_sz,
-                                 dev->caps.num_mtt_segs,
+                                 dev->caps.num_mtts,
                                  dev->caps.reserved_mtts, 1, 0);
        if (err) {
                mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +863,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
         * and it's a lot easier than trying to track ref counts.
         */
        err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
-                                 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+                                 init_hca->mc_base,
+                                 mlx4_get_mgm_entry_size(dev),
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  dev->caps.num_mgms + dev->caps.num_amgms,
                                  0, 0);
@@ -726,6 +940,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);
 }
 
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
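+       /* Tell the master we are going away by resetting the comm channel */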
+       down(&priv->cmd.slave_sem);
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+               mlx4_warn(dev, "Failed to close slave function.\n");
+       up(&priv->cmd.slave_sem);
+}
+
 static int map_bf_area(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +957,10 @@ static int map_bf_area(struct mlx4_dev *dev)
        resource_size_t bf_len;
        int err = 0;
 
-       bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
-       bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+       bf_start = pci_resource_start(dev->pdev, 2) +
+                       (dev->caps.num_uars << PAGE_SHIFT);
+       bf_len = pci_resource_len(dev->pdev, 2) -
+                       (dev->caps.num_uars << PAGE_SHIFT);
        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
        if (!priv->bf_mapping)
                err = -ENOMEM;
@@ -751,10 +977,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
 static void mlx4_close_hca(struct mlx4_dev *dev)
 {
        unmap_bf_area(dev);
-       mlx4_CLOSE_HCA(dev, 0);
-       mlx4_free_icms(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+       if (mlx4_is_slave(dev))
+               mlx4_slave_exit(dev);
+       else {
+               mlx4_CLOSE_HCA(dev, 0);
+               mlx4_free_icms(dev);
+               mlx4_UNMAP_FA(dev);
+               mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+       }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u64 dma = (u64) priv->mfunc.vhcr_dma;
+       int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+       int ret_from_reset = 0;
+       u32 slave_read;
+       u32 cmd_channel_ver;
+
+       down(&priv->cmd.slave_sem);
+       priv->cmd.max_cmds = 1;
+       mlx4_warn(dev, "Sending reset\n");
+       ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+                                      MLX4_COMM_TIME);
+       /* If we are in the middle of FLR, the slave will retry
+        * up to NUM_OF_RESET_RETRIES times before giving up. */
+       if (ret_from_reset) {
+               if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+                       msleep(SLEEP_TIME_IN_RESET);
+                       while (ret_from_reset && num_of_reset_retries) {
+                               mlx4_warn(dev, "slave is currently in the "
+                                         "middle of FLR. Retrying... "
+                                         "(try num:%d)\n",
+                                         (NUM_OF_RESET_RETRIES -
+                                          num_of_reset_retries + 1));
+                               ret_from_reset =
+                                       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+                                                     0, MLX4_COMM_TIME);
+                               num_of_reset_retries = num_of_reset_retries - 1;
+                       }
+               } else
+                       goto err;
+       }
+
+       /* check the driver version - the slave I/F revision
+        * must match the master's */
+       slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+       cmd_channel_ver = mlx4_comm_get_version();
+
+       if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+               MLX4_COMM_GET_IF_REV(slave_read)) {
+               mlx4_err(dev, "slave driver version is not supported"
+                        " by the master\n");
+               goto err;
+       }
+
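+       /* Hand the 64-bit VHCR DMA address to the master 16 bits at a time,
+        * then enable the virtual HCR */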
+       mlx4_warn(dev, "Sending vhcr0\n");
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+                                                   MLX4_COMM_TIME))
+               goto err;
+       if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+               goto err;
+       up(&priv->cmd.slave_sem);
+       return 0;
+
+err:
+       mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+       up(&priv->cmd.slave_sem);
+       return -EIO;
 }
 
 static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1065,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        u64 icm_size;
        int err;
 
-       err = mlx4_QUERY_FW(dev);
-       if (err) {
-               if (err == -EACCES)
-                       mlx4_info(dev, "non-primary physical function, skipping.\n");
-               else
-                       mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-               return err;
-       }
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_QUERY_FW(dev);
+               if (err) {
+                       if (err == -EACCES)
+                               mlx4_info(dev, "non-primary physical function, skipping.\n");
+                       else
+                               mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+                       goto unmap_bf;
+               }
 
-       err = mlx4_load_fw(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to start FW, aborting.\n");
-               return err;
-       }
+               err = mlx4_load_fw(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to start FW, aborting.\n");
+                       goto unmap_bf;
+               }
 
-       mlx4_cfg.log_pg_sz_m = 1;
-       mlx4_cfg.log_pg_sz = 0;
-       err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
-       if (err)
-               mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+               mlx4_cfg.log_pg_sz_m = 1;
+               mlx4_cfg.log_pg_sz = 0;
+               err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+               if (err)
+                       mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
 
-       err = mlx4_dev_cap(dev, &dev_cap);
-       if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-               goto err_stop_fw;
-       }
+               err = mlx4_dev_cap(dev, &dev_cap);
+               if (err) {
+                       mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+                       goto err_stop_fw;
+               }
 
-       profile = default_profile;
+               profile = default_profile;
 
-       icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
-       if ((long long) icm_size < 0) {
-               err = icm_size;
-               goto err_stop_fw;
-       }
+               icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+                                            &init_hca);
+               if ((long long) icm_size < 0) {
+                       err = icm_size;
+                       goto err_stop_fw;
+               }
 
-       if (map_bf_area(dev))
-               mlx4_dbg(dev, "Failed to map blue flame area\n");
+               init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+               init_hca.uar_page_sz = PAGE_SHIFT - 12;
 
-       init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+               err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+               if (err)
+                       goto err_stop_fw;
 
-       err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
-       if (err)
-               goto err_stop_fw;
+               err = mlx4_INIT_HCA(dev, &init_hca);
+               if (err) {
+                       mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+                       goto err_free_icm;
+               }
+       } else {
+               err = mlx4_init_slave(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize slave\n");
+                       goto unmap_bf;
+               }
 
-       err = mlx4_INIT_HCA(dev, &init_hca);
-       if (err) {
-               mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
-               goto err_free_icm;
+               err = mlx4_slave_cap(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to obtain slave caps\n");
+                       goto err_close;
+               }
        }
 
+       if (map_bf_area(dev))
+               mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+       /* Only the master sets the ports; all the others get them from it. */
+       if (!mlx4_is_slave(dev))
+               mlx4_set_port_mask(dev);
+
        err = mlx4_QUERY_ADAPTER(dev, &adapter);
        if (err) {
                mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1147,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        return 0;
 
 err_close:
-       mlx4_CLOSE_HCA(dev, 0);
+       mlx4_close_hca(dev);
 
 err_free_icm:
-       mlx4_free_icms(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_free_icms(dev);
 
 err_stop_fw:
+       if (!mlx4_is_slave(dev)) {
+               mlx4_UNMAP_FA(dev);
+               mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+       }
+unmap_bf:
        unmap_bf_area(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
        return err;
 }
 
@@ -986,55 +1306,62 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                goto err_srq_table_free;
        }
 
-       err = mlx4_init_mcg_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "multicast group table, aborting.\n");
-               goto err_qp_table_free;
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_mcg_table(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to initialize "
+                                "multicast group table, aborting.\n");
+                       goto err_qp_table_free;
+               }
        }
 
        err = mlx4_init_counters_table(dev);
        if (err && err != -ENOENT) {
                mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
-               goto err_counters_table_free;
+               goto err_mcg_table_free;
        }
 
-       for (port = 1; port <= dev->caps.num_ports; port++) {
-               enum mlx4_port_type port_type = 0;
-               mlx4_SENSE_PORT(dev, port, &port_type);
-               if (port_type)
-                       dev->caps.port_type[port] = port_type;
-               ib_port_default_caps = 0;
-               err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d default "
-                                 "ib capabilities (%d). Continuing with "
-                                 "caps = 0\n", port, err);
-               dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
-               err = mlx4_check_ext_port_caps(dev, port);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d extended "
-                                 "port capabilities support info (%d)."
-                                 " Assuming not supported\n", port, err);
+       if (!mlx4_is_slave(dev)) {
+               for (port = 1; port <= dev->caps.num_ports; port++) {
+                       if (!mlx4_is_mfunc(dev)) {
+                               enum mlx4_port_type port_type = 0;
+                               mlx4_SENSE_PORT(dev, port, &port_type);
+                               if (port_type)
+                                       dev->caps.port_type[port] = port_type;
+                       }
+                       ib_port_default_caps = 0;
+                       err = mlx4_get_port_ib_caps(dev, port,
+                                                   &ib_port_default_caps);
+                       if (err)
+                               mlx4_warn(dev, "failed to get port %d default "
+                                         "ib capabilities (%d). Continuing "
+                                         "with caps = 0\n", port, err);
+                       dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+                       err = mlx4_check_ext_port_caps(dev, port);
+                       if (err)
+                               mlx4_warn(dev, "failed to get port %d extended "
+                                         "port capabilities support info (%d)."
+                                         " Assuming not supported\n",
+                                         port, err);
 
-               err = mlx4_SET_PORT(dev, port);
-               if (err) {
-                       mlx4_err(dev, "Failed to set port %d, aborting\n",
-                               port);
-                       goto err_mcg_table_free;
+                       err = mlx4_SET_PORT(dev, port);
+                       if (err) {
+                               mlx4_err(dev, "Failed to set port %d, aborting\n",
+                                       port);
+                               goto err_counters_table_free;
+                       }
                }
        }
-       mlx4_set_port_mask(dev);
 
        return 0;
 
-err_mcg_table_free:
-       mlx4_cleanup_mcg_table(dev);
-
 err_counters_table_free:
        mlx4_cleanup_counters_table(dev);
 
+err_mcg_table_free:
+       mlx4_cleanup_mcg_table(dev);
+
 err_qp_table_free:
        mlx4_cleanup_qp_table(dev);
 
@@ -1081,8 +1408,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
        int i;
 
        if (msi_x) {
-               nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
-                            nreq);
+               /* In multifunction mode each function gets 2 MSI-X vectors:
+                * one for data path completions and the other for async events
+                * or command completions */
+               if (mlx4_is_mfunc(dev)) {
+                       nreq = 2;
+               } else {
+                       nreq = min_t(int, dev->caps.num_eqs -
+                                    dev->caps.reserved_eqs, nreq);
+               }
+
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
                        goto no_msi;
@@ -1138,16 +1473,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
 
        info->dev = dev;
        info->port = port;
-       mlx4_init_mac_table(dev, &info->mac_table);
-       mlx4_init_vlan_table(dev, &info->vlan_table);
-       info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+       if (!mlx4_is_slave(dev)) {
+               INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+               mlx4_init_mac_table(dev, &info->mac_table);
+               mlx4_init_vlan_table(dev, &info->vlan_table);
+               info->base_qpn =
+                       dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
                        (port - 1) * (1 << log_num_mac);
+       }
 
        sprintf(info->dev_name, "mlx4_port%d", port);
        info->port_attr.attr.name = info->dev_name;
-       info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+       if (mlx4_is_mfunc(dev))
+               info->port_attr.attr.mode = S_IRUGO;
+       else {
+               info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+               info->port_attr.store     = set_port_type;
+       }
        info->port_attr.show      = show_port_type;
-       info->port_attr.store     = set_port_type;
        sysfs_attr_init(&info->port_attr.attr);
 
        err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1563,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
        kfree(priv->steer);
 }
 
+static int extended_func_num(struct pci_dev *pdev)
+{
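+       /* Linear function number: slots are spaced 8 functions apart */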
+       return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
+
+#define MLX4_OWNER_BASE        0x8069c
+#define MLX4_OWNER_SIZE        4
+
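+/* A dword in BAR0 serves as a device ownership flag: a non-zero value means
+ * another PF already owns the HCA */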
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+       void __iomem *owner;
+       u32 ret;
+
+       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+                       MLX4_OWNER_SIZE);
+       if (!owner) {
+               mlx4_err(dev, "Failed to obtain ownership bit\n");
+               return -ENOMEM;
+       }
+
+       ret = readl(owner);
+       iounmap(owner);
+       return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+       void __iomem *owner;
+
+       owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+                       MLX4_OWNER_SIZE);
+       if (!owner) {
+               mlx4_err(dev, "Failed to obtain ownership bit\n");
+               return;
+       }
+       writel(0, owner);
+       msleep(1000);
+       iounmap(owner);
+}
+
 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct mlx4_priv *priv;
@@ -1235,13 +1618,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                        "aborting.\n");
                return err;
        }
-
+       if (num_vfs > MLX4_MAX_NUM_VF) {
+               printk(KERN_ERR "There are more VFs (%d) than allowed (%d)\n",
+                      num_vfs, MLX4_MAX_NUM_VF);
+               return -EINVAL;
+       }
        /*
-        * Check for BARs.  We expect 0: 1MB
+        * Check for BARs.
         */
-       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-           pci_resource_len(pdev, 0) != 1 << 20) {
-               dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+       if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+           !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev, "Missing DCS, aborting. "
+                       "(id == 0x%p, id->driver_data: 0x%lx,"
+                       " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+                       id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
                err = -ENODEV;
                goto err_disable_pdev;
        }
@@ -1305,42 +1695,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        mutex_init(&priv->bf_mutex);
 
        dev->rev_id = pdev->revision;
+       /* Detect if this device is a virtual function */
+       if (id && id->driver_data & MLX4_VF) {
+               /* When acting as pf, we normally skip vfs unless explicitly
+                * requested to probe them. */
+               if (num_vfs && extended_func_num(pdev) > probe_vf) {
+                       mlx4_warn(dev, "Skipping virtual function:%d\n",
+                                               extended_func_num(pdev));
+                       err = -ENODEV;
+                       goto err_free_dev;
+               }
+               mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+               dev->flags |= MLX4_FLAG_SLAVE;
+       } else {
+               /* We reset the device and enable SRIOV only for physical
+                * devices.  Try to claim ownership on the device;
+                * if already taken, skip -- do not allow multiple PFs */
+               err = mlx4_get_ownership(dev);
+               if (err) {
+                       if (err < 0)
+                               goto err_free_dev;
+                       else {
+                               mlx4_warn(dev, "Multiple PFs not yet supported."
+                                         " Skipping PF.\n");
+                               err = -EINVAL;
+                               goto err_free_dev;
+                       }
+               }
 
-       /*
-        * Now reset the HCA before we touch the PCI capabilities or
-        * attempt a firmware command, since a boot ROM may have left
-        * the HCA in an undefined state.
-        */
-       err = mlx4_reset(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to reset HCA, aborting.\n");
-               goto err_free_dev;
+               if (num_vfs) {
+                       mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+                       err = pci_enable_sriov(pdev, num_vfs);
+                       if (err) {
+                               mlx4_err(dev, "Failed to enable sriov, "
+                                        "continuing without sriov enabled"
+                                        " (err = %d).\n", err);
+                               num_vfs = 0;
+                               err = 0;
+                       } else {
+                               mlx4_warn(dev, "Running in master mode\n");
+                               dev->flags |= MLX4_FLAG_SRIOV |
+                                             MLX4_FLAG_MASTER;
+                               dev->num_vfs = num_vfs;
+                       }
+               }
+
+               /*
+                * Now reset the HCA before we touch the PCI capabilities or
+                * attempt a firmware command, since a boot ROM may have left
+                * the HCA in an undefined state.
+                */
+               err = mlx4_reset(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+                       goto err_rel_own;
+               }
        }
 
+slave_start:
        if (mlx4_cmd_init(dev)) {
                mlx4_err(dev, "Failed to init command interface, aborting.\n");
-               goto err_free_dev;
+               goto err_sriov;
+       }
+
+       /* In slave functions, the communication channel must be initialized
+        * before posting commands. Also, init num_slaves before calling
+        * mlx4_init_hca */
+       if (mlx4_is_mfunc(dev)) {
+               if (mlx4_is_master(dev))
+                       dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+               else {
+                       dev->num_slaves = 0;
+                       if (mlx4_multi_func_init(dev)) {
+                               mlx4_err(dev, "Failed to init slave mfunc"
+                                        " interface, aborting.\n");
+                               goto err_cmd;
+                       }
+               }
        }
 
        err = mlx4_init_hca(dev);
-       if (err)
-               goto err_cmd;
+       if (err) {
+               if (err == -EACCES) {
+                       /* Not primary Physical function
+                        * Running in slave mode */
+                       mlx4_cmd_cleanup(dev);
+                       dev->flags |= MLX4_FLAG_SLAVE;
+                       dev->flags &= ~MLX4_FLAG_MASTER;
+                       goto slave_start;
+               } else
+                       goto err_mfunc;
+       }
+
+       /* In master functions, the communication channel must be initialized
+        * after obtaining its address from fw */
+       if (mlx4_is_master(dev)) {
+               if (mlx4_multi_func_init(dev)) {
+                       mlx4_err(dev, "Failed to init master mfunc"
+                                " interface, aborting.\n");
+                       goto err_close;
+               }
+       }
 
        err = mlx4_alloc_eq_table(dev);
        if (err)
-               goto err_close;
+               goto err_master_mfunc;
 
        priv->msix_ctl.pool_bm = 0;
        spin_lock_init(&priv->msix_ctl.pool_lock);
 
        mlx4_enable_msi_x(dev);
-
-       err = mlx4_init_steering(dev);
-       if (err)
+       if ((mlx4_is_mfunc(dev)) &&
+           !(dev->flags & MLX4_FLAG_MSI_X)) {
+               mlx4_err(dev, "INTx is not supported in multi-function mode,"
+                        " aborting.\n");
                goto err_free_eq;
+       }
+
+       if (!mlx4_is_slave(dev)) {
+               err = mlx4_init_steering(dev);
+               if (err)
+                       goto err_free_eq;
+       }
 
        err = mlx4_setup_hca(dev);
-       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+           !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
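
The -EACCES branch above is the pivot of the new probe flow: a function that is refused INIT_HCA concludes that another function already owns the HCA, tears down its command interface, and re-runs the sequence from slave_start in slave mode. A minimal sketch of that retry pattern, using invented example_* helpers; only the flag handling mirrors the hunk above, and a loop guard is added so the sketch is self-contained:

	static int example_probe_retry_as_slave(struct mlx4_dev *dev)
	{
		int err;

	retry:
		err = example_cmd_init(dev);	/* stand-in for mlx4_cmd_init() */
		if (err)
			return err;

		err = example_init_hca(dev);	/* stand-in for mlx4_init_hca() */
		if (err == -EACCES && !mlx4_is_slave(dev)) {
			/* Not the primary physical function: drop master state,
			 * switch to slave mode and run the flow again. */
			example_cmd_cleanup(dev);	/* stand-in for mlx4_cmd_cleanup() */
			dev->flags &= ~MLX4_FLAG_MASTER;
			dev->flags |= MLX4_FLAG_SLAVE;
			goto retry;
		}
		return err;
	}
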
@@ -1383,20 +1863,37 @@ err_port:
        mlx4_cleanup_uar_table(dev);
 
 err_steer:
-       mlx4_clear_steering(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_clear_steering(dev);
 
 err_free_eq:
        mlx4_free_eq_table(dev);
 
+err_master_mfunc:
+       if (mlx4_is_master(dev))
+               mlx4_multi_func_cleanup(dev);
+
 err_close:
        if (dev->flags & MLX4_FLAG_MSI_X)
                pci_disable_msix(pdev);
 
        mlx4_close_hca(dev);
 
+err_mfunc:
+       if (mlx4_is_slave(dev))
+               mlx4_multi_func_cleanup(dev);
+
 err_cmd:
        mlx4_cmd_cleanup(dev);
 
+err_sriov:
+       if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+               pci_disable_sriov(pdev);
+
+err_rel_own:
+       if (!mlx4_is_slave(dev))
+               mlx4_free_ownership(dev);
+
 err_free_dev:
        kfree(priv);
 
@@ -1424,6 +1921,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        int p;
 
        if (dev) {
+               /* In SR-IOV mode it is not allowed to unload the PF's
+                * driver while there are active VFs */
+               if (mlx4_is_master(dev)) {
+                       if (mlx4_how_many_lives_vf(dev))
+                               printk(KERN_ERR "Removing PF when there are assigned VFs!\n");
+               }
                mlx4_stop_sense(dev);
                mlx4_unregister_device(dev);
 
@@ -1443,17 +1946,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                mlx4_cleanup_xrcd_table(dev);
                mlx4_cleanup_pd_table(dev);
 
+               if (mlx4_is_master(dev))
+                       mlx4_free_resource_tracker(dev);
+
                iounmap(priv->kar);
                mlx4_uar_free(dev, &priv->driver_uar);
                mlx4_cleanup_uar_table(dev);
-               mlx4_clear_steering(dev);
+               if (!mlx4_is_slave(dev))
+                       mlx4_clear_steering(dev);
                mlx4_free_eq_table(dev);
+               if (mlx4_is_master(dev))
+                       mlx4_multi_func_cleanup(dev);
                mlx4_close_hca(dev);
+               if (mlx4_is_slave(dev))
+                       mlx4_multi_func_cleanup(dev);
                mlx4_cmd_cleanup(dev);
 
                if (dev->flags & MLX4_FLAG_MSI_X)
                        pci_disable_msix(pdev);
+               if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+                       mlx4_warn(dev, "Disabling sriov\n");
+                       pci_disable_sriov(pdev);
+               }
 
+               if (!mlx4_is_slave(dev))
+                       mlx4_free_ownership(dev);
                kfree(priv);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
@@ -1468,33 +1985,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
 }
 
 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
-       { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
-       { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
-       { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
-       { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
-       { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-       { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
-       { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+       /* MT25408 "Hermon" SDR */
+       { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+       /* MT25408 "Hermon" DDR */
+       { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+       /* MT25408 "Hermon" QDR */
+       { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+       /* MT25408 "Hermon" DDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+       /* MT25408 "Hermon" QDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+       /* MT25408 "Hermon" EN 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+       /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+       /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+       /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+       /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+       { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+       /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+       { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+       /* MT26478 ConnectX2 40GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+       /* MT25400 Family [ConnectX-2 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+       /* MT27500 Family [ConnectX-3] */
+       { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+       /* MT27500 Family [ConnectX-3 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+       { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
        { 0, }
 };
 
@@ -1523,6 +2055,12 @@ static int __init mlx4_verify_params(void)
                return -1;
        }
 
+       /* Check if module param for ports type has legal combination */
+       if (port_type_array[0] == false && port_type_array[1] == true) {
+               printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+               port_type_array[0] = true;
+       }
+
        return 0;
 }
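
Reading the new check with true standing for IB and false for ETH (the coercion above sets port 1 to true while warning that it falls back to IB/IB), the two-port combinations resolve as follows; this is only a summary of the condition, not additional driver logic:

	/* port_type_array[0] / [1]:
	 *   IB  / IB    passes the check
	 *   IB  / ETH   passes the check
	 *   ETH / ETH   passes the check
	 *   ETH / IB    rejected here, coerced to IB / IB with a warning
	 */
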
 
index 978688c..0785d9b 100644 (file)
 
 static const u8 zero_gid[16];  /* automatically initialized to 0 */
 
+struct mlx4_mgm {
+       __be32                  next_gid_index;
+       __be32                  members_count;
+       u32                     reserved[2];
+       u8                      gid[16];
+       __be32                  qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+       return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+       return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
+
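
The 4 * (entry_size / 16 - 2) expression follows directly from the struct mlx4_mgm layout above: the header fields (next_gid_index, members_count, reserved[2], gid[16]) fill the first two 16-byte rows, and every remaining 16-byte row carries four 32-bit QP entries. Two worked values, with the entry size capped by the min() in mlx4_get_mgm_entry_size():

	/* entry_size = 0x100  (256 B):  4 * (256/16  - 2) = 4 * 14  =   56 QPs
	 * entry_size = 0x1000 (4 KB):   4 * (4096/16 - 2) = 4 * 254 = 1016 QPs
	 */
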
 static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
                           struct mlx4_cmd_mailbox *mailbox)
 {
        return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
 static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
                            struct mlx4_cmd_mailbox *mailbox)
 {
        return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
-                       MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 }
 
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
                              struct mlx4_cmd_mailbox *mailbox)
 {
        u32 in_mod;
 
-       in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+       in_mod = (u32) port << 16 | steer << 1;
        return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
-                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_NATIVE);
 }
 
 static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
        int err;
 
        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
-                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
 
        if (!err)
                *hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  * Add new entry to steering data structure.
  * All promisc QPs should be added as well
  */
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
                              enum mlx4_steer_type steer,
                              unsigned int index, u32 qpn)
 {
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        struct mlx4_promisc_qp *dqp = NULL;
        u32 prot;
        int err;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
        new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
        if (!new_entry)
                return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        /* If the given qpn is also a promisc qp,
         * it should be inserted to duplicates list
         */
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (pqp) {
                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
                /* don't add already existing qpn */
                if (pqp->qpn == qpn)
                        continue;
-               if (members_count == MLX4_QP_PER_MGM) {
+               if (members_count == dev->caps.num_qp_per_mgm) {
                        /* out of space */
                        err = -ENOMEM;
                        goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
 }
 
 /* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
                                   enum mlx4_steer_type steer,
                                   unsigned int index, u32 qpn)
 {
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (!pqp)
                return 0; /* nothing to do */
 
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
-               if (qpn == dqp->qpn)
+               if (qpn == pqp->qpn)
                        return 0; /* qp is already duplicated */
        }
 
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
 
 /* Check whether a qpn is a duplicate on steering entry
  * If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
                                  enum mlx4_steer_type steer,
                                  unsigned int index, u32 qpn)
 {
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *dqp, *tmp_dqp;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        /* if qp is not promisc, it cannot be duplicated */
-       if (!get_promisc_qp(dev, pf_num, steer, qpn))
+       if (!get_promisc_qp(dev, 0, steer, qpn))
                return false;
 
        /* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
 }
 
 /* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
                                      enum mlx4_steer_type steer,
                                      unsigned int index, u32 tqpn)
 {
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        u32 members_count;
        bool ret = false;
        int i;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (i = 0;  i < members_count; i++) {
                qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
-               if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+               if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
@@ -332,7 +344,7 @@ out:
        return ret;
 }
 
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
                          enum mlx4_steer_type steer, u32 qpn)
 {
        struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        bool found;
        int last_index;
        int err;
-       u8 pf_num;
        struct mlx4_priv *priv = mlx4_priv(dev);
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       s_steer = &mlx4_priv(dev)->steer[0];
 
        mutex_lock(&priv->mcg_table.mutex);
 
-       if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+       if (get_promisc_qp(dev, 0, steer, qpn)) {
                err = 0;  /* Nothing to do, already exists */
                goto out_mutex;
        }
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                }
                if (!found) {
                        /* Need to add the qpn to mgm */
-                       if (members_count == MLX4_QP_PER_MGM) {
+                       if (members_count == dev->caps.num_qp_per_mgm) {
                                /* entry is full */
                                err = -ENOMEM;
                                goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_list;
 
@@ -439,7 +450,7 @@ out_mutex:
        return err;
 }
 
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
                             enum mlx4_steer_type steer, u32 qpn)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        bool back_to_list = false;
        int loc, i;
        int err;
-       u8 pf_num;
 
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       s_steer = &mlx4_priv(dev)->steer[0];
        mutex_lock(&priv->mcg_table.mutex);
 
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       pqp = get_promisc_qp(dev, 0, steer, qpn);
        if (unlikely(!pqp)) {
                mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
                /* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
                goto out_list;
        }
        mgm = mailbox->buf;
+       memset(mgm, 0, sizeof *mgm);
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_mailbox;
 
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                }
                index += dev->caps.num_mgms;
 
+               new_entry = 1;
                memset(mgm, 0, sizeof *mgm);
                memcpy(mgm->gid, gid, 16);
        }
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       if (members_count == MLX4_QP_PER_MGM) {
+       if (members_count == dev->caps.num_qp_per_mgm) {
                mlx4_err(dev, "MGM at index %x is full.\n", index);
                err = -ENOMEM;
                goto out;
@@ -696,9 +707,9 @@ out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
-                       new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+                       new_steering_entry(dev, port, steer, index, qp->qpn);
                else
-                       existing_steering_entry(dev, 0, port, steer,
+                       existing_steering_entry(dev, port, steer,
                                                index, qp->qpn);
        }
        if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
        /* if this pq is also a promisc qp, it shouldn't be removed */
        if (prot == MLX4_PROT_ETH &&
-           check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+           check_duplicate_entry(dev, port, steer, index, qp->qpn))
                goto out;
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        mgm->qp[i - 1]     = 0;
 
        if (prot == MLX4_PROT_ETH)
-               removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+               removed_entry = can_remove_steering_entry(dev, port, steer,
+                                                               index, qp->qpn);
        if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
@@ -828,6 +840,34 @@ out:
        return err;
 }
 
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                         u8 gid[16], u8 attach, u8 block_loopback,
+                         enum mlx4_protocol prot)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       int err = 0;
+       int qpn;
+
+       if (!mlx4_is_mfunc(dev))
+               return -EBADF;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, gid, 16);
+       qpn = qp->qpn;
+       qpn |= (prot << 28);
+       if (attach && block_loopback)
+               qpn |= (1 << 31);
+
+       err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+                      MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+                      MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
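
For reference, the qpn word assembled in mlx4_QP_ATTACH() above multiplexes three fields before it is handed to the firmware command; the layout is read off the shifts in the function and nothing beyond that is implied:

	/* low bits      : QP number (qp->qpn)
	 * bits 28 and up: protocol (prot << 28)
	 * bit 31        : block multicast loopback, set only when attaching
	 */
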
 
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);
 
-       return mlx4_qp_attach_common(dev, qp, gid,
-                                    block_mcast_loopback, prot,
-                                    steer);
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 1,
+                                       block_mcast_loopback, prot);
+
+       return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+                                       prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                        !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
-       if (prot == MLX4_PROT_ETH) {
+       if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);
-       }
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
 
        return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+                       struct mlx4_qp *qp, u8 gid[16],
+                       int block_mcast_loopback, enum mlx4_protocol prot)
+{
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (MLX4_UC_STEER << 1);
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 1,
+                                       block_mcast_loopback, prot);
+
+       return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+                                       prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                              u8 gid[16], enum mlx4_protocol prot)
+{
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (MLX4_UC_STEER << 1);
+
+       if (mlx4_is_mfunc(dev))
+               return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+       return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+       u8 port = vhcr->in_param >> 62;
+       enum mlx4_steer_type steer = vhcr->in_modifier;
+
+       /* Promiscuous unicast is not allowed in mfunc */
+       if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+               return 0;
+
+       if (vhcr->op_modifier)
+               return add_promisc_qp(dev, port, steer, qpn);
+       else
+               return remove_promisc_qp(dev, port, steer, qpn);
+}
+
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+                       enum mlx4_steer_type steer, u8 add, u8 port)
+{
+       return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+                       MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
+}
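
mlx4_PROMISC() above and mlx4_PROMISC_wrapper() agree on a single encoding, taken directly from the two functions: the QP number travels in the low 32 bits of in_param and the port in its top two bits, the steering type rides in in_modifier, and op_modifier selects add versus remove:

	/* in_param    : [63:62] port        [31:0] qpn
	 * in_modifier : steering type (MLX4_UC_STEER / MLX4_MC_STEER)
	 * op_modifier : 1 = add promiscuous QP, 0 = remove it
	 */
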
 
 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
 
-       return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+       return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
 
@@ -884,8 +995,10 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
 
-       return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+       return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
 
@@ -894,8 +1007,10 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
 
-       return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+       return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
 
@@ -904,7 +1019,10 @@ int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;
 
-       return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+       if (mlx4_is_mfunc(dev))
+               return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+       return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
 
index 5dfa68f..a80121a 100644 (file)
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
 
 #define DRV_NAME       "mlx4_core"
-#define DRV_VERSION    "1.0"
-#define DRV_RELDATE    "July 14, 2011"
+#define PFX            DRV_NAME ": "
+#define DRV_VERSION    "1.1"
+#define DRV_RELDATE    "Dec, 2011"
 
 enum {
        MLX4_HCR_BASE           = 0x80680,
        MLX4_HCR_SIZE           = 0x0001c,
-       MLX4_CLR_INT_SIZE       = 0x00008
+       MLX4_CLR_INT_SIZE       = 0x00008,
+       MLX4_SLAVE_COMM_BASE    = 0x0,
+       MLX4_COMM_PAGESIZE      = 0x1000
 };
 
 enum {
-       MLX4_MGM_ENTRY_SIZE     =  0x100,
-       MLX4_QP_PER_MGM         = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
-       MLX4_MTT_ENTRY_PER_SEG  = 8
+       MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+       MLX4_MAX_QP_PER_MGM     = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+       MLX4_MTT_ENTRY_PER_SEG  = 8,
 };
 
 enum {
@@ -80,6 +84,94 @@ enum {
        MLX4_NUM_CMPTS          = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
 };
 
+enum mlx4_mr_state {
+       MLX4_MR_DISABLED = 0,
+       MLX4_MR_EN_HW,
+       MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME         10000
+enum {
+       MLX4_COMM_CMD_RESET,
+       MLX4_COMM_CMD_VHCR0,
+       MLX4_COMM_CMD_VHCR1,
+       MLX4_COMM_CMD_VHCR2,
+       MLX4_COMM_CMD_VHCR_EN,
+       MLX4_COMM_CMD_VHCR_POST,
+       MLX4_COMM_CMD_FLR = 254
+};
+
+/* The flag indicates that the slave should delay the RESET cmd */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES   10
+#define SLEEP_TIME_IN_RESET    (2 * 1000)
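
A minimal sketch of how a slave could consume the three definitions above; example_try_reset() is an invented stand-in, and the real retry loop lives in the command-interface code rather than in this header:

	static int example_reset_with_flr_retry(struct mlx4_dev *dev)
	{
		int i;

		for (i = 0; i < NUM_OF_RESET_RETRIES; i++) {
			/* The master answers with MLX4_DELAY_RESET_SLAVE while an
			 * FLR for this slave is still in progress; back off, retry. */
			if (example_try_reset(dev) != MLX4_DELAY_RESET_SLAVE)
				return 0;
			msleep(SLEEP_TIME_IN_RESET);
		}
		return -ETIMEDOUT;
	}
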
+enum mlx4_resource {
+       RES_QP,
+       RES_CQ,
+       RES_SRQ,
+       RES_XRCD,
+       RES_MPT,
+       RES_MTT,
+       RES_MAC,
+       RES_VLAN,
+       RES_EQ,
+       RES_COUNTER,
+       MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+       RES_OP_RESERVE,
+       RES_OP_RESERVE_AND_MAP,
+       RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the sw representation, in machine endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW to go through the communication channel.
+ * It is big endian, and has the same structure as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+       u64     in_param;
+       u64     out_param;
+       u32     in_modifier;
+       u32     errno;
+       u16     op;
+       u16     token;
+       u8      op_modifier;
+       u8      e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+       __be64 in_param;
+       __be32 in_modifier;
+       __be64 out_param;
+       __be16 token;
+       u16 reserved;
+       u8 status;
+       u8 flags;
+       __be16 opcode;
+};
+
+struct mlx4_cmd_info {
+       u16 opcode;
+       bool has_inbox;
+       bool has_outbox;
+       bool out_is_imm;
+       bool encode_slave_id;
+       int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+                     struct mlx4_cmd_mailbox *inbox);
+       int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+                      struct mlx4_cmd_mailbox *inbox,
+                      struct mlx4_cmd_mailbox *outbox,
+                      struct mlx4_cmd_info *cmd);
+};
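
As an illustration of how struct mlx4_cmd_info is meant to be filled in, a hypothetical table entry for the QP_ATTACH command wrapped earlier in this patch; the real table lives in cmd.c and may set the fields differently:

	static struct mlx4_cmd_info example_qp_attach_info = {
		.opcode          = MLX4_CMD_QP_ATTACH,
		.has_inbox       = true,	/* the GID travels in a mailbox */
		.has_outbox      = false,
		.out_is_imm      = false,
		.encode_slave_id = false,
		.verify          = NULL,
		.wrapper         = mlx4_QP_ATTACH_wrapper,
	};
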
+
 #ifdef CONFIG_MLX4_DEBUG
 extern int mlx4_debug_level;
 #else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do {                                                                        \
 #define mlx4_warn(mdev, format, arg...) \
        dev_warn(&mdev->pdev->dev, format, ##arg)
 
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES    (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
 struct mlx4_bitmap {
        u32                     last;
        u32                     top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
        struct mlx4_icm       **icm;
 };
 
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+       __be32 flags;
+       __be32 qpn;
+       __be32 key;
+       __be32 pd_flags;
+       __be64 start;
+       __be64 length;
+       __be32 lkey;
+       __be32 win_cnt;
+       u8      reserved1[3];
+       u8      mtt_rep;
+       __be64 mtt_addr;
+       __be32 mtt_sz;
+       __be32 entity_size;
+       __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       u8                      log_eq_size;
+       u8                      reserved2[4];
+       u8                      eq_period;
+       u8                      reserved3;
+       u8                      eq_max_count;
+       u8                      reserved4[3];
+       u8                      intr;
+       u8                      log_page_size;
+       u8                      reserved5[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       u32                     reserved6[2];
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved7[4];
+};
+
+struct mlx4_cq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       __be32                  logsize_usrpage;
+       __be16                  cq_period;
+       __be16                  cq_max_count;
+       u8                      reserved2[3];
+       u8                      comp_eqn;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  last_notified_index;
+       __be32                  solicit_producer_index;
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved4[2];
+       __be64                  db_rec_addr;
+};
+
+struct mlx4_srq_context {
+       __be32                  state_logsize_srqn;
+       u8                      logstride;
+       u8                      reserved1;
+       __be16                  xrcd;
+       __be32                  pg_offset_cqn;
+       u32                     reserved2;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  pd;
+       __be16                  limit_watermark;
+       __be16                  wqe_cnt;
+       u16                     reserved4;
+       __be16                  wqe_counter;
+       u32                     reserved5;
+       __be64                  db_rec_addr;
+};
+
+struct mlx4_eqe {
+       u8                      reserved1;
+       u8                      type;
+       u8                      reserved2;
+       u8                      subtype;
+       union {
+               u32             raw[6];
+               struct {
+                       __be32  cqn;
+               } __packed comp;
+               struct {
+                       u16     reserved1;
+                       __be16  token;
+                       u32     reserved2;
+                       u8      reserved3[3];
+                       u8      status;
+                       __be64  out_param;
+               } __packed cmd;
+               struct {
+                       __be32  qpn;
+               } __packed qp;
+               struct {
+                       __be32  srqn;
+               } __packed srq;
+               struct {
+                       __be32  cqn;
+                       u32     reserved1;
+                       u8      reserved2[3];
+                       u8      syndrome;
+               } __packed cq_err;
+               struct {
+                       u32     reserved1[2];
+                       __be32  port;
+               } __packed port_change;
+               struct {
+                       #define COMM_CHANNEL_BIT_ARRAY_SIZE     4
+                       u32 reserved;
+                       u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+               } __packed comm_channel_arm;
+               struct {
+                       u8      port;
+                       u8      reserved[3];
+                       __be64  mac;
+               } __packed mac_update;
+               struct {
+                       u8      port;
+               } __packed sw_event;
+               struct {
+                       __be32  slave_id;
+               } __packed flr_event;
+       }                       event;
+       u8                      slave_id;
+       u8                      reserved3[2];
+       u8                      owner;
+} __packed;
+
 struct mlx4_eq {
        struct mlx4_dev        *dev;
        void __iomem           *doorbell;
@@ -142,6 +381,18 @@ struct mlx4_eq {
        struct mlx4_mtt         mtt;
 };
 
+struct mlx4_slave_eqe {
+       u8 type;
+       u8 port;
+       u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+       u32 eqn;
+       u16 token;
+       u64 event_type;
+};
+
 struct mlx4_profile {
        int                     num_qp;
        int                     rdmarc_per_qp;
@@ -155,16 +406,37 @@ struct mlx4_profile {
 struct mlx4_fw {
        u64                     clr_int_base;
        u64                     catas_offset;
+       u64                     comm_base;
        struct mlx4_icm        *fw_icm;
        struct mlx4_icm        *aux_icm;
        u32                     catas_size;
        u16                     fw_pages;
        u8                      clr_int_bar;
        u8                      catas_bar;
+       u8                      comm_bar;
 };
 
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
+struct mlx4_comm {
+       u32                     slave_write;
+       u32                     slave_read;
+};
+
+enum {
+       MLX4_MCAST_CONFIG       = 0,
+       MLX4_MCAST_DISABLE      = 1,
+       MLX4_MCAST_ENABLE       = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+       __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+       struct list_head list;
+       u64 addr;
+};
 
 struct mlx4_promisc_qp {
        struct list_head list;
@@ -177,19 +449,87 @@ struct mlx4_steer_index {
        struct list_head duplicates;
 };
 
-struct mlx4_mgm {
-       __be32                  next_gid_index;
-       __be32                  members_count;
-       u32                     reserved[2];
-       u8                      gid[16];
-       __be32                  qp[MLX4_QP_PER_MGM];
+struct mlx4_slave_state {
+       u8 comm_toggle;
+       u8 last_cmd;
+       u8 init_port_mask;
+       bool active;
+       u8 function;
+       dma_addr_t vhcr_dma;
+       u16 mtu[MLX4_MAX_PORTS + 1];
+       __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+       struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+       struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+       struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+       struct mlx4_slave_event_eq_info event_eq;
+       u16 eq_pi;
+       u16 eq_ci;
+       spinlock_t lock;
+       /* initialized via kzalloc */
+       u8 is_slave_going_down;
+       u32 cookie;
+};
+
+struct slave_list {
+       struct mutex mutex;
+       struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+       spinlock_t lock;
+       /* one radix tree per resource type */
+       struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+       /* per-slave resource lists, one entry per slave */
+       struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE    128
+struct mlx4_slave_event_eq {
+       u32 eqn;
+       u32 cons;
+       u32 prod;
+       struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+       int proxy_qp0_active;
+       int qp0_active;
+       int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+       struct mlx4_slave_state *slave_state;
+       struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+       int                     init_port_ref[MLX4_MAX_PORTS + 1];
+       u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
+       struct mlx4_resource_tracker res_tracker;
+       struct workqueue_struct *comm_wq;
+       struct work_struct      comm_work;
+       struct work_struct      slave_event_work;
+       struct work_struct      slave_flr_event_work;
+       spinlock_t              slave_state_lock;
+       __be32                  comm_arm_bit_vector[4];
+       struct mlx4_eqe         cmd_eqe;
+       struct mlx4_slave_event_eq slave_eq;
+       struct mutex            gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+       struct mlx4_comm __iomem       *comm;
+       struct mlx4_vhcr_cmd           *vhcr;
+       dma_addr_t                      vhcr_dma;
+
+       struct mlx4_mfunc_master_ctx    master;
 };
+
 struct mlx4_cmd {
        struct pci_pool        *pool;
        void __iomem           *hcr;
        struct mutex            hcr_mutex;
        struct semaphore        poll_sem;
        struct semaphore        event_sem;
+       struct semaphore        slave_sem;
        int                     max_cmds;
        spinlock_t              context_lock;
        int                     free_head;
@@ -197,6 +537,7 @@ struct mlx4_cmd {
        u16                     token_mask;
        u8                      use_events;
        u8                      toggle;
+       u8                      comm_toggle;
 };
 
 struct mlx4_uar_table {
@@ -287,6 +628,48 @@ struct mlx4_vlan_table {
        int                     max;
 };
 
+#define SET_PORT_GEN_ALL_VALID         0x7
+#define SET_PORT_PROMISC_SHIFT         31
+#define SET_PORT_MC_PROMISC_SHIFT      30
+
+enum {
+       MCAST_DIRECT_ONLY       = 0,
+       MCAST_DIRECT            = 1,
+       MCAST_DEFAULT           = 2
+};
+
+
+struct mlx4_set_port_general_context {
+       u8 reserved[3];
+       u8 flags;
+       u16 reserved2;
+       __be16 mtu;
+       u8 pptx;
+       u8 pfctx;
+       u16 reserved3;
+       u8 pprx;
+       u8 pfcrx;
+       u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+       __be32 base_qpn;
+       u8 rererved;
+       u8 n_mac;
+       u8 n_vlan;
+       u8 n_prio;
+       u8 reserved2[3];
+       u8 mac_miss;
+       u8 intra_no_vlan;
+       u8 no_vlan;
+       u8 intra_vlan_miss;
+       u8 vlan_miss;
+       u8 reserved3[3];
+       u8 no_vlan_prio;
+       __be32 promisc;
+       __be32 mcast;
+};
+
 struct mlx4_mac_entry {
        u64 mac;
 };
@@ -333,6 +716,7 @@ struct mlx4_priv {
 
        struct mlx4_fw          fw;
        struct mlx4_cmd         cmd;
+       struct mlx4_mfunc       mfunc;
 
        struct mlx4_bitmap      pd_bitmap;
        struct mlx4_bitmap      xrcd_bitmap;
@@ -359,6 +743,7 @@ struct mlx4_priv {
        struct list_head        bf_list;
        struct mutex            bf_mutex;
        struct io_mapping       *bf_mapping;
+       int                     reserved_mtts;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +788,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+                    struct mlx4_vhcr *vhcr,
+                    struct mlx4_cmd_mailbox *inbox,
+                    struct mlx4_cmd_mailbox *outbox,
+                    struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                           int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    int start_index, int npages, u64 *page_list);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +860,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                      struct mlx4_profile *request,
                      struct mlx4_dev_cap *dev_cap,
                      struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
 
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
 int mlx4_cmd_use_events(struct mlx4_dev *dev);
 void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+                 unsigned long timeout);
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -452,12 +993,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions */
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+                                   enum mlx4_resource resource_type,
+                                   int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
 int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
 
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd);
 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          enum mlx4_protocol prot, enum mlx4_steer_type steer);
 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot,
                          enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+                                    int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+                               struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+                                  struct mlx4_vhcr *vhcr,
+                                  struct mlx4_cmd_mailbox *inbox,
+                                  struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+                           struct mlx4_vhcr *vhcr,
+                           struct mlx4_cmd_mailbox *inbox,
+                           struct mlx4_cmd_mailbox *outbox,
+                           struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
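+/* Helpers for packing and unpacking the 32-bit halves of the 64-bit
+ * immediate parameter exchanged over the command interface. */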
+static inline void set_param_l(u64 *arg, u32 val)
+{
+       *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+       *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+       return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+       return (u32)(*arg >> 32);
+}
+
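+/* Spinlock protecting the master's resource tracker. */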
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+       return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
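+/* PD numbers are allocated below this bit; the owning function is encoded
+ * above it (see mlx4_pd_alloc). */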
+#define NOT_MASKED_PD_BITS 17
+
 #endif /* MLX4_H */
index 207b5ad..f2a8e65 100644 (file)
@@ -51,8 +51,8 @@
 #include "en_port.h"
 
 #define DRV_NAME       "mlx4_en"
-#define DRV_VERSION    "1.5.4.2"
-#define DRV_RELDATE    "October 2011"
+#define DRV_VERSION    "2.0"
+#define DRV_RELDATE    "Dec 2011"
 
 #define MLX4_EN_MSG_LEVEL      (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
@@ -366,16 +366,6 @@ struct mlx4_en_rss_map {
        enum mlx4_qp_state indir_state;
 };
 
-struct mlx4_en_rss_context {
-       __be32 base_qpn;
-       __be32 default_qpn;
-       u16 reserved;
-       u8 hash_fn;
-       u8 flags;
-       __be32 rss_key[10];
-       __be32 base_qpn_udp;
-};
-
 struct mlx4_en_port_state {
        int link_state;
        int link_speed;
@@ -463,6 +453,7 @@ struct mlx4_en_priv {
        int base_qpn;
 
        struct mlx4_en_rss_map rss_map;
+       u32 ctrl_flags;
        u32 flags;
 #define MLX4_EN_FLAG_PROMISC   0x1
 #define MLX4_EN_FLAG_MC_PROMISC        0x2
@@ -495,9 +486,9 @@ struct mlx4_en_priv {
 enum mlx4_en_wol {
        MLX4_EN_WOL_MAGIC = (1ULL << 61),
        MLX4_EN_WOL_ENABLED = (1ULL << 62),
-       MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
 };
 
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
 
 void mlx4_en_destroy_netdev(struct net_device *dev);
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
index efa3e77..f7243b2 100644 (file)
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/kernel.h>
 
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
 #include "icm.h"
 
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
-       __be32 flags;
-       __be32 qpn;
-       __be32 key;
-       __be32 pd_flags;
-       __be64 start;
-       __be64 length;
-       __be32 lkey;
-       __be32 win_cnt;
-       u8      reserved1[3];
-       u8      mtt_rep;
-       __be64 mtt_seg;
-       __be32 mtt_sz;
-       __be32 entity_size;
-       __be32 first_byte_offset;
-} __packed;
-
 #define MLX4_MPT_FLAG_SW_OWNS      (0xfUL << 28)
 #define MLX4_MPT_FLAG_FREE         (0x3UL << 28)
 #define MLX4_MPT_FLAG_MIO          (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
        kfree(buddy->num_free);
 }
 
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
 {
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        u32 seg;
+       int seg_order;
+       u32 offset;
 
-       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+       seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
        if (seg == -1)
                return -1;
 
-       if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
-                                seg + (1 << order) - 1)) {
-               mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+       offset = seg * (1 << log_mtts_per_seg);
+
+       if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+                                offset + (1 << order) - 1)) {
+               mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
                return -1;
        }
 
-       return seg;
+       return offset;
+}
+
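+/* On multi-function devices, MTT ranges are obtained through the wrapped
+ * ALLOC_RES command so the resource tracker can account for them; otherwise
+ * they are taken directly from the buddy allocator. */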
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, order);
+               err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+                                                      RES_OP_RESERVE_AND_MAP,
+                                                      MLX4_CMD_ALLOC_RES,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_WRAPPED);
+               if (err)
+                       return -1;
+               return get_param_l(&out_param);
+       }
+       return __mlx4_alloc_mtt_range(dev, order);
 }
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
        } else
                mtt->page_shift = page_shift;
 
-       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+       for (mtt->order = 0, i = 1; i < npages; i <<= 1)
                ++mtt->order;
 
-       mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
-       if (mtt->first_seg == -1)
+       mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+       if (mtt->offset == -1)
                return -ENOMEM;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_init);
 
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
 {
+       u32 first_seg;
+       int seg_order;
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
 
+       seg_order = max_t(int, order - log_mtts_per_seg, 0);
+       first_seg = offset / (1 << log_mtts_per_seg);
+
+       mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+       mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
+                            first_seg + (1 << seg_order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, offset);
+               set_param_h(&in_param, order);
+               err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+                                                      MLX4_CMD_FREE_RES,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+                                 offset, order);
+               return;
+       }
+       __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
        if (mtt->order < 0)
                return;
 
-       mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-       mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-                            mtt->first_seg + (1 << mtt->order) - 1);
+       mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
 
 u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
 {
-       return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+       return (u64) mtt->offset * dev->caps.mtt_entry_sz;
 }
 EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
 
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
-       return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
-                       MLX4_CMD_TIME_CLASS_B);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, mpt_index,
+                       0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
-                           !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+                           !mailbox, MLX4_CMD_HW2SW_MPT,
+                           MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
-                 int npages, int page_shift, struct mlx4_mr *mr)
+static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                         u32 *base_mridx)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       u32 index;
-       int err;
+       u32 mridx;
 
-       index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
-       if (index == -1)
+       mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+       if (mridx == -1)
                return -ENOMEM;
 
+       *base_mridx = mridx;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+                          u64 iova, u64 size, u32 access, int npages,
+                          int page_shift, struct mlx4_mr *mr)
+{
        mr->iova       = iova;
        mr->size       = size;
        mr->pd         = pd;
        mr->access     = access;
-       mr->enabled    = 0;
-       mr->key        = hw_index_to_key(index);
+       mr->enabled    = MLX4_MR_DISABLED;
+       mr->key        = hw_index_to_key(mridx);
+
+       return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+                         struct mlx4_cmd_mailbox *mailbox,
+                         int num_entries)
+{
+       return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+                       MLX4_CMD_TIME_CLASS_A,  MLX4_CMD_WRAPPED);
+}
 
-       err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
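+/* Same pattern as the MTT allocator: multi-function devices reserve MPT
+ * entries through the wrapped ALLOC_RES command, native devices allocate
+ * straight from the bitmap. */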
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+       u64 out_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       return -1;
+               return get_param_l(&out_param);
+       }
+       return  __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, index);
+               if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to release mr index:%d\n",
+                                 index);
+               return;
+       }
+       __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+       u64 param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&param, index);
+               return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+                                                       MLX4_CMD_ALLOC_RES,
+                                                       MLX4_CMD_TIME_CLASS_A,
+                                                       MLX4_CMD_WRAPPED);
+       }
+       return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, index);
+               if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+                            MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                            MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+                                 index);
+               return;
+       }
+       return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+                 int npages, int page_shift, struct mlx4_mr *mr)
+{
+       u32 index;
+       int err;
+
+       index = mlx4_mr_reserve(dev);
+       if (index == -1)
+               return -ENOMEM;
+
+       err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+                                    access, npages, page_shift, mr);
        if (err)
-               mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+               mlx4_mr_release(dev, index);
 
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
 
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-       struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
 
-       if (mr->enabled) {
+       if (mr->enabled == MLX4_MR_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err)
-                       mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
-       }
+                       mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
 
+               mr->enabled = MLX4_MR_EN_SW;
+       }
        mlx4_mtt_cleanup(dev, &mr->mtt);
-       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+       mlx4_mr_free_reserved(dev, mr);
+       if (mr->enabled)
+               mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+       mlx4_mr_release(dev, key_to_hw_index(mr->key));
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_free);
 
 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;
 
-       err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
        if (err)
                return err;
 
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
 
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
-               mpt_entry->mtt_seg = 0;
+               mpt_entry->mtt_addr = 0;
        } else {
-               mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+               mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+                                                 &mr->mtt));
        }
 
        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
-               mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-                                                  dev->caps.mtts_per_seg);
+               mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
-
-       mr->enabled = 1;
+       mr->enabled = MLX4_MR_EN_HW;
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -373,7 +546,7 @@ err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
 err_table:
-       mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;
-       int s = start_index * sizeof (u64);
 
-       /* All MTTs must fit in the same page */
-       if (start_index / (PAGE_SIZE / sizeof (u64)) !=
-           (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
-               return -EINVAL;
-
-       if (start_index & (dev->caps.mtts_per_seg - 1))
-               return -EINVAL;
+       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+                              start_index, &dma_handle);
 
-       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
-                               s / dev->caps.mtt_entry_sz, &dma_handle);
        if (!mtts)
                return -ENOMEM;
 
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        return 0;
 }
 
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    int start_index, int npages, u64 *page_list)
 {
+       int err = 0;
        int chunk;
-       int err;
+       int mtts_per_page;
+       int max_mtts_first_page;
 
-       if (mtt->order < 0)
-               return -EINVAL;
+       /* compute how many MTTs fit in the first page */
+       mtts_per_page = PAGE_SIZE / sizeof(u64);
+       max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+                             % mtts_per_page;
+
+       chunk = min_t(int, max_mtts_first_page, npages);
 
        while (npages > 0) {
-               chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
                err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
                if (err)
                        return err;
-
                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;
+
+               chunk = min_t(int, mtts_per_page, npages);
        }
+       return err;
+}
 
-       return 0;
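+/* For multi-function devices MTT entries are written through the WRITE_MTT
+ * command: mailbox entry 0 carries the first MTT offset, entry 1 is reserved,
+ * and up to MLX4_MAILBOX_SIZE / 8 - 2 entries are written per command. */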
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  int start_index, int npages, u64 *page_list)
+{
+       struct mlx4_cmd_mailbox *mailbox = NULL;
+       __be64 *inbox = NULL;
+       int chunk;
+       int err = 0;
+       int i;
+
+       if (mtt->order < 0)
+               return -EINVAL;
+
+       if (mlx4_is_mfunc(dev)) {
+               mailbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(mailbox))
+                       return PTR_ERR(mailbox);
+               inbox = mailbox->buf;
+
+               while (npages > 0) {
+                       chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+                                     npages);
+                       inbox[0] = cpu_to_be64(mtt->offset + start_index);
+                       inbox[1] = 0;
+                       for (i = 0; i < chunk; ++i)
+                               inbox[i + 2] = cpu_to_be64(page_list[i] |
+                                              MLX4_MTT_FLAG_PRESENT);
+                       err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+                       if (err) {
+                               mlx4_free_cmd_mailbox(dev, mailbox);
+                               return err;
+                       }
+
+                       npages      -= chunk;
+                       start_index += chunk;
+                       page_list   += chunk;
+               }
+               mlx4_free_cmd_mailbox(dev, mailbox);
+               return err;
+       }
+
+       return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
 }
 EXPORT_SYMBOL_GPL(mlx4_write_mtt);
 
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
 
 int mlx4_init_mr_table(struct mlx4_dev *dev)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mr_table *mr_table = &priv->mr_table;
        int err;
 
+       if (!is_power_of_2(dev->caps.num_mpts))
+               return -EINVAL;
+
+       /* Nothing to do for slaves - all MR handling is forwarded
+        * to the master */
+       if (mlx4_is_slave(dev))
+               return 0;
+
        err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
                               ~0, dev->caps.reserved_mrws, 0);
        if (err)
                return err;
 
        err = mlx4_buddy_init(&mr_table->mtt_buddy,
-                             ilog2(dev->caps.num_mtt_segs));
+                             ilog2(dev->caps.num_mtts /
+                             (1 << log_mtts_per_seg)));
        if (err)
                goto err_buddy;
 
        if (dev->caps.reserved_mtts) {
-               if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+               priv->reserved_mtts =
+                       mlx4_alloc_mtt_range(dev,
+                                            fls(dev->caps.reserved_mtts - 1));
+               if (priv->reserved_mtts < 0) {
                        mlx4_warn(dev, "MTT table of order %d is too small.\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
 
 void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
 {
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mr_table *mr_table = &priv->mr_table;
 
+       if (mlx4_is_slave(dev))
+               return;
+       if (priv->reserved_mtts >= 0)
+               mlx4_free_mtt_range(dev, priv->reserved_mtts,
+                                   fls(dev->caps.reserved_mtts - 1));
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
 }
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
                   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       u64 mtt_seg;
+       u64 mtt_offset;
        int err = -ENOMEM;
 
        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
        if (err)
                return err;
 
-       mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+       mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
 
        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
-                                   fmr->mr.mtt.first_seg,
+                                   fmr->mr.mtt.offset,
                                    &fmr->dma_handle);
+
        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
@@ -619,6 +852,46 @@ err_free:
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
 
+static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+                           u32 pd, u32 access, int max_pages,
+                           int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err = -ENOMEM;
+
+       if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+               return -EINVAL;
+
+       /* All MTTs must fit in the same page */
+       if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+               return -EINVAL;
+
+       fmr->page_shift = page_shift;
+       fmr->max_pages  = max_pages;
+       fmr->max_maps   = max_maps;
+       fmr->maps = 0;
+
+       err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+                                    page_shift, &fmr->mr);
+       if (err)
+               return err;
+
+       fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+                                   fmr->mr.mtt.offset,
+                                   &fmr->dma_handle);
+       if (!fmr->mtts) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
+       return 0;
+
+err_free:
+       mlx4_mr_free_reserved(dev, &fmr->mr);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
 {
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
        if (!fmr->maps)
                return;
 
        fmr->maps = 0;
 
-       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
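+       /* Return MPT ownership to software through the HW2SW_MPT command
+        * rather than writing the ownership byte directly. */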
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+                      " failed (%d)\n", err);
+               return;
+       }
+
+       err = mlx4_HW2SW_MPT(dev, NULL,
+                            key_to_hw_index(fmr->mr.key) &
+                            (dev->caps.num_mpts - 1));
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err) {
+               printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+                      err);
+               return;
+       }
+       fmr->mr.enabled = MLX4_MR_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
        if (fmr->maps)
                return -EBUSY;
 
-       fmr->mr.enabled = 0;
        mlx4_mr_free(dev, &fmr->mr);
+       fmr->mr.enabled = MLX4_MR_DISABLED;
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_free);
 
+static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+       if (fmr->maps)
+               return -EBUSY;
+
+       mlx4_mr_free_reserved(dev, &fmr->mr);
+       fmr->mr.enabled = MLX4_MR_DISABLED;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
 int mlx4_SYNC_TPT(struct mlx4_dev *dev)
 {
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+                       MLX4_CMD_WRAPPED);
 }
 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
index 260ed25..5c9a54d 100644 (file)
@@ -31,6 +31,7 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io-mapping.h>
@@ -51,7 +52,8 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
        *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
        if (*pdn == -1)
                return -ENOMEM;
-
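+       /* Encode the owning function (plus one) above NOT_MASKED_PD_BITS */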
+       if (mlx4_is_mfunc(dev))
+               *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
@@ -85,7 +87,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
 
        return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-                               (1 << 24) - 1, dev->caps.reserved_pds, 0);
+                               (1 << NOT_MASKED_PD_BITS) - 1,
+                                dev->caps.reserved_pds, 0);
 }
 
 void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +111,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
 {
+       int offset;
+
        uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
        if (uar->index == -1)
                return -ENOMEM;
 
-       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
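+       /* A slave sees only a slice of the UAR BAR, so fold its index into
+        * the number of UAR pages that BAR actually exposes. */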
+       if (mlx4_is_slave(dev))
+               offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+                                      dev->caps.uar_page_size);
+       else
+               offset = uar->index;
+       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
        uar->map = NULL;
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +241,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
 
        return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
                                dev->caps.num_uars, dev->caps.num_uars - 1,
-                               max(128, dev->caps.reserved_uars), 0);
+                               dev->caps.reserved_uars, 0);
 }
 
 void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
index d942aea..00a9547 100644 (file)
@@ -70,41 +70,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
        table->total = 0;
 }
 
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
-                                  __be64 *entries)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 in_mod;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
-       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
-                            u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
        struct mlx4_qp qp;
        u8 gid[16] = {0};
        int err;
 
-       if (reserve) {
-               err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
-               if (err) {
-                       mlx4_err(dev, "Failed to reserve qp for mac registration\n");
-                       return err;
-               }
-       }
        qp.qpn = *qpn;
 
        mac &= 0xffffffffffffULL;
@@ -113,16 +84,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
        gid[5] = port;
        gid[7] = MLX4_UC_STEER << 1;
 
-       err = mlx4_qp_attach_common(dev, &qp, gid, 0,
-                                   MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (err && reserve)
-               mlx4_qp_release_range(dev, *qpn, 1);
+       err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+       if (err)
+               mlx4_warn(dev, "Failed attaching unicast\n");
 
        return err;
 }
 
 static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-                                 u64 mac, int qpn, u8 free)
+                                 u64 mac, int qpn)
 {
        struct mlx4_qp qp;
        u8 gid[16] = {0};
@@ -134,60 +104,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
        gid[5] = port;
        gid[7] = MLX4_UC_STEER << 1;
 
-       mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (free)
-               mlx4_qp_release_range(dev, qpn, 1);
+       mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+                         struct mlx4_mac_table *table, int index)
+{
+       int err = 0;
+
+       if (index < 0 || index >= table->max || !table->entries[index]) {
+               mlx4_warn(dev, "No valid Mac entry for the given index\n");
+               err = -EINVAL;
+       }
+       return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+                     struct mlx4_mac_table *table, u64 mac)
+{
+       int i;
+
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if ((mac & MLX4_MAC_MASK) ==
+                   (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+                       return i;
+       }
+       /* Mac not found */
+       return -EINVAL;
 }
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
        struct mlx4_mac_entry *entry;
-       int i, err = 0;
-       int free = -1;
+       int index = 0;
+       int err = 0;
 
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
-               if (err)
-                       return err;
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+                       (unsigned long long) mac);
+       index = mlx4_register_mac(dev, port, mac);
+       if (index < 0) {
+               err = index;
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) mac);
+               return err;
+       }
 
-               entry = kmalloc(sizeof *entry, GFP_KERNEL);
-               if (!entry) {
-                       mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                       return -ENOMEM;
-               }
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+               *qpn = info->base_qpn + index;
+               return 0;
+       }
+
+       err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+       mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+       if (err) {
+               mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+               goto qp_err;
+       }
+
+       err = mlx4_uc_steer_add(dev, port, mac, qpn);
+       if (err)
+               goto steer_err;
 
-               entry->mac = mac;
-               err = radix_tree_insert(&info->mac_tree, *qpn, entry);
-               if (err) {
+       entry = kmalloc(sizeof *entry, GFP_KERNEL);
+       if (!entry) {
+               err = -ENOMEM;
+               goto alloc_err;
+       }
+       entry->mac = mac;
+       err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+       if (err)
+               goto insert_err;
+       return 0;
+
+insert_err:
+       kfree(entry);
+
+alloc_err:
+       mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+       mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+       mlx4_unregister_mac(dev, port, mac);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_entry *entry;
+
+       mlx4_dbg(dev, "Unregistering MAC: 0x%llx\n",
+                (unsigned long long) mac);
+       mlx4_unregister_mac(dev, port, mac);
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+               entry = radix_tree_lookup(&info->mac_tree, qpn);
+               if (entry) {
+                       mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+                                " qpn %d\n", port,
+                                (unsigned long long) mac, qpn);
+                       mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+                       mlx4_qp_release_range(dev, qpn, 1);
+                       radix_tree_delete(&info->mac_tree, qpn);
                        kfree(entry);
-                       mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                       return err;
                }
        }
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+                                  __be64 *entries)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_mod;
+       int err;
 
-       mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_table *table = &info->mac_table;
+       int i, err = 0;
+       int free = -1;
+
+       mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+                (unsigned long long) mac, port);
 
        mutex_lock(&table->mutex);
-       for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
-               if (free < 0 && !table->refs[i]) {
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if (free < 0 && !table->entries[i]) {
                        free = i;
                        continue;
                }
 
                if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-                       /* MAC already registered, increase references count */
-                       ++table->refs[i];
+                       /* MAC already registered; must not have duplicates */
+                       err = -EEXIST;
                        goto out;
                }
        }
 
-       if (free < 0) {
-               err = -ENOMEM;
-               goto out;
-       }
-
        mlx4_dbg(dev, "Free MAC index is %d\n", free);
 
        if (table->total == table->max) {
@@ -197,103 +271,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
        }
 
        /* Register new MAC */
-       table->refs[free] = 1;
        table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
 
        err = mlx4_set_port_mac_table(dev, port, table->entries);
        if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
-               table->refs[free] = 0;
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) mac);
                table->entries[free] = 0;
                goto out;
        }
 
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-               *qpn = info->base_qpn + free;
+       err = free;
        ++table->total;
 out:
        mutex_unlock(&table->mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
 
-static int validate_index(struct mlx4_dev *dev,
-                         struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
-       int err = 0;
+       u64 out_param;
+       int err;
 
-       if (index < 0 || index >= table->max || !table->entries[index]) {
-               mlx4_warn(dev, "No valid Mac entry for the given index\n");
-               err = -EINVAL;
-       }
-       return err;
-}
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
 
-static int find_index(struct mlx4_dev *dev,
-                     struct mlx4_mac_table *table, u64 mac)
-{
-       int i;
-       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
-                       return i;
+               return get_param_l(&out_param);
        }
-       /* Mac not found */
-       return -EINVAL;
+       return __mlx4_register_mac(dev, port, mac);
 }
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
 
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
-       struct mlx4_mac_entry *entry;
+       int index;
 
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (entry) {
-                       mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
-                       radix_tree_delete(&info->mac_tree, qpn);
-                       index = find_index(dev, table, entry->mac);
-                       kfree(entry);
-               }
-       }
+       index = find_index(dev, table, mac);
 
        mutex_lock(&table->mutex);
 
        if (validate_index(dev, table, index))
                goto out;
 
-       /* Check whether this address has reference count */
-       if (!(--table->refs[index])) {
-               table->entries[index] = 0;
-               mlx4_set_port_mac_table(dev, port, table->entries);
-               --table->total;
-       }
+       table->entries[index] = 0;
+       mlx4_set_port_mac_table(dev, port, table->entries);
+       --table->total;
 out:
        mutex_unlock(&table->mutex);
 }
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               return;
+       }
+       __mlx4_unregister_mac(dev, port, mac);
+       return;
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
 
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
 {
        struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
        struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
        struct mlx4_mac_entry *entry;
-       int err;
+       int index = qpn - info->base_qpn;
+       int err = 0;
 
        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
                entry = radix_tree_lookup(&info->mac_tree, qpn);
                if (!entry)
                        return -EINVAL;
-               index = find_index(dev, table, entry->mac);
-               mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+               mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+               mlx4_unregister_mac(dev, port, entry->mac);
                entry->mac = new_mac;
-               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
-               if (err || index < 0)
-                       return err;
+               mlx4_register_mac(dev, port, new_mac);
+               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+               return err;
        }
 
+       /* CX1 doesn't support multi-functions */
        mutex_lock(&table->mutex);
 
        err = validate_index(dev, table, index);
@@ -304,7 +378,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
 
        err = mlx4_set_port_mac_table(dev, port, table->entries);
        if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+                        (unsigned long long) new_mac);
                table->entries[index] = 0;
        }
 out:
@@ -312,6 +387,7 @@ out:
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
                                    __be32 *entries)
 {
@@ -326,7 +402,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
        memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
        in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
        err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -352,7 +428,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
 }
 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
 
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+                               int *index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
        int i, err = 0;
@@ -387,7 +464,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
                goto out;
        }
 
-       /* Register new MAC */
+       /* Register new VLAN */
        table->refs[free] = 1;
        table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
 
@@ -405,9 +482,27 @@ out:
        mutex_unlock(&table->mutex);
        return err;
 }
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&out_param, port);
+               err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+                                  RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *index = get_param_l(&out_param);
+
+               return err;
+       }
+       return __mlx4_register_vlan(dev, port, vlan, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
 
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 {
        struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
 
@@ -432,6 +527,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
 out:
        mutex_unlock(&table->mutex);
 }
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, port);
+               err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+                              MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                              MLX4_CMD_WRAPPED);
+               if (err)
+                       mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+                                       index);
+
+               return;
+       }
+       __mlx4_unregister_vlan(dev, port, index);
+}
 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
 
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +576,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
        *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
        err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
        if (!err)
                *caps = *(__be32 *) (outbuf + 84);
        mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +614,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
        *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
 
        err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+                          MLX4_CMD_NATIVE);
 
        packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
 
@@ -512,6 +628,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
        return err;
 }
 
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+                               u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_port_info *port_info;
+       struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+       struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+       struct mlx4_set_port_rqp_calc_context *qpn_context;
+       struct mlx4_set_port_general_context *gen_context;
+       int reset_qkey_viols;
+       int port;
+       int is_eth;
+       u32 in_modifier;
+       u32 promisc;
+       u16 mtu, prev_mtu;
+       int err;
+       int i;
+       __be32 agg_cap_mask;
+       __be32 slave_cap_mask;
+       __be32 new_cap_mask;
+
+       port = in_mod & 0xff;
+       in_modifier = in_mod >> 8;
+       is_eth = op_mod;
+       port_info = &priv->port[port];
+
+       /* Slaves cannot perform SET_PORT operations except changing MTU */
+       if (is_eth) {
+               if (slave != dev->caps.function &&
+                   in_modifier != MLX4_SET_PORT_GENERAL) {
+                       mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+                                       slave);
+                       return -EINVAL;
+               }
+               switch (in_modifier) {
+               case MLX4_SET_PORT_RQP_CALC:
+                       qpn_context = inbox->buf;
+                       qpn_context->base_qpn =
+                               cpu_to_be32(port_info->base_qpn);
+                       qpn_context->n_mac = 0x7;
+                       promisc = be32_to_cpu(qpn_context->promisc) >>
+                               SET_PORT_PROMISC_SHIFT;
+                       qpn_context->promisc = cpu_to_be32(
+                               promisc << SET_PORT_PROMISC_SHIFT |
+                               port_info->base_qpn);
+                       promisc = be32_to_cpu(qpn_context->mcast) >>
+                               SET_PORT_MC_PROMISC_SHIFT;
+                       qpn_context->mcast = cpu_to_be32(
+                               promisc << SET_PORT_MC_PROMISC_SHIFT |
+                               port_info->base_qpn);
+                       break;
+               case MLX4_SET_PORT_GENERAL:
+                       gen_context = inbox->buf;
+                       /* MTU is configured as the max MTU among all
+                        * the functions on the port. */
+                       mtu = be16_to_cpu(gen_context->mtu);
+                       mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+                       prev_mtu = slave_st->mtu[port];
+                       slave_st->mtu[port] = mtu;
+                       if (mtu > master->max_mtu[port])
+                               master->max_mtu[port] = mtu;
+                       if (mtu < prev_mtu && prev_mtu ==
+                                               master->max_mtu[port]) {
+                               slave_st->mtu[port] = mtu;
+                               master->max_mtu[port] = mtu;
+                               for (i = 0; i < dev->num_slaves; i++) {
+                                       master->max_mtu[port] =
+                                       max(master->max_mtu[port],
+                                           master->slave_state[i].mtu[port]);
+                               }
+                       }
+
+                       gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+                       break;
+               }
+               return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+                               MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                               MLX4_CMD_NATIVE);
+       }
+
+       /* For IB, we only consider:
+        * - The capability mask, which is set to the aggregate of all
+        *   slave function capabilities
+        * - The QKey violation counter - reset according to each request.
+        */
+
+       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+               reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+               new_cap_mask = ((__be32 *) inbox->buf)[2];
+       } else {
+               reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+               new_cap_mask = ((__be32 *) inbox->buf)[1];
+       }
+
+       agg_cap_mask = 0;
+       slave_cap_mask =
+               priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+       priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+       for (i = 0; i < dev->num_slaves; i++)
+               agg_cap_mask |=
+                       priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+       /* Only clear the mailbox for guests.  The master may be setting
+        * the MTU or PKEY table size.
+        */
+       if (slave != dev->caps.function)
+               memset(inbox->buf, 0, 256);
+       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+               *(u8 *) inbox->buf         = !!reset_qkey_viols << 6;
+               ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+       } else {
+               ((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
+               ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+       }
+
+       err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+       if (err)
+               priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+                       slave_cap_mask;
+       return err;
+}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+                                   vhcr->op_modifier, inbox);
+}
+
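As an aside on the MTU handling above: each function (master or slave) may only request its own MTU, and the value actually programmed on the port is the maximum across all functions, recomputed whenever a function lowers its request. A hedged, userspace-only sketch of that policy; the names here (recompute_port_mtu, slave_mtu) are illustrative and are not the driver's structures.

#include <stdio.h>

/* Recompute the effective port MTU as the max over all functions. */
static unsigned short recompute_port_mtu(const unsigned short *slave_mtu,
                                         int num_slaves)
{
        unsigned short max = 0;
        int i;

        for (i = 0; i < num_slaves; i++)
                if (slave_mtu[i] > max)
                        max = slave_mtu[i];
        return max;
}

int main(void)
{
        unsigned short mtu[] = { 1500, 9000, 4096 };

        /* Function 1 lowers its request; the port MTU must be recomputed. */
        mtu[1] = 1500;
        printf("effective port MTU: %u\n", recompute_port_mtu(mtu, 3));
        return 0;
}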
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +777,127 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 
        ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
        err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
 }
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       int err;
+       u32 in_mod;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->flags = SET_PORT_GEN_ALL_VALID;
+       context->mtu = cpu_to_be16(mtu);
+       context->pptx = (pptx * (!pfctx)) << 7;
+       context->pfctx = pfctx;
+       context->pprx = (pprx * (!pfcrx)) << 7;
+       context->pfcrx = pfcrx;
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+                          u8 promisc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_rqp_calc_context *context;
+       int err;
+       u32 in_mod;
+       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+               MCAST_DIRECT : MCAST_DEFAULT;
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
+           dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+               return 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->base_qpn = cpu_to_be32(base_qpn);
+       context->n_mac = dev->caps.log_num_macs;
+       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+                                      base_qpn);
+       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+                                    base_qpn);
+       context->intra_no_vlan = 0;
+       context->no_vlan = MLX4_NO_VLAN_IDX;
+       context->intra_vlan_miss = 0;
+       context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       int err = 0;
+
+       return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+                       u64 mac, u64 clear, u8 mode)
+{
+       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+                       MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int err = 0;
+
+       return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+                              u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+       return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+                           MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+                           MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+                               struct mlx4_vhcr *vhcr,
+                               struct mlx4_cmd_mailbox *inbox,
+                               struct mlx4_cmd_mailbox *outbox,
+                               struct mlx4_cmd_info *cmd)
+{
+       return mlx4_common_dump_eth_stats(dev, slave,
+                                         vhcr->in_modifier, outbox);
+}
index b967647..66f91ca 100644 (file)
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
        profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
        profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-       profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
-       profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
+       profile[MLX4_RES_MTT].size    = dev_cap->mtt_entry_sz;
+       profile[MLX4_RES_MCG].size    = mlx4_get_mgm_entry_size(dev);
 
        profile[MLX4_RES_QP].num      = request->num_qp;
        profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        init_hca->cmpt_base      = profile[i].start;
                        break;
                case MLX4_RES_MTT:
-                       dev->caps.num_mtt_segs   = profile[i].num;
+                       dev->caps.num_mtts       = profile[i].num;
                        priv->mr_table.mtt_base  = profile[i].start;
                        init_hca->mtt_base       = profile[i].start;
                        break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                        dev->caps.num_mgms        = profile[i].num >> 1;
                        dev->caps.num_amgms       = profile[i].num >> 1;
                        init_hca->mc_base         = profile[i].start;
-                       init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+                       init_hca->log_mc_entry_sz =
+                                       ilog2(mlx4_get_mgm_entry_size(dev));
                        init_hca->log_mc_table_sz = profile[i].log_num;
                        init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
                        break;
index 15f870c..6b03ac8 100644 (file)
@@ -35,6 +35,8 @@
 
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
        spin_unlock(&qp_table->lock);
 
        if (!qp) {
-               mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+               mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
                return;
        }
 
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
                complete(&qp->free);
 }
 
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
-                  struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
-                  int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+       return qp->qpn >= dev->caps.sqp_start &&
+               qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+                    struct mlx4_qp_context *context,
+                    enum mlx4_qp_optpar optpar,
+                    int sqd_event, struct mlx4_qp *qp, int native)
 {
        static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
                [MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                }
        };
 
+       struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int ret = 0;
+       u8 port;
 
        if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
            !op[cur_state][new_state])
                return -EINVAL;
 
-       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
-               return mlx4_cmd(dev, 0, qp->qpn, 2,
-                               MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+               ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+                       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+               if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+                   cur_state != MLX4_QP_STATE_RST &&
+                   is_qp0(dev, qp)) {
+                       port = (qp->qpn & 1) + 1;
+                       priv->mfunc.master.qp0_state[port].qp0_active = 0;
+               }
+               return ret;
+       }
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -132,107 +151,218 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
        }
 
+       port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
+       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+               context->pri_path.sched_queue = (context->pri_path.sched_queue &
+                                               0xc3);
+
        *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
        memcpy(mailbox->buf + 8, context, sizeof *context);
 
        ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
                cpu_to_be32(qp->qpn);
 
-       ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+       ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+                      qp->qpn | (!!sqd_event << 31),
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
-                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
 }
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+                  struct mlx4_qp_context *context,
+                  enum mlx4_qp_optpar optpar,
+                  int sqd_event, struct mlx4_qp *qp)
+{
+       return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+                               optpar, sqd_event, qp, 0);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
 
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+                                  int *base)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
-       int qpn;
 
-       qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-       if (qpn == -1)
+       *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+       if (*base == -1)
                return -ENOMEM;
 
-       *base = qpn;
        return 0;
 }
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+       u64 in_param;
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, cnt);
+               set_param_h(&in_param, align);
+               err = mlx4_cmd_imm(dev, in_param, &out_param,
+                                  RES_QP, RES_OP_RESERVE,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err)
+                       return err;
+
+               *base = get_param_l(&out_param);
+               return 0;
+       }
+       return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
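The multi-function path above packs the two 32-bit arguments (count and alignment) into the single 64-bit immediate passed with MLX4_CMD_ALLOC_RES, and reads the allocated base back the same way via get_param_l(). A minimal sketch of that packing, assuming set_param_l()/set_param_h() simply occupy the low and high 32 bits; the sketch_* helpers below are illustrative, not the driver's API.

#include <stdint.h>
#include <stdio.h>

static void sketch_set_param_l(uint64_t *p, uint32_t v)
{
        *p = (*p & 0xffffffff00000000ULL) | v;                   /* low 32 bits */
}

static void sketch_set_param_h(uint64_t *p, uint32_t v)
{
        *p = (*p & 0x00000000ffffffffULL) | ((uint64_t)v << 32); /* high 32 bits */
}

static uint32_t sketch_get_param_l(const uint64_t *p)
{
        return (uint32_t)(*p & 0xffffffffULL);
}

int main(void)
{
        uint64_t in_param = 0;

        sketch_set_param_l(&in_param, 8);       /* cnt */
        sketch_set_param_h(&in_param, 4);       /* align */
        printf("in_param=0x%016llx cnt=%u\n",
               (unsigned long long)in_param, sketch_get_param_l(&in_param));
        return 0;
}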
 
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
-       if (base_qpn < dev->caps.sqp_start + 8)
-               return;
 
+       if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+               return;
        mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
 }
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+       u64 in_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, base_qpn);
+               set_param_h(&in_param, cnt);
+               err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+                              MLX4_CMD_FREE_RES,
+                              MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (err) {
+                       mlx4_warn(dev, "Failed to release qp range"
+                                 " base:%d cnt:%d\n", base_qpn, cnt);
+               }
+       } else
+                __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
 
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;
 
-       if (!qpn)
-               return -EINVAL;
-
-       qp->qpn = qpn;
-
-       err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
        if (err)
                goto err_out;
 
-       err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
        if (err)
                goto err_put_qp;
 
-       err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
        if (err)
                goto err_put_auxc;
 
-       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
        if (err)
                goto err_put_altc;
 
-       err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+       err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
        if (err)
                goto err_put_rdmarc;
 
-       spin_lock_irq(&qp_table->lock);
-       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
-       spin_unlock_irq(&qp_table->lock);
-       if (err)
-               goto err_put_cmpt;
-
-       atomic_set(&qp->refcount, 1);
-       init_completion(&qp->free);
-
        return 0;
 
-err_put_cmpt:
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
 err_put_rdmarc:
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
 
 err_put_altc:
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->altc_table, qpn);
 
 err_put_auxc:
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->auxc_table, qpn);
 
 err_put_qp:
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->qp_table, qpn);
 
 err_out:
        return err;
 }
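The refactored __mlx4_qp_alloc_icm() keeps the kernel's usual goto-based unwind: each mlx4_table_get() that succeeds has a matching mlx4_table_put() label further down, executed in reverse order when a later step fails. A hedged, standalone illustration of the same idiom; the resource names are made up for the example.

#include <stdio.h>

static int get_res_a(void) { puts("get a"); return 0; }
static int get_res_b(void) { puts("get b"); return 0; }
static int get_res_c(void) { puts("get c"); return -1; } /* fails */
static void put_res_a(void) { puts("put a"); }
static void put_res_b(void) { puts("put b"); }

static int alloc_all(void)
{
        int err;

        err = get_res_a();
        if (err)
                goto err_out;
        err = get_res_b();
        if (err)
                goto err_put_a;
        err = get_res_c();
        if (err)
                goto err_put_b;
        return 0;

err_put_b:
        put_res_b();    /* undo in reverse acquisition order */
err_put_a:
        put_res_a();
err_out:
        return err;
}

int main(void)
{
        printf("alloc_all() = %d\n", alloc_all());
        return 0;
}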
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+       u64 param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&param, qpn);
+               return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+                                   MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+                                   MLX4_CMD_WRAPPED);
+       }
+       return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+       mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+       mlx4_table_put(dev, &qp_table->altc_table, qpn);
+       mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+       mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, qpn);
+               if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+                            MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+                            MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+       } else
+               __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+       int err;
+
+       if (!qpn)
+               return -EINVAL;
+
+       qp->qpn = qpn;
+
+       err = mlx4_qp_alloc_icm(dev, qpn);
+       if (err)
+               return err;
+
+       spin_lock_irq(&qp_table->lock);
+       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+                               (dev->caps.num_qps - 1), qp);
+       spin_unlock_irq(&qp_table->lock);
+       if (err)
+               goto err_icm;
+
+       atomic_set(&qp->refcount, 1);
+       init_completion(&qp->free);
+
+       return 0;
+
+err_icm:
+       mlx4_qp_free_icm(dev, qpn);
+       return err;
+}
+
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +378,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
 
 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
 {
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
        wait_for_completion(&qp->free);
 
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+       mlx4_qp_free_icm(dev, qp->qpn);
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_free);
 
 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
 {
        return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
 int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +400,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
@@ -327,6 +453,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
+
        mlx4_CONF_SPECIAL_QP(dev, 0);
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
 }
@@ -342,7 +471,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
                return PTR_ERR(mailbox);
 
        err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
-                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_WRAPPED);
        if (!err)
                memcpy(context, mailbox->buf + 8, sizeof *context);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644 (file)
index 0000000..bdd61c3
--- /dev/null
@@ -0,0 +1,3103 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID         (1ull << 63)
+#define MLX4_MAC_MASK          0x7fffffffffffffffULL
+#define ETH_ALEN               6
+
+struct mac_res {
+       struct list_head list;
+       u64 mac;
+       u8 port;
+};
+
+struct res_common {
+       struct list_head        list;
+       u32                     res_id;
+       int                     owner;
+       int                     state;
+       int                     from_state;
+       int                     to_state;
+       int                     removing;
+};
+
+enum {
+       RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+       struct list_head        list;
+       u8                      gid[16];
+       enum mlx4_protocol      prot;
+};
+
+enum res_qp_states {
+       RES_QP_BUSY = RES_ANY_BUSY,
+
+       /* QP number was allocated */
+       RES_QP_RESERVED,
+
+       /* ICM memory for QP context was mapped */
+       RES_QP_MAPPED,
+
+       /* QP is in hw ownership */
+       RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+       switch (state) {
+       case RES_QP_BUSY: return "RES_QP_BUSY";
+       case RES_QP_RESERVED: return "RES_QP_RESERVED";
+       case RES_QP_MAPPED: return "RES_QP_MAPPED";
+       case RES_QP_HW: return "RES_QP_HW";
+       default: return "Unknown";
+       }
+}
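The res_qp_states above form a small per-resource state machine: a QP number is first RESERVED, then MAPPED once its ICM backing is in place, then HW once firmware owns it, with BUSY acting as a transient lock state while a transition is in flight (see qp_res_start_move_to() later in this file). A hedged sketch of checking such transitions with a lookup table; the table contents roughly follow that function's checks but are illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum sketch_qp_state { S_BUSY, S_RESERVED, S_MAPPED, S_HW, S_NUM };

/* allowed[from][to]: which target states a resource may move to. */
static const bool allowed[S_NUM][S_NUM] = {
        [S_RESERVED] = { [S_MAPPED] = true },
        [S_MAPPED]   = { [S_RESERVED] = true, [S_HW] = true },
        [S_HW]       = { [S_MAPPED] = true },
};

static int start_move(enum sketch_qp_state *cur, enum sketch_qp_state to)
{
        if (*cur == S_BUSY || !allowed[*cur][to])
                return -1;      /* -EBUSY / -EINVAL in the driver */
        *cur = S_BUSY;          /* stays busy until a res_end_move()-like call */
        return 0;
}

int main(void)
{
        enum sketch_qp_state st = S_RESERVED;

        printf("reserved->mapped: %d\n", start_move(&st, S_MAPPED));
        st = S_MAPPED;
        printf("mapped->hw: %d\n", start_move(&st, S_HW));
        return 0;
}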
+
+struct res_qp {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       struct res_cq          *rcq;
+       struct res_cq          *scq;
+       struct res_srq         *srq;
+       struct list_head        mcg_list;
+       spinlock_t              mcg_spl;
+       int                     local_qpn;
+};
+
+enum res_mtt_states {
+       RES_MTT_BUSY = RES_ANY_BUSY,
+       RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+       switch (state) {
+       case RES_MTT_BUSY: return "RES_MTT_BUSY";
+       case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+       default: return "Unknown";
+       }
+}
+
+struct res_mtt {
+       struct res_common       com;
+       int                     order;
+       atomic_t                ref_count;
+};
+
+enum res_mpt_states {
+       RES_MPT_BUSY = RES_ANY_BUSY,
+       RES_MPT_RESERVED,
+       RES_MPT_MAPPED,
+       RES_MPT_HW,
+};
+
+struct res_mpt {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       int                     key;
+};
+
+enum res_eq_states {
+       RES_EQ_BUSY = RES_ANY_BUSY,
+       RES_EQ_RESERVED,
+       RES_EQ_HW,
+};
+
+struct res_eq {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+};
+
+enum res_cq_states {
+       RES_CQ_BUSY = RES_ANY_BUSY,
+       RES_CQ_ALLOCATED,
+       RES_CQ_HW,
+};
+
+struct res_cq {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       atomic_t                ref_count;
+};
+
+enum res_srq_states {
+       RES_SRQ_BUSY = RES_ANY_BUSY,
+       RES_SRQ_ALLOCATED,
+       RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+       switch (state) {
+       case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+       case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+       case RES_SRQ_HW: return "RES_SRQ_HW";
+       default: return "Unknown";
+       }
+}
+
+struct res_srq {
+       struct res_common       com;
+       struct res_mtt         *mtt;
+       struct res_cq          *cq;
+       atomic_t                ref_count;
+};
+
+enum res_counter_states {
+       RES_COUNTER_BUSY = RES_ANY_BUSY,
+       RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+       switch (state) {
+       case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+       case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+       default: return "Unknown";
+       }
+}
+
+struct res_counter {
+       struct res_common       com;
+       int                     port;
+};
+
+/* For debug purposes */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+       switch (rt) {
+       case RES_QP: return "RES_QP";
+       case RES_CQ: return "RES_CQ";
+       case RES_SRQ: return "RES_SRQ";
+       case RES_MPT: return "RES_MPT";
+       case RES_MTT: return "RES_MTT";
+       case RES_MAC: return  "RES_MAC";
+       case RES_EQ: return "RES_EQ";
+       case RES_COUNTER: return "RES_COUNTER";
+       default: return "Unknown resource type !!!";
+       };
+}
+
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+       int t;
+
+       priv->mfunc.master.res_tracker.slave_list =
+               kzalloc(dev->num_slaves * sizeof(struct slave_list),
+                       GFP_KERNEL);
+       if (!priv->mfunc.master.res_tracker.slave_list)
+               return -ENOMEM;
+
+       for (i = 0 ; i < dev->num_slaves; i++) {
+               for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+                       INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+                                      slave_list[i].res_list[t]);
+               mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+       }
+
+       mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+                dev->num_slaves);
+       for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+               INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+                               GFP_ATOMIC|__GFP_NOWARN);
+
+       spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+       return 0;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+
+       if (priv->mfunc.master.res_tracker.slave_list) {
+               for (i = 0 ; i < dev->num_slaves; i++)
+                       mlx4_delete_all_resources_for_slave(dev, i);
+
+               kfree(priv->mfunc.master.res_tracker.slave_list);
+       }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+                         struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+       u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+       if (MLX4_QP_ST_UD == ts)
+               qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+       mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+               slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+       return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+                     enum mlx4_resource type)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+                                res_id);
+}
+
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+                  enum mlx4_resource type,
+                  void *res)
+{
+       struct res_common *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = find_res(dev, res_id, type);
+       if (!r) {
+               err = -ENONET;
+               goto exit;
+       }
+
+       if (r->state == RES_ANY_BUSY) {
+               err = -EBUSY;
+               goto exit;
+       }
+
+       if (r->owner != slave) {
+               err = -EPERM;
+               goto exit;
+       }
+
+       r->from_state = r->state;
+       r->state = RES_ANY_BUSY;
+       mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+                ResourceType(type), r->res_id);
+
+       if (res)
+               *((struct res_common **)res) = r;
+
+exit:
+       spin_unlock_irq(mlx4_tlock(dev));
+       return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+                                   enum mlx4_resource type,
+                                   int res_id, int *slave)
+{
+
+       struct res_common *r;
+       int err = -ENOENT;
+       int id = res_id;
+
+       if (type == RES_QP)
+               id &= 0x7fffff;
+       spin_lock_irq(mlx4_tlock(dev));
+
+       r = find_res(dev, id, type);
+       if (r) {
+               *slave = r->owner;
+               err = 0;
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+                   enum mlx4_resource type)
+{
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = find_res(dev, res_id, type);
+       if (r)
+               r->state = r->from_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+       struct res_qp *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_QP_RESERVED;
+       INIT_LIST_HEAD(&ret->mcg_list);
+       spin_lock_init(&ret->mcg_spl);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+       struct res_mtt *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->order = order;
+       ret->com.state = RES_MTT_ALLOCATED;
+       atomic_set(&ret->ref_count, 0);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+       struct res_mpt *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_MPT_RESERVED;
+       ret->key = key;
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+       struct res_eq *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_EQ_RESERVED;
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+       struct res_cq *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_CQ_ALLOCATED;
+       atomic_set(&ret->ref_count, 0);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+       struct res_srq *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_SRQ_ALLOCATED;
+       atomic_set(&ret->ref_count, 0);
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+       struct res_counter *ret;
+
+       ret = kzalloc(sizeof *ret, GFP_KERNEL);
+       if (!ret)
+               return NULL;
+
+       ret->com.res_id = id;
+       ret->com.state = RES_COUNTER_ALLOCATED;
+
+       return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+                                  int extra)
+{
+       struct res_common *ret;
+
+       switch (type) {
+       case RES_QP:
+               ret = alloc_qp_tr(id);
+               break;
+       case RES_MPT:
+               ret = alloc_mpt_tr(id, extra);
+               break;
+       case RES_MTT:
+               ret = alloc_mtt_tr(id, extra);
+               break;
+       case RES_EQ:
+               ret = alloc_eq_tr(id);
+               break;
+       case RES_CQ:
+               ret = alloc_cq_tr(id);
+               break;
+       case RES_SRQ:
+               ret = alloc_srq_tr(id);
+               break;
+       case RES_MAC:
+               printk(KERN_ERR "implementation missing\n");
+               return NULL;
+       case RES_COUNTER:
+               ret = alloc_counter_tr(id);
+               break;
+
+       default:
+               return NULL;
+       }
+       if (ret)
+               ret->owner = slave;
+
+       return ret;
+}
+
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+                        enum mlx4_resource type, int extra)
+{
+       int i;
+       int err;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct res_common **res_arr;
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct radix_tree_root *root = &tracker->res_tree[type];
+
+       res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+       if (!res_arr)
+               return -ENOMEM;
+
+       for (i = 0; i < count; ++i) {
+               res_arr[i] = alloc_tr(base + i, type, slave, extra);
+               if (!res_arr[i]) {
+                       for (--i; i >= 0; --i)
+                               kfree(res_arr[i]);
+
+                       kfree(res_arr);
+                       return -ENOMEM;
+               }
+       }
+
+       spin_lock_irq(mlx4_tlock(dev));
+       for (i = 0; i < count; ++i) {
+               if (find_res(dev, base + i, type)) {
+                       err = -EEXIST;
+                       goto undo;
+               }
+               err = radix_tree_insert(root, base + i, res_arr[i]);
+               if (err)
+                       goto undo;
+               list_add_tail(&res_arr[i]->list,
+                             &tracker->slave_list[slave].res_list[type]);
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+       kfree(res_arr);
+
+       return 0;
+
+undo:
+       for (--i; i >= 0; --i)
+               radix_tree_delete(&tracker->res_tree[type], base + i);
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       for (i = 0; i < count; ++i)
+               kfree(res_arr[i]);
+
+       kfree(res_arr);
+
+       return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+       if (res->com.state == RES_QP_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_QP_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+       if (res->com.state == RES_MTT_BUSY ||
+           atomic_read(&res->ref_count)) {
+               printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+                      __func__, __LINE__,
+                      mtt_states_str(res->com.state),
+                      atomic_read(&res->ref_count));
+               return -EBUSY;
+       } else if (res->com.state != RES_MTT_ALLOCATED)
+               return -EPERM;
+       else if (res->order != order)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+       if (res->com.state == RES_MPT_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_MPT_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+       if (res->com.state == RES_MPT_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_MPT_RESERVED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+       if (res->com.state == RES_COUNTER_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_COUNTER_ALLOCATED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+       if (res->com.state == RES_CQ_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_CQ_ALLOCATED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+       if (res->com.state == RES_SRQ_BUSY)
+               return -EBUSY;
+       else if (res->com.state != RES_SRQ_ALLOCATED)
+               return -EPERM;
+
+       return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+       switch (type) {
+       case RES_QP:
+               return remove_qp_ok((struct res_qp *)res);
+       case RES_CQ:
+               return remove_cq_ok((struct res_cq *)res);
+       case RES_SRQ:
+               return remove_srq_ok((struct res_srq *)res);
+       case RES_MPT:
+               return remove_mpt_ok((struct res_mpt *)res);
+       case RES_MTT:
+               return remove_mtt_ok((struct res_mtt *)res, extra);
+       case RES_MAC:
+               return -ENOSYS;
+       case RES_EQ:
+               return remove_eq_ok((struct res_eq *)res);
+       case RES_COUNTER:
+               return remove_counter_ok((struct res_counter *)res);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+                        enum mlx4_resource type, int extra)
+{
+       int i;
+       int err;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       for (i = base; i < base + count; ++i) {
+               r = radix_tree_lookup(&tracker->res_tree[type], i);
+               if (!r) {
+                       err = -ENOENT;
+                       goto out;
+               }
+               if (r->owner != slave) {
+                       err = -EPERM;
+                       goto out;
+               }
+               err = remove_ok(r, type, extra);
+               if (err)
+                       goto out;
+       }
+
+       for (i = base; i < base + count; ++i) {
+               r = radix_tree_lookup(&tracker->res_tree[type], i);
+               radix_tree_delete(&tracker->res_tree[type], i);
+               list_del(&r->list);
+               kfree(r);
+       }
+       err = 0;
+
+out:
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+                               enum res_qp_states state, struct res_qp **qp,
+                               int alloc)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_qp *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_QP_BUSY:
+                       mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+                                __func__, r->com.res_id);
+                       err = -EBUSY;
+                       break;
+
+               case RES_QP_RESERVED:
+                       if (r->com.state == RES_QP_MAPPED && !alloc)
+                               break;
+
+                       mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+                       err = -EINVAL;
+                       break;
+
+               case RES_QP_MAPPED:
+                       if ((r->com.state == RES_QP_RESERVED && alloc) ||
+                           r->com.state == RES_QP_HW)
+                               break;
+                       else {
+                               mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+                                         r->com.res_id);
+                               err = -EINVAL;
+                       }
+
+                       break;
+
+               case RES_QP_HW:
+                       if (r->com.state != RES_QP_MAPPED)
+                               err = -EINVAL;
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_QP_BUSY;
+                       if (qp)
+                               *qp = (struct res_qp *)r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                               enum res_mpt_states state, struct res_mpt **mpt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_mpt *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_MPT_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_MPT_RESERVED:
+                       if (r->com.state != RES_MPT_MAPPED)
+                               err = -EINVAL;
+                       break;
+
+               case RES_MPT_MAPPED:
+                       if (r->com.state != RES_MPT_RESERVED &&
+                           r->com.state != RES_MPT_HW)
+                               err = -EINVAL;
+                       break;
+
+               case RES_MPT_HW:
+                       if (r->com.state != RES_MPT_MAPPED)
+                               err = -EINVAL;
+                       break;
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_MPT_BUSY;
+                       if (mpt)
+                               *mpt = (struct res_mpt *)r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                               enum res_eq_states state, struct res_eq **eq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_eq *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_EQ_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_EQ_RESERVED:
+                       if (r->com.state != RES_EQ_HW)
+                               err = -EINVAL;
+                       break;
+
+               case RES_EQ_HW:
+                       if (r->com.state != RES_EQ_RESERVED)
+                               err = -EINVAL;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_EQ_BUSY;
+                       if (eq)
+                               *eq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+                               enum res_cq_states state, struct res_cq **cq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_cq *r;
+       int err;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_CQ_BUSY:
+                       err = -EBUSY;
+                       break;
+
+               case RES_CQ_ALLOCATED:
+                       if (r->com.state != RES_CQ_HW)
+                               err = -EINVAL;
+                       else if (atomic_read(&r->ref_count))
+                               err = -EBUSY;
+                       else
+                               err = 0;
+                       break;
+
+               case RES_CQ_HW:
+                       if (r->com.state != RES_CQ_ALLOCATED)
+                               err = -EINVAL;
+                       else
+                               err = 0;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_CQ_BUSY;
+                       if (cq)
+                               *cq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+                                enum res_cq_states state, struct res_srq **srq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_srq *r;
+       int err = 0;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+       if (!r)
+               err = -ENOENT;
+       else if (r->com.owner != slave)
+               err = -EPERM;
+       else {
+               switch (state) {
+               case RES_SRQ_BUSY:
+                       err = -EINVAL;
+                       break;
+
+               case RES_SRQ_ALLOCATED:
+                       if (r->com.state != RES_SRQ_HW)
+                               err = -EINVAL;
+                       else if (atomic_read(&r->ref_count))
+                               err = -EBUSY;
+                       break;
+
+               case RES_SRQ_HW:
+                       if (r->com.state != RES_SRQ_ALLOCATED)
+                               err = -EINVAL;
+                       break;
+
+               default:
+                       err = -EINVAL;
+               }
+
+               if (!err) {
+                       r->com.from_state = r->com.state;
+                       r->com.to_state = state;
+                       r->com.state = RES_SRQ_BUSY;
+                       if (srq)
+                               *srq = r;
+               }
+       }
+
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+                          enum mlx4_resource type, int id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[type], id);
+       if (r && (r->owner == slave))
+               r->state = r->from_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+                        enum mlx4_resource type, int id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_common *r;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       r = radix_tree_lookup(&tracker->res_tree[type], id);
+       if (r && (r->owner == slave))
+               r->state = r->to_state;
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+       return mlx4_is_qp_reserved(dev, qpn);
+}
+
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int err;
+       int count;
+       int align;
+       int base;
+       int qpn;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               count = get_param_l(&in_param);
+               align = get_param_h(&in_param);
+               err = __mlx4_qp_reserve_range(dev, count, align, &base);
+               if (err)
+                       return err;
+
+               err = add_res_range(dev, slave, base, count, RES_QP, 0);
+               if (err) {
+                       __mlx4_qp_release_range(dev, base, count);
+                       return err;
+               }
+               set_param_l(out_param, base);
+               break;
+       case RES_OP_MAP_ICM:
+               qpn = get_param_l(&in_param) & 0x7fffff;
+               if (valid_reserved(dev, slave, qpn)) {
+                       err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+                       if (err)
+                               return err;
+               }
+
+               err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+                                          NULL, 1);
+               if (err)
+                       return err;
+
+               if (!valid_reserved(dev, slave, qpn)) {
+                       err = __mlx4_qp_alloc_icm(dev, qpn);
+                       if (err) {
+                               res_abort_move(dev, slave, RES_QP, qpn);
+                               return err;
+                       }
+               }
+
+               res_end_move(dev, slave, RES_QP, qpn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
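qp_alloc_res() above splits a slave's QP allocation into two wrapped commands: RES_OP_RESERVE hands out a range of QP numbers and records it in the tracker, and RES_OP_MAP_ICM later backs an individual QPN with ICM and moves it to RES_QP_MAPPED. A hedged sketch of the ordering a guest-side caller would follow, using placeholder functions for the two command invocations; the names are illustrative, not the driver's API.

#include <stdio.h>

/* Placeholder stand-ins for the RES_OP_RESERVE / RES_OP_MAP_ICM commands. */
static int reserve_qp_range(int cnt, int align, int *base)
{
        *base = 64;             /* pretend firmware returned QPN 64 */
        printf("reserved %d QPNs (align %d) at base %d\n", cnt, align, *base);
        return 0;
}

static int map_qp_icm(int qpn)
{
        printf("mapped ICM for QPN %d\n", qpn);
        return 0;
}

int main(void)
{
        int base, err;

        err = reserve_qp_range(8, 8, &base);    /* step 1: reserve the range */
        if (!err)
                err = map_qp_icm(base);         /* step 2: map ICM per QPN */
        return err;
}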
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int base;
+       int order;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       order = get_param_l(&in_param);
+       base = __mlx4_alloc_mtt_range(dev, order);
+       if (base == -1)
+               return -ENOMEM;
+
+       err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+       if (err)
+               __mlx4_free_mtt_range(dev, base, order);
+       else
+               set_param_l(out_param, base);
+
+       return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int index;
+       int id;
+       struct res_mpt *mpt;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               index = __mlx4_mr_reserve(dev);
+               if (index == -1)
+                       break;
+               id = index & mpt_mask(dev);
+
+               err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+               if (err) {
+                       __mlx4_mr_release(dev, index);
+                       break;
+               }
+               set_param_l(out_param, index);
+               break;
+       case RES_OP_MAP_ICM:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = mr_res_start_move_to(dev, slave, id,
+                                          RES_MPT_MAPPED, &mpt);
+               if (err)
+                       return err;
+
+               err = __mlx4_mr_alloc_icm(dev, mpt->key);
+               if (err) {
+                       res_abort_move(dev, slave, RES_MPT, id);
+                       return err;
+               }
+
+               res_end_move(dev, slave, RES_MPT, id);
+               break;
+       }
+       return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int cqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               err = __mlx4_cq_alloc_icm(dev, &cqn);
+               if (err)
+                       break;
+
+               err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+               if (err) {
+                       __mlx4_cq_free_icm(dev, cqn);
+                       break;
+               }
+
+               set_param_l(out_param, cqn);
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int srqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               err = __mlx4_srq_alloc_icm(dev, &srqn);
+               if (err)
+                       break;
+
+               err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+               if (err) {
+                       __mlx4_srq_free_icm(dev, srqn);
+                       break;
+               }
+
+               set_param_l(out_param, srqn);
+               break;
+
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
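+/* Remember a MAC registered on behalf of @slave so that it can be released
+ * again by mac_del_from_slave() or rem_slave_macs().
+ */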
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct mac_res *res;
+
+       res = kzalloc(sizeof *res, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+       res->mac = mac;
+       res->port = (u8) port;
+       list_add_tail(&res->list,
+                     &tracker->slave_list[slave].res_list[RES_MAC]);
+       return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+                              int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               if (res->mac == mac && res->port == (u8) port) {
+                       list_del(&res->list);
+                       kfree(res);
+                       break;
+               }
+       }
+}
+
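+/* Unregister and free every MAC still tracked for @slave. */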
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               list_del(&res->list);
+               __mlx4_unregister_mac(dev, res->port, res->mac);
+               kfree(res);
+       }
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int port;
+       u64 mac;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       port = get_param_l(out_param);
+       mac = in_param;
+
+       err = __mlx4_register_mac(dev, port, mac);
+       if (err >= 0) {
+               set_param_l(out_param, err);
+               err = 0;
+       }
+
+       if (!err) {
+               err = mac_add_to_slave(dev, slave, mac, port);
+               if (err)
+                       __mlx4_unregister_mac(dev, port, mac);
+       }
+       return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                        u64 in_param, u64 *out_param)
+{
+       return 0;
+}
+
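+/* Handler for a slave's ALLOC_RES command: dispatch on the resource type in
+ * vhcr->in_modifier to the matching per-resource allocator above.
+ */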
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int alop = vhcr->op_modifier;
+
+       switch (vhcr->in_modifier) {
+       case RES_QP:
+               err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MTT:
+               err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MPT:
+               err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_CQ:
+               err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_SRQ:
+               err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MAC:
+               err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_VLAN:
+               err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+                                   vhcr->in_param, &vhcr->out_param);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                      u64 in_param)
+{
+       int err;
+       int count;
+       int base;
+       int qpn;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               base = get_param_l(&in_param) & 0x7fffff;
+               count = get_param_h(&in_param);
+               err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+               if (err)
+                       break;
+               __mlx4_qp_release_range(dev, base, count);
+               break;
+       case RES_OP_MAP_ICM:
+               qpn = get_param_l(&in_param) & 0x7fffff;
+               err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+                                          NULL, 0);
+               if (err)
+                       return err;
+
+               if (!valid_reserved(dev, slave, qpn))
+                       __mlx4_qp_free_icm(dev, qpn);
+
+               res_end_move(dev, slave, RES_QP, qpn);
+
+               if (valid_reserved(dev, slave, qpn))
+                       err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int err = -EINVAL;
+       int base;
+       int order;
+
+       if (op != RES_OP_RESERVE_AND_MAP)
+               return err;
+
+       base = get_param_l(&in_param);
+       order = get_param_h(&in_param);
+       err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+       if (!err)
+               __mlx4_free_mtt_range(dev, base, order);
+       return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param)
+{
+       int err = -EINVAL;
+       int index;
+       int id;
+       struct res_mpt *mpt;
+
+       switch (op) {
+       case RES_OP_RESERVE:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = get_res(dev, slave, id, RES_MPT, &mpt);
+               if (err)
+                       break;
+               index = mpt->key;
+               put_res(dev, slave, id, RES_MPT);
+
+               err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+               if (err)
+                       break;
+               __mlx4_mr_release(dev, index);
+               break;
+       case RES_OP_MAP_ICM:
+               index = get_param_l(&in_param);
+               id = index & mpt_mask(dev);
+               err = mr_res_start_move_to(dev, slave, id,
+                                          RES_MPT_RESERVED, &mpt);
+               if (err)
+                       return err;
+
+               __mlx4_mr_free_icm(dev, mpt->key);
+               res_end_move(dev, slave, RES_MPT, id);
+               return err;
+       default:
+               err = -EINVAL;
+               break;
+       }
+       return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                      u64 in_param, u64 *out_param)
+{
+       int cqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               cqn = get_param_l(&in_param);
+               err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+               if (err)
+                       break;
+
+               __mlx4_cq_free_icm(dev, cqn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                       u64 in_param, u64 *out_param)
+{
+       int srqn;
+       int err;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               srqn = get_param_l(&in_param);
+               err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+               if (err)
+                       break;
+
+               __mlx4_srq_free_icm(dev, srqn);
+               break;
+
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                           u64 in_param, u64 *out_param)
+{
+       int port;
+       int err = 0;
+
+       switch (op) {
+       case RES_OP_RESERVE_AND_MAP:
+               port = get_param_l(out_param);
+               mac_del_from_slave(dev, slave, in_param, port);
+               __mlx4_unregister_mac(dev, port, in_param);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+                           u64 in_param, u64 *out_param)
+{
+       return 0;
+}
+
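+/* Handler for a slave's FREE_RES command: dispatch on the resource type in
+ * vhcr->in_modifier to the matching per-resource release helper above.
+ */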
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err = -EINVAL;
+       int alop = vhcr->op_modifier;
+
+       switch (vhcr->in_modifier) {
+       case RES_QP:
+               err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+                                 vhcr->in_param);
+               break;
+
+       case RES_MTT:
+               err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MPT:
+               err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param);
+               break;
+
+       case RES_CQ:
+               err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+                                 vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_SRQ:
+               err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_MAC:
+               err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       case RES_VLAN:
+               err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+                                  vhcr->in_param, &vhcr->out_param);
+               break;
+
+       default:
+               break;
+       }
+       return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+       return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+       return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+       return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
+{
+       return be32_to_cpu(mpt->pd_flags) & 0xffffff;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+       return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
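+/* Number of MTT pages spanned by the QP's work queue buffer, derived from
+ * the SQ/RQ geometry and page size encoded in the QP context.
+ */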
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+       int page_shift = (qpc->log_page_size & 0x3f) + 12;
+       int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+       int log_sq_stride = qpc->sq_size_stride & 7;
+       int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+       int log_rq_stride = qpc->rq_size_stride & 7;
+       int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+       int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+       int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+       int sq_size;
+       int rq_size;
+       int total_pages;
+       int total_mem;
+       int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+       sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+       rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+       total_mem = sq_size + rq_size;
+       total_pages =
+               roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+                                  page_shift);
+
+       return total_pages;
+}
+
+static int qp_get_pdn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->pd) & 0xffffff;
+}
+
+static int pdn2slave(int pdn)
+{
+       return (pdn >> NOT_MASKED_PD_BITS) - 1;
+}
+
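+/* Reject MTT ranges that fall outside the chunk the slave owns. */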
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+                          int size, struct res_mtt *mtt)
+{
+       int res_start = mtt->com.res_id;
+       int res_size = (1 << mtt->order);
+
+       if (start < res_start || start + size > res_start + res_size)
+               return -EPERM;
+       return 0;
+}
+
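+/* SW2HW_MPT on behalf of a slave: move the MPT to the HW-owned state, range
+ * check and reference the MTT it points at (skipped for physical MPTs),
+ * verify the PD belongs to the slave and only then forward the command.
+ */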
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mtt *mtt;
+       struct res_mpt *mpt;
+       int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+       int phys;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+       if (err)
+               return err;
+
+       phys = mr_phys_mpt(inbox->buf);
+       if (!phys) {
+               err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+               if (err)
+                       goto ex_abort;
+
+               err = check_mtt_range(dev, slave, mtt_base,
+                                     mr_get_mtt_size(inbox->buf), mtt);
+               if (err)
+                       goto ex_put;
+
+               mpt->mtt = mtt;
+       }
+
+       if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
+               err = -EPERM;
+               goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put;
+
+       if (!phys) {
+               atomic_inc(&mtt->ref_count);
+               put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       }
+
+       res_end_move(dev, slave, RES_MPT, id);
+       return 0;
+
+ex_put:
+       if (!phys)
+               put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_MPT, id);
+
+       return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mpt *mpt;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+       if (err)
+               return err;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+
+       if (mpt->mtt)
+               atomic_dec(&mpt->mtt->ref_count);
+
+       res_end_move(dev, slave, RES_MPT, id);
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_MPT, id);
+
+       return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier;
+       struct res_mpt *mpt;
+       int id;
+
+       id = index & mpt_mask(dev);
+       err = get_res(dev, slave, id, RES_MPT, &mpt);
+       if (err)
+               return err;
+
+       if (mpt->com.from_state != RES_MPT_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+       put_res(dev, slave, id, RES_MPT);
+       return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+       return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
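+/* RST2INIT on behalf of a slave: verify that the MTT range, receive/send CQs,
+ * PD and (optionally) SRQ named in the QP context belong to the slave, take
+ * references on them and move the QP to the HW-owned state.
+ */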
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_mtt *mtt;
+       struct res_qp *qp;
+       struct mlx4_qp_context *qpc = inbox->buf + 8;
+       int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+       int mtt_size = qp_get_mtt_size(qpc);
+       struct res_cq *rcq;
+       struct res_cq *scq;
+       int rcqn = qp_get_rcqn(qpc);
+       int scqn = qp_get_scqn(qpc);
+       u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+       int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+       struct res_srq *srq;
+       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+       err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+       if (err)
+               return err;
+       qp->local_qpn = local_qpn;
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_abort;
+
+       err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+       if (err)
+               goto ex_put_mtt;
+
+       if (pdn2slave(qp_get_pdn(qpc)) != slave) {
+               err = -EPERM;
+               goto ex_put_mtt;
+       }
+
+       err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+       if (err)
+               goto ex_put_mtt;
+
+       if (scqn != rcqn) {
+               err = get_res(dev, slave, scqn, RES_CQ, &scq);
+               if (err)
+                       goto ex_put_rcq;
+       } else
+               scq = rcq;
+
+       if (use_srq) {
+               err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+               if (err)
+                       goto ex_put_scq;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put_srq;
+       atomic_inc(&mtt->ref_count);
+       qp->mtt = mtt;
+       atomic_inc(&rcq->ref_count);
+       qp->rcq = rcq;
+       atomic_inc(&scq->ref_count);
+       qp->scq = scq;
+
+       if (scqn != rcqn)
+               put_res(dev, slave, scqn, RES_CQ);
+
+       if (use_srq) {
+               atomic_inc(&srq->ref_count);
+               put_res(dev, slave, srqn, RES_SRQ);
+               qp->srq = srq;
+       }
+       put_res(dev, slave, rcqn, RES_CQ);
+       put_res(dev, slave, mtt_base, RES_MTT);
+       res_end_move(dev, slave, RES_QP, qpn);
+
+       return 0;
+
+ex_put_srq:
+       if (use_srq)
+               put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+       if (scqn != rcqn)
+               put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+       put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+       put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_QP, qpn);
+
+       return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+       return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+       int log_eq_size = eqc->log_eq_size & 0x1f;
+       int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+       if (log_eq_size + 5 < page_shift)
+               return 1;
+
+       return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+       return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+       int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+       int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+       if (log_cq_size + 5 < page_shift)
+               return 1;
+
+       return 1 << (log_cq_size + 5 - page_shift);
+}
+
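+/* SW2HW_EQ on behalf of a slave: EQs are tracked under the composite id
+ * (slave << 8) | eqn, so register that id, range check the MTT from the EQ
+ * context and forward the command.
+ */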
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int eqn = vhcr->in_modifier;
+       int res_id = (slave << 8) | eqn;
+       struct mlx4_eq_context *eqc = inbox->buf;
+       int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+       int mtt_size = eq_get_mtt_size(eqc);
+       struct res_eq *eq;
+       struct res_mtt *mtt;
+
+       err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+       if (err)
+               return err;
+       err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+       if (err)
+               goto out_add;
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto out_move;
+
+       err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+       if (err)
+               goto out_put;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_put;
+
+       atomic_inc(&mtt->ref_count);
+       eq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_EQ, res_id);
+       return 0;
+
+out_put:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+       res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+       rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+       return err;
+}
+
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+                             int len, struct res_mtt **res)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct res_mtt *mtt;
+       int err = -EINVAL;
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+                           com.list) {
+               if (!check_mtt_range(dev, slave, start, len, mtt)) {
+                       *res = mtt;
+                       mtt->com.from_state = mtt->com.state;
+                       mtt->com.state = RES_MTT_BUSY;
+                       err = 0;
+                       break;
+               }
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return err;
+}
+
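+/* WRITE_MTT on behalf of a slave: make sure the target range lies inside an
+ * MTT chunk owned by the slave, then perform the write with the SW helper.
+ */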
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_mtt mtt;
+       __be64 *page_list = inbox->buf;
+       u64 *pg_list = (u64 *)page_list;
+       int i;
+       struct res_mtt *rmtt = NULL;
+       int start = be64_to_cpu(page_list[0]);
+       int npages = vhcr->in_modifier;
+       int err;
+
+       err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+       if (err)
+               return err;
+
+       /* Call the SW implementation of write_mtt:
+        * - Prepare a dummy mtt struct
+        * - Translate inbox contents to simple addresses in host endianness */
+       mtt.offset = 0;  /* TBD this is broken but I don't handle it since
+                           we don't really use it */
+       mtt.order = 0;
+       mtt.page_shift = 0;
+       for (i = 0; i < npages; ++i)
+               pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+       err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+                              ((u64 *)page_list + 2));
+
+       if (rmtt)
+               put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+       return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int eqn = vhcr->in_modifier;
+       int res_id = eqn | (slave << 8);
+       struct res_eq *eq;
+       int err;
+
+       err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+       if (err)
+               return err;
+
+       err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+       if (err)
+               goto ex_abort;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put;
+
+       atomic_dec(&eq->mtt->ref_count);
+       put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_EQ, res_id);
+       rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+       return 0;
+
+ex_put:
+       put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_EQ, res_id);
+
+       return err;
+}
+
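+/* Deliver an event to a slave by wrapping the EQE in a GEN_EQE command,
+ * provided the slave has registered for this event type and its event EQ is
+ * in the HW-owned state.
+ */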
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_slave_event_eq_info *event_eq;
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_modifier = 0;
+       int err;
+       int res_id;
+       struct res_eq *req;
+
+       if (!priv->mfunc.master.slave_state)
+               return -EINVAL;
+
+       event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+
+       /* Create the event only if the slave is registered */
+       if ((event_eq->event_type & (1 << eqe->type)) == 0)
+               return 0;
+
+       mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       res_id = (slave << 8) | event_eq->eqn;
+       err = get_res(dev, slave, res_id, RES_EQ, &req);
+       if (err)
+               goto unlock;
+
+       if (req->com.from_state != RES_EQ_HW) {
+               err = -EINVAL;
+               goto put;
+       }
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto put;
+       }
+
+       if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+               ++event_eq->token;
+               eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+       }
+
+       memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+       in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+       err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+                      MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       put_res(dev, slave, res_id, RES_EQ);
+       mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+
+put:
+       put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+       mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+       return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int eqn = vhcr->in_modifier;
+       int res_id = eqn | (slave << 8);
+       struct res_eq *eq;
+       int err;
+
+       err = get_res(dev, slave, res_id, RES_EQ, &eq);
+       if (err)
+               return err;
+
+       if (eq->com.from_state != RES_EQ_HW) {
+               err = -EINVAL;
+               goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+       put_res(dev, slave, res_id, RES_EQ);
+       return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int cqn = vhcr->in_modifier;
+       struct mlx4_cq_context *cqc = inbox->buf;
+       int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+       struct res_cq *cq;
+       struct res_mtt *mtt;
+
+       err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+       if (err)
+               return err;
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto out_move;
+       err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+       if (err)
+               goto out_put;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_put;
+       atomic_inc(&mtt->ref_count);
+       cq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_CQ, cqn);
+       return 0;
+
+out_put:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+       res_abort_move(dev, slave, RES_CQ, cqn);
+       return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+
+       err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto out_move;
+       atomic_dec(&cq->mtt->ref_count);
+       res_end_move(dev, slave, RES_CQ, cqn);
+       return 0;
+
+out_move:
+       res_abort_move(dev, slave, RES_CQ, cqn);
+       return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                         struct mlx4_vhcr *vhcr,
+                         struct mlx4_cmd_mailbox *inbox,
+                         struct mlx4_cmd_mailbox *outbox,
+                         struct mlx4_cmd_info *cmd)
+{
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+       int err;
+
+       err = get_res(dev, slave, cqn, RES_CQ, &cq);
+       if (err)
+               return err;
+
+       if (cq->com.from_state != RES_CQ_HW)
+               goto ex_put;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+       put_res(dev, slave, cqn, RES_CQ);
+
+       return err;
+}
+
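+/* CQ resize path of MODIFY_CQ: swap the CQ's tracked MTT reference from the
+ * old range to the new one described in the mailbox, after range checking it.
+ */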
+static int handle_resize(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd,
+                        struct res_cq *cq)
+{
+       int err;
+       struct res_mtt *orig_mtt;
+       struct res_mtt *mtt;
+       struct mlx4_cq_context *cqc = inbox->buf;
+       int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
+       err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+       if (err)
+               return err;
+
+       if (orig_mtt != cq->mtt) {
+               err = -EINVAL;
+               goto ex_put;
+       }
+
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_put;
+
+       err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+       if (err)
+               goto ex_put1;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put1;
+       atomic_dec(&orig_mtt->ref_count);
+       put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+       atomic_inc(&mtt->ref_count);
+       cq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       return 0;
+
+ex_put1:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+       put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+       return err;
+
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int cqn = vhcr->in_modifier;
+       struct res_cq *cq;
+       int err;
+
+       err = get_res(dev, slave, cqn, RES_CQ, &cq);
+       if (err)
+               return err;
+
+       if (cq->com.from_state != RES_CQ_HW)
+               goto ex_put;
+
+       if (vhcr->op_modifier == 0) {
+               err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+               if (err)
+                       goto ex_put;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+       put_res(dev, slave, cqn, RES_CQ);
+
+       return err;
+}
+
+static int srq_get_pdn(struct mlx4_srq_context *srqc)
+{
+       return be32_to_cpu(srqc->pd) & 0xffffff;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+       int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+       int log_rq_stride = srqc->logstride & 7;
+       int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
+       if (log_srq_size + log_rq_stride + 4 < page_shift)
+               return 1;
+
+       return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_mtt *mtt;
+       struct res_srq *srq;
+       struct mlx4_srq_context *srqc = inbox->buf;
+       int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+       if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+               return -EINVAL;
+
+       err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+       if (err)
+               return err;
+       err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+       if (err)
+               goto ex_abort;
+       err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+                             mtt);
+       if (err)
+               goto ex_put_mtt;
+
+       if (pdn2slave(srq_get_pdn(srqc)) != slave) {
+               err = -EPERM;
+               goto ex_put_mtt;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_put_mtt;
+
+       atomic_inc(&mtt->ref_count);
+       srq->mtt = mtt;
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+       res_end_move(dev, slave, RES_SRQ, srqn);
+       return 0;
+
+ex_put_mtt:
+       put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+       res_abort_move(dev, slave, RES_SRQ, srqn);
+
+       return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+       atomic_dec(&srq->mtt->ref_count);
+       if (srq->cq)
+               atomic_dec(&srq->cq->ref_count);
+       res_end_move(dev, slave, RES_SRQ, srqn);
+
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_SRQ, srqn);
+
+       return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                          struct mlx4_vhcr *vhcr,
+                          struct mlx4_cmd_mailbox *inbox,
+                          struct mlx4_cmd_mailbox *outbox,
+                          struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+       if (err)
+               return err;
+       if (srq->com.from_state != RES_SRQ_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, srqn, RES_SRQ);
+       return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int srqn = vhcr->in_modifier;
+       struct res_srq *srq;
+
+       err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+       if (err)
+               return err;
+
+       if (srq->com.from_state != RES_SRQ_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, srqn, RES_SRQ);
+       return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+                       struct mlx4_vhcr *vhcr,
+                       struct mlx4_cmd_mailbox *inbox,
+                       struct mlx4_cmd_mailbox *outbox,
+                       struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+
+       err = get_res(dev, slave, qpn, RES_QP, &qp);
+       if (err)
+               return err;
+       if (qp->com.from_state != RES_QP_HW) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+       put_res(dev, slave, qpn, RES_QP);
+       return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+                            struct mlx4_vhcr *vhcr,
+                            struct mlx4_cmd_mailbox *inbox,
+                            struct mlx4_cmd_mailbox *outbox,
+                            struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+       update_ud_gid(dev, qpc, (u8)slave);
+
+       return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+                        struct mlx4_vhcr *vhcr,
+                        struct mlx4_cmd_mailbox *inbox,
+                        struct mlx4_cmd_mailbox *outbox,
+                        struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int qpn = vhcr->in_modifier & 0x7fffff;
+       struct res_qp *qp;
+
+       err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+       if (err)
+               return err;
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       if (err)
+               goto ex_abort;
+
+       atomic_dec(&qp->mtt->ref_count);
+       atomic_dec(&qp->rcq->ref_count);
+       atomic_dec(&qp->scq->ref_count);
+       if (qp->srq)
+               atomic_dec(&qp->srq->ref_count);
+       res_end_move(dev, slave, RES_QP, qpn);
+       return 0;
+
+ex_abort:
+       res_abort_move(dev, slave, RES_QP, qpn);
+
+       return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+                               struct res_qp *rqp, u8 *gid)
+{
+       struct res_gid *res;
+
+       list_for_each_entry(res, &rqp->mcg_list, list) {
+               if (!memcmp(res->gid, gid, 16))
+                       return res;
+       }
+       return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+                      u8 *gid, enum mlx4_protocol prot)
+{
+       struct res_gid *res;
+       int err;
+
+       res = kzalloc(sizeof *res, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+
+       spin_lock_irq(&rqp->mcg_spl);
+       if (find_gid(dev, slave, rqp, gid)) {
+               kfree(res);
+               err = -EEXIST;
+       } else {
+               memcpy(res->gid, gid, 16);
+               res->prot = prot;
+               list_add_tail(&res->list, &rqp->mcg_list);
+               err = 0;
+       }
+       spin_unlock_irq(&rqp->mcg_spl);
+
+       return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+                      u8 *gid, enum mlx4_protocol prot)
+{
+       struct res_gid *res;
+       int err;
+
+       spin_lock_irq(&rqp->mcg_spl);
+       res = find_gid(dev, slave, rqp, gid);
+       if (!res || res->prot != prot)
+               err = -EINVAL;
+       else {
+               list_del(&res->list);
+               kfree(res);
+               err = 0;
+       }
+       spin_unlock_irq(&rqp->mcg_spl);
+
+       return err;
+}
+
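+/* Attach or detach a QP to/from a multicast group on behalf of a slave,
+ * mirroring the membership in the QP's mcg_list so that detach_qp() can undo
+ * it later.
+ */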
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       struct mlx4_qp qp; /* dummy for calling attach/detach */
+       u8 *gid = inbox->buf;
+       enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+       int err, err1;
+       int qpn;
+       struct res_qp *rqp;
+       int attach = vhcr->op_modifier;
+       int block_loopback = vhcr->in_modifier >> 31;
+       u8 steer_type_mask = 2;
+       enum mlx4_steer_type type = gid[7] & steer_type_mask;
+
+       qpn = vhcr->in_modifier & 0xffffff;
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
+       if (err)
+               return err;
+
+       qp.qpn = qpn;
+       if (attach) {
+               err = add_mcg_res(dev, slave, rqp, gid, prot);
+               if (err)
+                       goto ex_put;
+
+               err = mlx4_qp_attach_common(dev, &qp, gid,
+                                           block_loopback, prot, type);
+               if (err)
+                       goto ex_rem;
+       } else {
+               err = rem_mcg_res(dev, slave, rqp, gid, prot);
+               if (err)
+                       goto ex_put;
+               err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+       }
+
+       put_res(dev, slave, qpn, RES_QP);
+       return 0;
+
+ex_rem:
+       /* ignore error return below, already in error */
+       err1 = rem_mcg_res(dev, slave, rqp, gid, prot);
+ex_put:
+       put_res(dev, slave, qpn, RES_QP);
+
+       return err;
+}
+
+enum {
+       BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+                              struct mlx4_vhcr *vhcr,
+                              struct mlx4_cmd_mailbox *inbox,
+                              struct mlx4_cmd_mailbox *outbox,
+                              struct mlx4_cmd_info *cmd)
+{
+       int err;
+       int index = vhcr->in_modifier & 0xffff;
+
+       err = get_res(dev, slave, index, RES_COUNTER, NULL);
+       if (err)
+               return err;
+
+       err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+       put_res(dev, slave, index, RES_COUNTER);
+       return err;
+}
+
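+/* Detach a QP from every multicast group recorded in its mcg_list. */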
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+       struct res_gid *rgid;
+       struct res_gid *tmp;
+       int err;
+       struct mlx4_qp qp; /* dummy for calling attach/detach */
+
+       list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+               qp.qpn = rqp->local_qpn;
+               err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+                                           MLX4_MC_STEER);
+               list_del(&rgid->list);
+               kfree(rgid);
+       }
+}
+
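+/* Mark every resource of @type owned by @slave as busy and slated for
+ * removal; returns how many entries were already busy elsewhere and could
+ * not be claimed.
+ */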
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+                         enum mlx4_resource type, int print)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker =
+               &priv->mfunc.master.res_tracker;
+       struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+       struct res_common *r;
+       struct res_common *tmp;
+       int busy;
+
+       busy = 0;
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(r, tmp, rlist, list) {
+               if (r->owner == slave) {
+                       if (!r->removing) {
+                               if (r->state == RES_ANY_BUSY) {
+                                       if (print)
+                                               mlx4_dbg(dev,
+                                                        "%s id 0x%x is busy\n",
+                                                         ResourceType(type),
+                                                         r->res_id);
+                                       ++busy;
+                               } else {
+                                       r->from_state = r->state;
+                                       r->state = RES_ANY_BUSY;
+                                       r->removing = 1;
+                               }
+                       }
+               }
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+
+       return busy;
+}
+
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+                        enum mlx4_resource type)
+{
+       unsigned long begin;
+       int busy;
+
+       begin = jiffies;
+       do {
+               busy = _move_all_busy(dev, slave, type, 0);
+               if (time_after(jiffies, begin + 5 * HZ))
+                       break;
+               if (busy)
+                       cond_resched();
+       } while (busy);
+
+       if (busy)
+               busy = _move_all_busy(dev, slave, type, 1);
+
+       return busy;
+}
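+
+/* Reclaim every QP still tracked for @slave: detach its multicast groups,
+ * move HW-owned QPs back to reset, free their ICM mappings and drop them
+ * from the resource tracker.
+ */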
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *qp_list =
+               &tracker->slave_list[slave].res_list[RES_QP];
+       struct res_qp *qp;
+       struct res_qp *tmp;
+       int state;
+       u64 in_param;
+       int qpn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_QP);
+       if (err)
+               mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
+                         "for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (qp->com.owner == slave) {
+                       qpn = qp->com.res_id;
+                       detach_qp(dev, slave, qp);
+                       state = qp->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_QP_RESERVED:
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_QP],
+                                                         qp->com.res_id);
+                                       list_del(&qp->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(qp);
+                                       state = 0;
+                                       break;
+                               case RES_QP_MAPPED:
+                                       if (!valid_reserved(dev, slave, qpn))
+                                               __mlx4_qp_free_icm(dev, qpn);
+                                       state = RES_QP_RESERVED;
+                                       break;
+                               case RES_QP_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param,
+                                                      qp->local_qpn, 2,
+                                                      MLX4_CMD_2RST_QP,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_qps: failed"
+                                                        " to move slave %d qpn %d to"
+                                                        " reset\n", slave,
+                                                        qp->local_qpn);
+                                       atomic_dec(&qp->rcq->ref_count);
+                                       atomic_dec(&qp->scq->ref_count);
+                                       atomic_dec(&qp->mtt->ref_count);
+                                       if (qp->srq)
+                                               atomic_dec(&qp->srq->ref_count);
+                                       state = RES_QP_MAPPED;
+                                       break;
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *srq_list =
+               &tracker->slave_list[slave].res_list[RES_SRQ];
+       struct res_srq *srq;
+       struct res_srq *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int srqn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_SRQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (srq->com.owner == slave) {
+                       srqn = srq->com.res_id;
+                       state = srq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_SRQ_ALLOCATED:
+                                       __mlx4_srq_free_icm(dev, srqn);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_SRQ],
+                                                         srqn);
+                                       list_del(&srq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(srq);
+                                       state = 0;
+                                       break;
+
+                               case RES_SRQ_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, srqn, 1,
+                                                      MLX4_CMD_HW2SW_SRQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_srqs: failed"
+                                                        " to move slave %d srq %d to"
+                                                        " SW ownership\n",
+                                                        slave, srqn);
+
+                                       atomic_dec(&srq->mtt->ref_count);
+                                       if (srq->cq)
+                                               atomic_dec(&srq->cq->ref_count);
+                                       state = RES_SRQ_ALLOCATED;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *cq_list =
+               &tracker->slave_list[slave].res_list[RES_CQ];
+       struct res_cq *cq;
+       struct res_cq *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int cqn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_CQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+                       cqn = cq->com.res_id;
+                       state = cq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_CQ_ALLOCATED:
+                                       __mlx4_cq_free_icm(dev, cqn);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_CQ],
+                                                         cqn);
+                                       list_del(&cq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(cq);
+                                       state = 0;
+                                       break;
+
+                               case RES_CQ_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, cqn, 1,
+                                                      MLX4_CMD_HW2SW_CQ,
+                                                      MLX4_CMD_TIME_CLASS_A,
+                                                      MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_cqs: failed"
+                                                        " to move slave %d cq %d to"
+                                                        " SW ownership\n",
+                                                        slave, cqn);
+                                       atomic_dec(&cq->mtt->ref_count);
+                                       state = RES_CQ_ALLOCATED;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mpt_list =
+               &tracker->slave_list[slave].res_list[RES_MPT];
+       struct res_mpt *mpt;
+       struct res_mpt *tmp;
+       int state;
+       u64 in_param;
+       LIST_HEAD(tlist);
+       int mptn;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_MPT);
+       if (err)
+               mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (mpt->com.owner == slave) {
+                       mptn = mpt->com.res_id;
+                       state = mpt->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_MPT_RESERVED:
+                                       __mlx4_mr_release(dev, mpt->key);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_MPT],
+                                                         mptn);
+                                       list_del(&mpt->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(mpt);
+                                       state = 0;
+                                       break;
+
+                               case RES_MPT_MAPPED:
+                                       __mlx4_mr_free_icm(dev, mpt->key);
+                                       state = RES_MPT_RESERVED;
+                                       break;
+
+                               case RES_MPT_HW:
+                                       in_param = slave;
+                                       err = mlx4_cmd(dev, in_param, mptn, 0,
+                                                    MLX4_CMD_HW2SW_MPT,
+                                                    MLX4_CMD_TIME_CLASS_A,
+                                                    MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_mrs: failed"
+                                                        " to move slave %d mpt %d to"
+                                                        " SW ownership\n",
+                                                        slave, mptn);
+                                       if (mpt->mtt)
+                                               atomic_dec(&mpt->mtt->ref_count);
+                                       state = RES_MPT_MAPPED;
+                                       break;
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker =
+               &priv->mfunc.master.res_tracker;
+       struct list_head *mtt_list =
+               &tracker->slave_list[slave].res_list[RES_MTT];
+       struct res_mtt *mtt;
+       struct res_mtt *tmp;
+       int state;
+       LIST_HEAD(tlist);
+       int base;
+       int err;
+
+       err = move_all_busy(dev, slave, RES_MTT);
+       if (err)
+               mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (mtt->com.owner == slave) {
+                       base = mtt->com.res_id;
+                       state = mtt->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_MTT_ALLOCATED:
+                                       __mlx4_free_mtt_range(dev, base,
+                                                             mtt->order);
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_MTT],
+                                                         base);
+                                       list_del(&mtt->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(mtt);
+                                       state = 0;
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *eq_list =
+               &tracker->slave_list[slave].res_list[RES_EQ];
+       struct res_eq *eq;
+       struct res_eq *tmp;
+       int err;
+       int state;
+       LIST_HEAD(tlist);
+       int eqn;
+       struct mlx4_cmd_mailbox *mailbox;
+
+       err = move_all_busy(dev, slave, RES_EQ);
+       if (err)
+               mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+                         "busy for slave %d\n", slave);
+
+       spin_lock_irq(mlx4_tlock(dev));
+       list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+               spin_unlock_irq(mlx4_tlock(dev));
+               if (eq->com.owner == slave) {
+                       eqn = eq->com.res_id;
+                       state = eq->com.from_state;
+                       while (state != 0) {
+                               switch (state) {
+                               case RES_EQ_RESERVED:
+                                       spin_lock_irq(mlx4_tlock(dev));
+                                       radix_tree_delete(&tracker->res_tree[RES_EQ],
+                                                         eqn);
+                                       list_del(&eq->com.list);
+                                       spin_unlock_irq(mlx4_tlock(dev));
+                                       kfree(eq);
+                                       state = 0;
+                                       break;
+
+                               case RES_EQ_HW:
+                                       mailbox = mlx4_alloc_cmd_mailbox(dev);
+                                       if (IS_ERR(mailbox)) {
+                                               cond_resched();
+                                               continue;
+                                       }
+                                       err = mlx4_cmd_box(dev, slave, 0,
+                                                          eqn & 0xff, 0,
+                                                          MLX4_CMD_HW2SW_EQ,
+                                                          MLX4_CMD_TIME_CLASS_A,
+                                                          MLX4_CMD_NATIVE);
+                                       if (err)
+                                               mlx4_dbg(dev, "rem_slave_eqs: failed"
+                                                        " to move slave %d eq %d to"
+                                                        " SW ownership\n",
+                                                        slave, eqn);
+                                       mlx4_free_cmd_mailbox(dev, mailbox);
+                                       if (!err) {
+                                               atomic_dec(&eq->mtt->ref_count);
+                                               state = RES_EQ_RESERVED;
+                                       }
+                                       break;
+
+                               default:
+                                       state = 0;
+                               }
+                       }
+               }
+               spin_lock_irq(mlx4_tlock(dev));
+       }
+       spin_unlock_irq(mlx4_tlock(dev));
+}
+
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+       /*VLAN*/
+       rem_slave_macs(dev, slave);
+       rem_slave_qps(dev, slave);
+       rem_slave_srqs(dev, slave);
+       rem_slave_cqs(dev, slave);
+       rem_slave_mrs(dev, slave);
+       rem_slave_eqs(dev, slave);
+       rem_slave_mtts(dev, slave);
+       mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
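
All of the rem_slave_*() helpers above share one teardown pattern: move the slave's resources to the busy state, then walk every entry the slave still owns backwards through its states (HW ownership -> allocated -> freed) until it reaches 0, so a slave reset always leaves the tracker empty. A minimal, self-contained sketch of that state machine (simplified names, not part of the patch):

#include <stdio.h>

enum res_state { RES_FREED = 0, RES_ALLOCATED, RES_HW };

/* Step one tracked resource back to the freed state, as rem_slave_cqs()
 * and friends do for every entry still owned by the slave. */
static void rem_one_resource(int id, enum res_state state)
{
	while (state != RES_FREED) {
		switch (state) {
		case RES_HW:
			/* a HW2SW firmware command would run here; on
			 * success the MTT reference is dropped */
			printf("res %d: HW -> SW ownership\n", id);
			state = RES_ALLOCATED;
			break;
		case RES_ALLOCATED:
			/* free ICM, unlink from radix tree and list, kfree */
			printf("res %d: freed\n", id);
			state = RES_FREED;
			break;
		default:
			state = RES_FREED;
			break;
		}
	}
}

int main(void)
{
	rem_one_resource(7, RES_HW);
	return 0;
}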
index e2337a7..8024982 100644 (file)
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
        int err = 0;
 
        err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
-                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+                          MLX4_CMD_WRAPPED);
        if (err) {
                mlx4_err(dev, "Sense command failed for port: %d\n", port);
                return err;
index 9cbf3fc..2823fff 100644 (file)
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#include <linux/init.h>
+
 #include <linux/mlx4/cmd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include "mlx4.h"
 #include "icm.h"
 
-struct mlx4_srq_context {
-       __be32                  state_logsize_srqn;
-       u8                      logstride;
-       u8                      reserved1;
-       __be16                  xrcd;
-       __be32                  pg_offset_cqn;
-       u32                     reserved2;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  pd;
-       __be16                  limit_watermark;
-       __be16                  wqe_cnt;
-       u16                     reserved4;
-       __be16                  wqe_counter;
-       u32                     reserved5;
-       __be64                  db_rec_addr;
-};
-
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
-                       MLX4_CMD_TIME_CLASS_A);
+       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+                       MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+                       MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 {
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
                            mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
 {
        return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
-                       MLX4_CMD_TIME_CLASS_B);
+                       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
 static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
        return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
+                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
 
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
-                  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 {
        struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_srq_context *srq_context;
-       u64 mtt_addr;
        int err;
 
-       srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
-       if (srq->srqn == -1)
+       *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+       if (*srqn == -1)
                return -ENOMEM;
 
-       err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+       err = mlx4_table_get(dev, &srq_table->table, *srqn);
        if (err)
                goto err_out;
 
-       err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+       err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
        if (err)
                goto err_put;
+       return 0;
+
+err_put:
+       mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+       mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+       return err;
+}
+
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+       u64 out_param;
+       int err;
+
+       if (mlx4_is_mfunc(dev)) {
+               err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+                                  RES_OP_RESERVE_AND_MAP,
+                                  MLX4_CMD_ALLOC_RES,
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *srqn = get_param_l(&out_param);
+
+               return err;
+       }
+       return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+       mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+       mlx4_table_put(dev, &srq_table->table, srqn);
+       mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+       u64 in_param;
+
+       if (mlx4_is_mfunc(dev)) {
+               set_param_l(&in_param, srqn);
+               if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+                            MLX4_CMD_FREE_RES,
+                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+                       mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
+               return;
+       }
+       __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+                  struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_srq_context *srq_context;
+       u64 mtt_addr;
+       int err;
+
+       err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+       if (err)
+               return err;
 
        spin_lock_irq(&srq_table->lock);
        err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
        spin_unlock_irq(&srq_table->lock);
        if (err)
-               goto err_cmpt_put;
+               goto err_icm;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
        radix_tree_delete(&srq_table->tree, srq->srqn);
        spin_unlock_irq(&srq_table->lock);
 
-err_cmpt_put:
-       mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+       mlx4_srq_free_icm(dev, srq->srqn);
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
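
The new mlx4_srq_alloc_icm()/mlx4_srq_free_icm() wrappers above show the multi-function split used throughout this series: a slave never touches the bitmap or ICM tables directly, it asks the master through ALLOC_RES/FREE_RES commands and only keeps the returned index, while the native (PF) path calls the __mlx4_* helpers. A rough, self-contained illustration of that split (stand-in names, not the driver's API):

#include <stdbool.h>

static bool is_mfunc;			/* stand-in for mlx4_is_mfunc(dev) */

/* stand-in for the MLX4_CMD_ALLOC_RES round trip to the master */
static int ask_master_for_srqn(int *srqn)
{
	*srqn = 42;	/* index picked by the master's resource tracker */
	return 0;
}

/* stand-in for __mlx4_srq_alloc_icm(): local bitmap + ICM table work */
static int allocate_srqn_locally(int *srqn)
{
	*srqn = 7;
	return 0;
}

static int srq_alloc_index(int *srqn)
{
	if (is_mfunc)
		return ask_master_for_srqn(srqn);
	return allocate_srqn_locally(srqn);
}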
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
                complete(&srq->free);
        wait_for_completion(&srq->free);
 
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+       mlx4_srq_free_icm(dev, srq->srqn);
 }
 EXPORT_SYMBOL_GPL(mlx4_srq_free);
 
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
        spin_lock_init(&srq_table->lock);
        INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+       if (mlx4_is_slave(dev))
+               return 0;
 
        err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
                               dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
 
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 {
+       if (mlx4_is_slave(dev))
+               return;
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
index d10c2e1..1ea811c 100644 (file)
@@ -42,6 +42,8 @@ config KS8851
        select NET_CORE
        select MII
        select CRC32
+       select MISC_DEVICES
+       select EEPROM_93CX6
        ---help---
          SPI driver for Micrel KS8851 SPI attached network chip.
 
index 4a6ae05..75ec87a 100644 (file)
@@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = {
        .remove         = ks8842_remove,
 };
 
-static int __init ks8842_init(void)
-{
-       return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
-       platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
 
 MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
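
This driver and several others below (ks8851_mll, jazzsonic, macsonic, xtsonic, w90p910_ether) drop their hand-written init/exit functions in favour of module_platform_driver(). Schematically, the macro supplies the same boilerplate being deleted, roughly along these lines (a simplified sketch, not the exact definition from <linux/platform_device.h>):

/* simplified sketch of what module_platform_driver(drv) provides */
#define module_platform_driver_sketch(__driver)			\
static int __init __driver##_init(void)				\
{									\
	return platform_driver_register(&(__driver));			\
}									\
module_init(__driver##_init);						\
static void __exit __driver##_exit(void)				\
{									\
	platform_driver_unregister(&(__driver));			\
}									\
module_exit(__driver##_exit);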
index f56743a..6b35e7d 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/cache.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
 
 #include <linux/spi/spi.h>
 
@@ -82,6 +83,7 @@ union ks8851_tx_hdr {
  * @rc_ccr: Cached copy of KS_CCR.
  * @rc_rxqcr: Cached copy of KS_RXQCR.
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
  *
  * The @lock ensures that the chip is protected when certain operations are
  * in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@ struct ks8851_net {
        struct spi_message      spi_msg2;
        struct spi_transfer     spi_xfer1;
        struct spi_transfer     spi_xfer2[2];
+
+       struct eeprom_93cx6     eeprom;
 };
 
 static int msg_enable;
@@ -343,6 +347,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
 }
 
 /**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+       unsigned pmecr;
+
+       netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+       pmecr = ks8851_rdreg16(ks, KS_PMECR);
+       pmecr &= ~PMECR_PM_MASK;
+       pmecr |= pwrmode;
+
+       ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
  * ks8851_write_mac_addr - write mac address to device registers
  * @dev: The network device
  *
@@ -358,8 +382,15 @@ static int ks8851_write_mac_addr(struct net_device *dev)
 
        mutex_lock(&ks->lock);
 
+       /*
+        * Wake up chip in case it was powered off when stopped; otherwise,
+        * the first write to the MAC address does not take effect.
+        */
+       ks8851_set_powermode(ks, PMECR_PM_NORMAL);
        for (i = 0; i < ETH_ALEN; i++)
                ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+       if (!netif_running(dev))
+               ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
 
        mutex_unlock(&ks->lock);
 
@@ -367,21 +398,47 @@ static int ks8851_write_mac_addr(struct net_device *dev)
 }
 
 /**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+ */
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+       struct ks8851_net *ks = netdev_priv(dev);
+       int i;
+
+       mutex_lock(&ks->lock);
+
+       for (i = 0; i < ETH_ALEN; i++)
+               dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+       mutex_unlock(&ks->lock);
+}
+
+/**
  * ks8851_init_mac - initialise the mac address
  * @ks: The device structure
  *
  * Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
  * to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
  */
 static void ks8851_init_mac(struct ks8851_net *ks)
 {
        struct net_device *dev = ks->netdev;
 
+       /* first, try reading what we've got already */
+       if (ks->rc_ccr & CCR_EEPROM) {
+               ks8851_read_mac_addr(dev);
+               if (is_valid_ether_addr(dev->dev_addr))
+                       return;
+
+               netdev_err(ks->netdev, "invalid mac address read %pM\n",
+                               dev->dev_addr);
+       }
+
        random_ether_addr(dev->dev_addr);
        ks8851_write_mac_addr(dev);
 }
@@ -739,26 +796,6 @@ static void ks8851_tx_work(struct work_struct *work)
 }
 
 /**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
-       unsigned pmecr;
-
-       netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
-       pmecr = ks8851_rdreg16(ks, KS_PMECR);
-       pmecr &= ~PMECR_PM_MASK;
-       pmecr |= pwrmode;
-
-       ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
  * ks8851_net_open - open network device
  * @dev: The network device being opened.
  *
@@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-/* Companion eeprom access */
-
-enum { /* EEPROM programming states */
-       EEPROM_CONTROL,
-       EEPROM_ADDRESS,
-       EEPROM_DATA,
-       EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- *  - on period start: set clock high and read value on bus
- *  - on period / 2: set clock low and program value on bus
- *  - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
-       struct ks8851_net *ks = netdev_priv(dev);
-       int eepcr;
-       int ctrl = EEPROM_OP_READ;
-       int state = EEPROM_CONTROL;
-       int bit_count = EEPROM_OP_LEN - 1;
-       unsigned int data = 0;
-       int dummy;
-       unsigned int addr_len;
-
-       addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
-       /* start transaction: chip select high, authorize write */
-       mutex_lock(&ks->lock);
-       eepcr = EEPCR_EESA | EEPCR_EESRWA;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr |= EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-       while (state != EEPROM_COMPLETE) {
-               /* falling clock period starts... */
-               /* set EED_IO pin for control and address */
-               eepcr &= ~EEPCR_EEDO;
-               switch (state) {
-               case EEPROM_CONTROL:
-                       eepcr |= ((ctrl >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0) {
-                               bit_count = addr_len - 1;
-                               state = EEPROM_ADDRESS;
-                       }
-                       break;
-               case EEPROM_ADDRESS:
-                       eepcr |= ((addr >> bit_count) & 1) << 2;
-                       bit_count--;
-                       break;
-               case EEPROM_DATA:
-                       /* Change to receive mode */
-                       eepcr &= ~EEPCR_EESRWA;
-                       break;
-               }
-
-               /* lower clock  */
-               eepcr &= ~EEPCR_EESCK;
-
-               mutex_lock(&ks->lock);
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* waitread period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-
-               /* rising clock period starts... */
-
-               /* raise clock */
-               mutex_lock(&ks->lock);
-               eepcr |= EEPCR_EESCK;
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* Manage read */
-               switch (state) {
-               case EEPROM_ADDRESS:
-                       if (bit_count < 0) {
-                               bit_count = EEPROM_DATA_LEN - 1;
-                               state = EEPROM_DATA;
-                       }
-                       break;
-               case EEPROM_DATA:
-                       mutex_lock(&ks->lock);
-                       dummy = ks8851_rdreg16(ks, KS_EEPCR);
-                       mutex_unlock(&ks->lock);
-                       data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
-                       if (bit_count-- <= 0)
-                               state = EEPROM_COMPLETE;
-                       break;
-               }
-
-               /* wait period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-       }
-
-       /* close transaction */
-       mutex_lock(&ks->lock);
-       eepcr &= ~EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr = 0;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-       return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- *  - on period start: set clock high
- *  - on period / 2: set clock low and program value on bus
- *  - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
-                                       unsigned int addr, unsigned int data)
-{
-       struct ks8851_net *ks = netdev_priv(dev);
-       int eepcr;
-       int state = EEPROM_CONTROL;
-       int bit_count = EEPROM_OP_LEN - 1;
-       unsigned int addr_len;
-
-       addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
-       switch (op) {
-       case EEPROM_OP_EWEN:
-               addr = 0x30;
-       break;
-       case EEPROM_OP_EWDS:
-               addr = 0;
-               break;
-       }
-
-       /* start transaction: chip select high, authorize write */
-       mutex_lock(&ks->lock);
-       eepcr = EEPCR_EESA | EEPCR_EESRWA;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr |= EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-       while (state != EEPROM_COMPLETE) {
-               /* falling clock period starts... */
-               /* set EED_IO pin for control and address */
-               eepcr &= ~EEPCR_EEDO;
-               switch (state) {
-               case EEPROM_CONTROL:
-                       eepcr |= ((op >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0) {
-                               bit_count = addr_len - 1;
-                               state = EEPROM_ADDRESS;
-                       }
-                       break;
-               case EEPROM_ADDRESS:
-                       eepcr |= ((addr >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0) {
-                               if (op == EEPROM_OP_WRITE) {
-                                       bit_count = EEPROM_DATA_LEN - 1;
-                                       state = EEPROM_DATA;
-                               } else {
-                                       state = EEPROM_COMPLETE;
-                               }
-                       }
-                       break;
-               case EEPROM_DATA:
-                       eepcr |= ((data >> bit_count) & 1) << 2;
-                       if (bit_count-- <= 0)
-                               state = EEPROM_COMPLETE;
-                       break;
-               }
-
-               /* lower clock  */
-               eepcr &= ~EEPCR_EESCK;
-
-               mutex_lock(&ks->lock);
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* wait period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-
-               /* rising clock period starts... */
-
-               /* raise clock */
-               eepcr |= EEPCR_EESCK;
-               mutex_lock(&ks->lock);
-               ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-               mutex_unlock(&ks->lock);
-
-               /* wait period / 2 */
-               udelay(EEPROM_SK_PERIOD / 2);
-       }
-
-       /* close transaction */
-       mutex_lock(&ks->lock);
-       eepcr &= ~EEPCR_EECS;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       eepcr = 0;
-       ks8851_wrreg16(ks, KS_EEPCR, eepcr);
-       mutex_unlock(&ks->lock);
-
-}
-
 /* ethtool support */
 
 static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev)
        return mii_nway_restart(&ks->mii);
 }
 
-static int ks8851_get_eeprom_len(struct net_device *dev)
-{
-       struct ks8851_net *ks = netdev_priv(dev);
-       return ks->eeprom_size;
-}
+/* EEPROM support */
 
-static int ks8851_get_eeprom(struct net_device *dev,
-                           struct ethtool_eeprom *eeprom, u8 *bytes)
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
 {
-       struct ks8851_net *ks = netdev_priv(dev);
-       u16 *eeprom_buff;
-       int first_word;
-       int last_word;
-       int ret_val = 0;
-       u16 i;
-
-       if (eeprom->len == 0)
-               return -EINVAL;
+       struct ks8851_net *ks = ee->data;
+       unsigned val;
 
-       if (eeprom->len > ks->eeprom_size)
-               return -EINVAL;
+       val = ks8851_rdreg16(ks, KS_EEPCR);
 
-       eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+       ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+       ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+       ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
 
-       first_word = eeprom->offset >> 1;
-       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+       struct ks8851_net *ks = ee->data;
+       unsigned val = EEPCR_EESA;      /* default - eeprom access on */
+
+       if (ee->drive_data)
+               val |= EEPCR_EESRWA;
+       if (ee->reg_data_in)
+               val |= EEPCR_EEDO;
+       if (ee->reg_data_clock)
+               val |= EEPCR_EESCK;
+       if (ee->reg_chip_select)
+               val |= EEPCR_EECS;
+
+       ks8851_wrreg16(ks, KS_EEPCR, val);
+}
 
-       eeprom_buff = kmalloc(sizeof(u16) *
-                       (last_word - first_word + 1), GFP_KERNEL);
-       if (!eeprom_buff)
-               return -ENOMEM;
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+       if (!(ks->rc_ccr & CCR_EEPROM))
+               return -ENOENT;
 
-       for (i = 0; i < last_word - first_word + 1; i++)
-               eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
+       mutex_lock(&ks->lock);
 
-       /* Device's eeprom is little-endian, word addressable */
-       for (i = 0; i < last_word - first_word + 1; i++)
-               le16_to_cpus(&eeprom_buff[i]);
+       /* start with clock low, cs high */
+       ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+       return 0;
+}
 
-       memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
-       kfree(eeprom_buff);
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+       unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
 
-       return ret_val;
+       ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+       mutex_unlock(&ks->lock);
 }
 
+#define KS_EEPROM_MAGIC (0x00008851)
+
 static int ks8851_set_eeprom(struct net_device *dev,
-                           struct ethtool_eeprom *eeprom, u8 *bytes)
+                            struct ethtool_eeprom *ee, u8 *data)
 {
        struct ks8851_net *ks = netdev_priv(dev);
-       u16 *eeprom_buff;
-       void *ptr;
-       int max_len;
-       int first_word;
-       int last_word;
-       int ret_val = 0;
-       u16 i;
-
-       if (eeprom->len == 0)
-               return -EOPNOTSUPP;
-
-       if (eeprom->len > ks->eeprom_size)
+       int offset = ee->offset;
+       int len = ee->len;
+       u16 tmp;
+
+       /* currently only support byte writing */
+       if (len != 1)
                return -EINVAL;
 
-       if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
-               return -EFAULT;
+       if (ee->magic != KS_EEPROM_MAGIC)
+               return -EINVAL;
 
-       first_word = eeprom->offset >> 1;
-       last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-       max_len = (last_word - first_word + 1) * 2;
-       eeprom_buff = kmalloc(max_len, GFP_KERNEL);
-       if (!eeprom_buff)
-               return -ENOMEM;
+       if (ks8851_eeprom_claim(ks))
+               return -ENOENT;
 
-       ptr = (void *)eeprom_buff;
+       eeprom_93cx6_wren(&ks->eeprom, true);
 
-       if (eeprom->offset & 1) {
-               /* need read/modify/write of first changed EEPROM word */
-               /* only the second byte of the word is being modified */
-               eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
-               ptr++;
+       /* ethtool currently only supports writing bytes, which means
+        * we have to read/modify/write our 16bit EEPROMs */
+
+       eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+       if (offset & 1) {
+               tmp &= 0xff;
+               tmp |= *data << 8;
+       } else {
+               tmp &= 0xff00;
+               tmp |= *data;
        }
-       if ((eeprom->offset + eeprom->len) & 1)
-               /* need read/modify/write of last changed EEPROM word */
-               /* only the first byte of the word is being modified */
-               eeprom_buff[last_word - first_word] =
-                                       ks8851_eeprom_read(dev, last_word);
 
+       eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+       eeprom_93cx6_wren(&ks->eeprom, false);
 
-       /* Device's eeprom is little-endian, word addressable */
-       le16_to_cpus(&eeprom_buff[0]);
-       le16_to_cpus(&eeprom_buff[last_word - first_word]);
+       ks8851_eeprom_release(ks);
 
-       memcpy(ptr, bytes, eeprom->len);
+       return 0;
+}
 
-       for (i = 0; i < last_word - first_word + 1; i++)
-               eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+static int ks8851_get_eeprom(struct net_device *dev,
+                            struct ethtool_eeprom *ee, u8 *data)
+{
+       struct ks8851_net *ks = netdev_priv(dev);
+       int offset = ee->offset;
+       int len = ee->len;
 
-       ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+       /* must be 2 byte aligned */
+       if (len & 1 || offset & 1)
+               return -EINVAL;
 
-       for (i = 0; i < last_word - first_word + 1; i++) {
-               ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
-                                                       eeprom_buff[i]);
-               mdelay(EEPROM_WRITE_TIME);
-       }
+       if (ks8851_eeprom_claim(ks))
+               return -ENOENT;
+
+       ee->magic = KS_EEPROM_MAGIC;
 
-       ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+       eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+       ks8851_eeprom_release(ks);
 
-       kfree(eeprom_buff);
-       return ret_val;
+       return 0;
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+       struct ks8851_net *ks = netdev_priv(dev);
+
+       /* currently, we assume a 93C46 is attached, so return 128 */
+       return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
 }
 
 static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi)
        spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
        spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
 
+       /* setup EEPROM state */
+
+       ks->eeprom.data = ks;
+       ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+       ks->eeprom.register_read = ks8851_eeprom_regread;
+       ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
        /* setup mii state */
        ks->mii.dev             = ndev;
        ks->mii.phy_id          = 1,
@@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi)
                goto err_netdev;
        }
 
-       netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+       netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
                    CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
-                   ndev->dev_addr, ndev->irq);
+                   ndev->dev_addr, ndev->irq,
+                   ks->rc_ccr & CCR_EEPROM ? "has" : "no");
 
        return 0;
 
index 537fb06..b0fae86 100644 (file)
@@ -16,7 +16,7 @@
 #define CCR_32PIN                              (1 << 0)
 
 /* MAC address registers */
-#define KS_MAR(_m)                             0x15 - (_m)
+#define KS_MAR(_m)                             (0x15 - (_m))
 #define KS_MARL                                        0x10
 #define KS_MARM                                        0x12
 #define KS_MARH                                        0x14
 #define KS_EEPCR                               0x22
 #define EEPCR_EESRWA                           (1 << 5)
 #define EEPCR_EESA                             (1 << 4)
-#define EEPCR_EESB_OFFSET                      3
-#define EEPCR_EESB                             (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB                             (1 << 3)
 #define EEPCR_EEDO                             (1 << 2)
 #define EEPCR_EESCK                            (1 << 1)
 #define EEPCR_EECS                             (1 << 0)
 
-#define EEPROM_OP_LEN                          3       /* bits:*/
-#define EEPROM_OP_READ                         0x06
-#define EEPROM_OP_EWEN                         0x04
-#define EEPROM_OP_WRITE                                0x05
-#define EEPROM_OP_EWDS                         0x14
-
-#define EEPROM_DATA_LEN                                16      /* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME                      4       /* wrt ack time in ms */
-#define EEPROM_SK_PERIOD                       400     /* in us */
-
 #define KS_MBIR                                        0x24
 #define MBIR_TXMBF                             (1 << 12)
 #define MBIR_TXMBFA                            (1 << 11)
index d19c849..e58e78e 100644 (file)
@@ -1500,8 +1500,7 @@ static int ks_hw_init(struct ks_net *ks)
        ks->all_mcast = 0;
        ks->mcast_lst_size = 0;
 
-       ks->frame_head_info = (struct type_frame_head *) \
-               kmalloc(MHEADER_SIZE, GFP_KERNEL);
+       ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
        if (!ks->frame_head_info) {
                pr_err("Error: Fail to allocate frame memory\n");
                return false;
@@ -1659,18 +1658,7 @@ static struct platform_driver ks8851_platform_driver = {
        .remove = __devexit_p(ks8851_remove),
 };
 
-static int __init ks8851_init(void)
-{
-       return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
-       platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
 
 MODULE_DESCRIPTION("KS8851 MLL Network driver");
 MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
index 7ece990..a718865 100644 (file)
 /* Change default LED mode. */
 #define SET_DEFAULT_LED                        LED_SPEED_DUPLEX_ACT
 
-#define MAC_ADDR_LEN                   6
-#define MAC_ADDR_ORDER(i)              (MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i)              (ETH_ALEN - 1 - (i))
 
 #define MAX_ETHERNET_BODY_SIZE         1500
 #define ETHERNET_HEADER_SIZE           14
@@ -1043,7 +1042,7 @@ enum {
  * @valid:     Valid setting indicating the entry is being used.
  */
 struct ksz_mac_table {
-       u8 mac_addr[MAC_ADDR_LEN];
+       u8 mac_addr[ETH_ALEN];
        u16 vid;
        u8 fid;
        u8 ports;
@@ -1187,8 +1186,8 @@ struct ksz_switch {
        u8 diffserv[DIFFSERV_ENTRIES];
        u8 p_802_1p[PRIO_802_1P_ENTRIES];
 
-       u8 br_addr[MAC_ADDR_LEN];
-       u8 other_addr[MAC_ADDR_LEN];
+       u8 br_addr[ETH_ALEN];
+       u8 other_addr[ETH_ALEN];
 
        u8 broad_per;
        u8 member;
@@ -1292,14 +1291,14 @@ struct ksz_hw {
        int tx_int_mask;
        int tx_size;
 
-       u8 perm_addr[MAC_ADDR_LEN];
-       u8 override_addr[MAC_ADDR_LEN];
-       u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+       u8 perm_addr[ETH_ALEN];
+       u8 override_addr[ETH_ALEN];
+       u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
        u8 addr_list_size;
        u8 mac_override;
        u8 promiscuous;
        u8 all_multi;
-       u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+       u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
        u8 multi_bits[HW_MULTICAST_SIZE];
        u8 multi_list_size;
 
@@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
        static const u8 mask[] = { 0x3F };
        static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 
-       hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+       hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
 }
 
 /**
@@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
 {
        static const u8 mask[] = { 0x3F };
 
-       hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+       hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
 }
 
 /**
@@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw)
 {
        int i;
 
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
                        hw->io + KS884X_ADDR_0_OFFSET + i);
 
@@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw)
 {
        int i;
 
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
                        KS884X_ADDR_0_OFFSET + i);
 
        if (!hw->mac_override) {
-               memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+               memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
                if (empty_addr(hw->override_addr)) {
-                       memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
-                               MAC_ADDR_LEN);
+                       memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
                        memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
-                               MAC_ADDR_LEN);
+                              ETH_ALEN);
                        hw->override_addr[5] += hw->id;
                        hw_set_addr(hw);
                }
@@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
        int i;
        int j = ADDITIONAL_ENTRIES;
 
-       if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+       if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
                return 0;
        for (i = 0; i < hw->addr_list_size; i++) {
-               if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+               if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
                        return 0;
                if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
                        j = i;
        }
        if (j < ADDITIONAL_ENTRIES) {
-               memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+               memcpy(hw->address[j], mac_addr, ETH_ALEN);
                hw_ena_add_addr(hw, j, hw->address[j]);
                return 0;
        }
@@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
        int i;
 
        for (i = 0; i < hw->addr_list_size; i++) {
-               if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
-                       memset(hw->address[i], 0, MAC_ADDR_LEN);
+               if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+                       memset(hw->address[i], 0, ETH_ALEN);
                        writel(0, hw->io + ADD_ADDR_INCR * i +
                                KS_ADD_ADDR_0_HI);
                        return 0;
@@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info)
  */
 static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
 {
-       desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
-               GFP_KERNEL);
+       desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+                                 GFP_KERNEL);
        if (!desc_info->ring)
                return 1;
-       memset((void *) desc_info->ring, 0,
-               sizeof(struct ksz_desc) * desc_info->alloc);
        hw_init_desc(desc_info, transmit);
        return 0;
 }
@@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
                hw_del_addr(hw, dev->dev_addr);
        else {
                hw->mac_override = 1;
-               memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+               memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
        }
 
        memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
                netdev_for_each_mc_addr(ha, dev) {
                        if (i >= MAX_MULTICAST_LIST)
                                break;
-                       memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+                       memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
                }
                hw->multi_list_size = (u8) i;
                hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev,
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(hw_priv->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+               sizeof(info->bus_info));
 }
 
 /**
@@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
  *
  * Return 0 if successful; otherwise an error code.
  */
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
@@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
        int num;
 
        i = j = num = got_num = 0;
-       while (j < MAC_ADDR_LEN) {
+       while (j < ETH_ALEN) {
                if (macaddr[i]) {
                        int digit;
 
@@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
                }
                i++;
        }
-       if (MAC_ADDR_LEN == j) {
+       if (ETH_ALEN == j) {
                if (MAIN_PORT == port)
                        hw_priv->hw.mac_override = 1;
        }
@@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
 
        /* Multiple device interfaces mode requires a second MAC address. */
        if (hw->dev_count > 1) {
-               memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+               memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
                read_other_addr(hw);
                if (mac1addr[0] != ':')
                        get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
                dev->irq = pdev->irq;
                if (MAIN_PORT == i)
                        memcpy(dev->dev_addr, hw_priv->hw.override_addr,
-                               MAC_ADDR_LEN);
+                              ETH_ALEN);
                else {
-                       memcpy(dev->dev_addr, sw->other_addr,
-                               MAC_ADDR_LEN);
+                       memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
                        if (!memcmp(sw->other_addr, hw->override_addr,
-                                       MAC_ADDR_LEN))
+                                   ETH_ALEN))
                                dev->dev_addr[5] += port->first_port;
                }
 
index 0778edc..20b72ec 100644 (file)
@@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
         * access to avoid theoretical race condition with functions that
         * change NETIF_F_LRO flag at runtime.
         */
-       bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+       bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
 
        while (rx_done->entry[idx].length != 0 && work_done < budget) {
                length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
        return 0;
 }
 
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;
index fc7c6a9..5b89fd3 100644 (file)
@@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = {
        },
 };
 
-static int __init jazz_sonic_init_module(void)
-{
-       return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
-       platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
index a2eacbf..70367d7 100644 (file)
@@ -643,15 +643,4 @@ static struct platform_driver mac_sonic_driver = {
        },
 };
 
-static int __init mac_sonic_init_module(void)
-{
-       return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
-       platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
index 6ca047a..ac7b16b 100644 (file)
@@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev)
 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct netdev_private *np = netdev_priv(dev);
-       strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int get_regs_len(struct net_device *dev)
index 2b8f64d..c24b46c 100644 (file)
@@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev,
 static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
 {
        struct ns83820 *dev = PRIV(ndev);
-       strcpy(info->driver, "ns83820");
-       strcpy(info->version, VERSION);
-       strcpy(info->bus_info, pci_name(dev->pci_dev));
+       strlcpy(info->driver, "ns83820", sizeof(info->driver));
+       strlcpy(info->version, VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
 }
 
 static u32 ns83820_get_link(struct net_device *ndev)
index ccf61b9..e01c0a0 100644 (file)
@@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = {
        },
 };
 
-static int __init xtsonic_init(void)
-{
-       return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
-       platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
index c27fb3d..97f63e1 100644 (file)
@@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
 {
        struct s2io_nic *sp = netdev_priv(dev);
 
-       strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
-       strncpy(info->version, s2io_driver_version, sizeof(info->version));
-       strncpy(info->fw_version, "", sizeof(info->fw_version));
-       strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+       strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+       strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
        info->regdump_len = XENA_REG_SPACE;
        info->eedump_len = XENA_EEPROM_SPACE;
 }
@@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
        }
 }
 
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct s2io_nic *sp = netdev_priv(dev);
-       u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+       netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
 
        if (changed && netif_running(dev)) {
                int rc;
index a83197d..ef76725 100644 (file)
@@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data)
        mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
 }
 
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        /* Enabling RTH requires some of the logic in vxge_device_register and a
         * vpath reset.  Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct vxgedev *vdev = netdev_priv(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
 
        if (!(changed & NETIF_F_RXHASH))
                return 0;
@@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
  *
  * Add the vlan id to the devices vlan id table
  */
-static void
+static int
 vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                vxge_hw_vpath_vid_add(vpath->handle, vid);
        }
        set_bit(vid, vdev->active_vlans);
+       return 0;
 }
 
 /**
@@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
  *
  * Remove the vlan id from the device's vlan id table
  */
-static void
+static int
 vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        vxge_debug_entryexit(VXGE_TRACE,
                "%s:%d  Exiting...", __func__, __LINE__);
        clear_bit(vid, vdev->active_vlans);
+       return 0;
 }
 
 static const struct net_device_ops vxge_netdev_ops = {
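The s2io and vxge hunks above, like several conversions further down (pch_gbe, netxen, qlcnic, qlge, 8139cp, r8169), are part of the tree-wide move of the feature callbacks from u32 to the netdev_features_t typedef, together with ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid now returning int so failures can be reported. A hedged skeleton of the converted callback shapes (the mydrv_* names are placeholders, not taken from any of the patched drivers):

#include <linux/netdevice.h>

static netdev_features_t mydrv_fix_features(struct net_device *dev,
                                            netdev_features_t features)
{
        /* Example dependency: LRO needs RX checksumming. */
        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_LRO;
        return features;
}

static int mydrv_set_features(struct net_device *dev,
                              netdev_features_t features)
{
        netdev_features_t changed = dev->features ^ features;

        if (changed & NETIF_F_LRO) {
                /* reprogram the hardware LRO state here */
        }
        return 0;
}

static int mydrv_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        /* program the VLAN filter; return a negative errno on failure */
        return 0;
}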
index f1bfb8f..b75a049 100644 (file)
@@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = {
        },
 };
 
-static int __init w90p910_ether_init(void)
-{
-       return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
-       platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
 
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
index 1c61d36..4c4e7f4 100644 (file)
@@ -65,7 +65,8 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/prefetch.h>
-#include  <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
 #include <asm/system.h>
@@ -736,6 +737,16 @@ struct nv_skb_map {
  * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *     needs netdev_priv(dev)->lock :-(
  * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ *   integer wraparound in the NIC stats registers, at low frequency
+ *   (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are accessed only through 64b synchronization points
+ * and are not subject to other synchronization techniques (single
+ * update thread on the TX or RX paths).
  */
 
 /* in dev: base, irq */
@@ -745,9 +756,10 @@ struct fe_priv {
        struct net_device *dev;
        struct napi_struct napi;
 
-       /* General data:
-        * Locking: spin_lock(&np->lock); */
+       /* hardware stats are updated in syscall and timer */
+       spinlock_t hwstats_lock;
        struct nv_ethtool_stats estats;
+
        int in_shutdown;
        u32 linkspeed;
        int duplex;
@@ -798,6 +810,13 @@ struct fe_priv {
        u32 nic_poll_irq;
        int rx_ring_size;
 
+       /* RX software stats */
+       struct u64_stats_sync swstats_rx_syncp;
+       u64 stat_rx_packets;
+       u64 stat_rx_bytes; /* not always available in HW */
+       u64 stat_rx_missed_errors;
+       u64 stat_rx_dropped;
+
        /* media detection workaround.
         * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
         */
@@ -820,6 +839,12 @@ struct fe_priv {
        struct nv_skb_map *tx_end_flip;
        int tx_stop;
 
+       /* TX software stats */
+       struct u64_stats_sync swstats_tx_syncp;
+       u64 stat_tx_packets; /* not always available in HW */
+       u64 stat_tx_bytes;
+       u64 stat_tx_dropped;
+
        /* msi/msi-x fields */
        u32 msi_flags;
        struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -892,6 +917,11 @@ enum {
 static int dma_64bit = NV_DMA_64BIT_ENABLED;
 
 /*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
+/*
  * Crossover Detection
  * Realtek 8201 phy + some OEM boards do not work properly.
  */
@@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev)
        pci_push(base);
 }
 
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
 
+       /* If it happens that this is run in top-half context, then
+        * replace the spin_lock of hwstats_lock with
+        * spin_lock_irqsave() in calling functions. */
+       WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+       assert_spin_locked(&np->hwstats_lock);
+
+       /* query hardware */
        np->estats.tx_bytes += readl(base + NvRegTxCnt);
        np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
        np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,40 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev)
 }
 
 /*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
  * Called with read_lock(&dev_base_lock) held for read -
  * only synchronized against unregister_netdevice.
  */
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64*
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+       __acquires(&netdev_priv(dev)->hwstats_lock)
+       __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct fe_priv *np = netdev_priv(dev);
+       unsigned int syncp_start;
+
+       /*
+        * Note: because HW stats are not always available and for
+        * consistency reasons, the following ifconfig stats are
+        * managed by software: rx_bytes, tx_bytes, rx_packets and
+        * tx_packets. The related hardware stats reported by ethtool
+        * should be equivalent to these ifconfig stats, with 4
+        * additional bytes per packet (Ethernet FCS CRC), except for
+        * tx_packets when TSO kicks in.
+        */
+
+       /* software stats */
+       do {
+               syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+               storage->rx_packets       = np->stat_rx_packets;
+               storage->rx_bytes         = np->stat_rx_bytes;
+               storage->rx_dropped       = np->stat_rx_dropped;
+               storage->rx_missed_errors = np->stat_rx_missed_errors;
+       } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+       do {
+               syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+               storage->tx_packets = np->stat_tx_packets;
+               storage->tx_bytes   = np->stat_tx_bytes;
+               storage->tx_dropped = np->stat_tx_dropped;
+       } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
 
        /* If the nic supports hw counters then retrieve latest values */
-       if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
-               nv_get_hw_stats(dev);
+       if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+               spin_lock_bh(&np->hwstats_lock);
 
-               /*
-                * Note: because HW stats are not always available and
-                * for consistency reasons, the following ifconfig
-                * stats are managed by software: rx_bytes, tx_bytes,
-                * rx_packets and tx_packets. The related hardware
-                * stats reported by ethtool should be equivalent to
-                * these ifconfig stats, with 4 additional bytes per
-                * packet (Ethernet FCS CRC).
-                */
+               nv_update_stats(dev);
+
+               /* generic stats */
+               storage->rx_errors = np->estats.rx_errors_total;
+               storage->tx_errors = np->estats.tx_errors_total;
 
-               /* copy to net_device stats */
-               dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
-               dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
-               dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
-               dev->stats.rx_over_errors = np->estats.rx_over_errors;
-               dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
-               dev->stats.rx_errors = np->estats.rx_errors_total;
-               dev->stats.tx_errors = np->estats.tx_errors_total;
+               /* meaningful only when NIC supports stats v3 */
+               storage->multicast = np->estats.rx_multicast;
+
+               /* detailed rx_errors */
+               storage->rx_length_errors = np->estats.rx_length_error;
+               storage->rx_over_errors   = np->estats.rx_over_errors;
+               storage->rx_crc_errors    = np->estats.rx_crc_errors;
+               storage->rx_frame_errors  = np->estats.rx_frame_align_error;
+               storage->rx_fifo_errors   = np->estats.rx_drop_frame;
+
+               /* detailed tx_errors */
+               storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+               storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
+
+               spin_unlock_bh(&np->hwstats_lock);
        }
 
-       return &dev->stats;
+       return storage;
 }
 
 /*
@@ -1759,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev)
                                np->put_rx.orig = np->first_rx.orig;
                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
                                np->put_rx_ctx = np->first_rx_ctx;
-               } else
+               } else {
+                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                       np->stat_rx_dropped++;
+                       u64_stats_update_end(&np->swstats_rx_syncp);
                        return 1;
+               }
        }
        return 0;
 }
@@ -1791,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                                np->put_rx.ex = np->first_rx.ex;
                        if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
                                np->put_rx_ctx = np->first_rx_ctx;
-               } else
+               } else {
+                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                       np->stat_rx_dropped++;
+                       u64_stats_update_end(&np->swstats_rx_syncp);
                        return 1;
+               }
        }
        return 0;
 }
@@ -1849,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
                np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
        np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
        np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+       netdev_reset_queue(np->dev);
        np->tx_pkts_in_progress = 0;
        np->tx_change_owner = NULL;
        np->tx_end_flip = NULL;
@@ -1927,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev)
                        np->tx_ring.ex[i].bufhigh = 0;
                        np->tx_ring.ex[i].buflow = 0;
                }
-               if (nv_release_txskb(np, &np->tx_skb[i]))
-                       dev->stats.tx_dropped++;
+               if (nv_release_txskb(np, &np->tx_skb[i])) {
+                       u64_stats_update_begin(&np->swstats_tx_syncp);
+                       np->stat_tx_dropped++;
+                       u64_stats_update_end(&np->swstats_tx_syncp);
+               }
                np->tx_skb[i].dma = 0;
                np->tx_skb[i].dma_len = 0;
                np->tx_skb[i].dma_single = 0;
@@ -2194,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* set tx flags */
        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+       netdev_sent_queue(np->dev, skb->len);
+
        np->put_tx.orig = put_tx;
 
        spin_unlock_irqrestore(&np->lock, flags);
@@ -2338,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
        /* set tx flags */
        start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+       netdev_sent_queue(np->dev, skb->len);
+
        np->put_tx.ex = put_tx;
 
        spin_unlock_irqrestore(&np->lock, flags);
@@ -2375,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
        u32 flags;
        int tx_work = 0;
        struct ring_desc *orig_get_tx = np->get_tx.orig;
+       unsigned int bytes_compl = 0;
 
        while ((np->get_tx.orig != np->put_tx.orig) &&
               !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2385,12 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
                if (np->desc_ver == DESC_VER_1) {
                        if (flags & NV_TX_LASTPACKET) {
                                if (flags & NV_TX_ERROR) {
-                                       if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+                                       if ((flags & NV_TX_RETRYERROR)
+                                           && !(flags & NV_TX_RETRYCOUNT_MASK))
                                                nv_legacybackoff_reseed(dev);
                                } else {
-                                       dev->stats.tx_packets++;
-                                       dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+                                       u64_stats_update_begin(&np->swstats_tx_syncp);
+                                       np->stat_tx_packets++;
+                                       np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                                       u64_stats_update_end(&np->swstats_tx_syncp);
                                }
+                               bytes_compl += np->get_tx_ctx->skb->len;
                                dev_kfree_skb_any(np->get_tx_ctx->skb);
                                np->get_tx_ctx->skb = NULL;
                                tx_work++;
@@ -2398,12 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
                } else {
                        if (flags & NV_TX2_LASTPACKET) {
                                if (flags & NV_TX2_ERROR) {
-                                       if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+                                       if ((flags & NV_TX2_RETRYERROR)
+                                           && !(flags & NV_TX2_RETRYCOUNT_MASK))
                                                nv_legacybackoff_reseed(dev);
                                } else {
-                                       dev->stats.tx_packets++;
-                                       dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+                                       u64_stats_update_begin(&np->swstats_tx_syncp);
+                                       np->stat_tx_packets++;
+                                       np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                                       u64_stats_update_end(&np->swstats_tx_syncp);
                                }
+                               bytes_compl += np->get_tx_ctx->skb->len;
                                dev_kfree_skb_any(np->get_tx_ctx->skb);
                                np->get_tx_ctx->skb = NULL;
                                tx_work++;
@@ -2414,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
                        np->get_tx_ctx = np->first_tx_ctx;
        }
+
+       netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
        if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
                np->tx_stop = 0;
                netif_wake_queue(dev);
@@ -2427,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
        u32 flags;
        int tx_work = 0;
        struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+       unsigned long bytes_cleaned = 0;
 
        while ((np->get_tx.ex != np->put_tx.ex) &&
               !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2436,17 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 
                if (flags & NV_TX2_LASTPACKET) {
                        if (flags & NV_TX2_ERROR) {
-                               if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+                               if ((flags & NV_TX2_RETRYERROR)
+                                   && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
                                        if (np->driver_data & DEV_HAS_GEAR_MODE)
                                                nv_gear_backoff_reseed(dev);
                                        else
                                                nv_legacybackoff_reseed(dev);
                                }
                        } else {
-                               dev->stats.tx_packets++;
-                               dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+                               u64_stats_update_begin(&np->swstats_tx_syncp);
+                               np->stat_tx_packets++;
+                               np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+                               u64_stats_update_end(&np->swstats_tx_syncp);
                        }
 
+                       bytes_cleaned += np->get_tx_ctx->skb->len;
                        dev_kfree_skb_any(np->get_tx_ctx->skb);
                        np->get_tx_ctx->skb = NULL;
                        tx_work++;
@@ -2454,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
                        if (np->tx_limit)
                                nv_tx_flip_ownership(dev);
                }
+
                if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
                        np->get_tx.ex = np->first_tx.ex;
                if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
                        np->get_tx_ctx = np->first_tx_ctx;
        }
+
+       netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
        if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
                np->tx_stop = 0;
                netif_wake_queue(dev);
@@ -2477,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev)
        u32 status;
        union ring_type put_tx;
        int saved_tx_limit;
-       int i;
 
        if (np->msi_flags & NV_MSI_X_ENABLED)
                status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
        else
                status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-       netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+       netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
 
-       netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
-       netdev_info(dev, "Dumping tx registers\n");
-       for (i = 0; i <= np->register_size; i += 32) {
-               netdev_info(dev,
-                           "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
-                           i,
-                           readl(base + i + 0), readl(base + i + 4),
-                           readl(base + i + 8), readl(base + i + 12),
-                           readl(base + i + 16), readl(base + i + 20),
-                           readl(base + i + 24), readl(base + i + 28));
-       }
-       netdev_info(dev, "Dumping tx ring\n");
-       for (i = 0; i < np->tx_ring_size; i += 4) {
-               if (!nv_optimized(np)) {
-                       netdev_info(dev,
-                                   "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
-                                   i,
-                                   le32_to_cpu(np->tx_ring.orig[i].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i].flaglen),
-                                   le32_to_cpu(np->tx_ring.orig[i+1].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
-                                   le32_to_cpu(np->tx_ring.orig[i+2].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
-                                   le32_to_cpu(np->tx_ring.orig[i+3].buf),
-                                   le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
-               } else {
+       if (unlikely(debug_tx_timeout)) {
+               int i;
+
+               netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+               netdev_info(dev, "Dumping tx registers\n");
+               for (i = 0; i <= np->register_size; i += 32) {
                        netdev_info(dev,
-                                   "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+                                   "%3x: %08x %08x %08x %08x "
+                                   "%08x %08x %08x %08x\n",
                                    i,
-                                   le32_to_cpu(np->tx_ring.ex[i].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i].flaglen),
-                                   le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i+1].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
-                                   le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i+2].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
-                                   le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
-                                   le32_to_cpu(np->tx_ring.ex[i+3].buflow),
-                                   le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+                                   readl(base + i + 0), readl(base + i + 4),
+                                   readl(base + i + 8), readl(base + i + 12),
+                                   readl(base + i + 16), readl(base + i + 20),
+                                   readl(base + i + 24), readl(base + i + 28));
+               }
+               netdev_info(dev, "Dumping tx ring\n");
+               for (i = 0; i < np->tx_ring_size; i += 4) {
+                       if (!nv_optimized(np)) {
+                               netdev_info(dev,
+                                           "%03x: %08x %08x // %08x %08x "
+                                           "// %08x %08x // %08x %08x\n",
+                                           i,
+                                           le32_to_cpu(np->tx_ring.orig[i].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i].flaglen),
+                                           le32_to_cpu(np->tx_ring.orig[i+1].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+                                           le32_to_cpu(np->tx_ring.orig[i+2].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+                                           le32_to_cpu(np->tx_ring.orig[i+3].buf),
+                                           le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+                       } else {
+                               netdev_info(dev,
+                                           "%03x: %08x %08x %08x "
+                                           "// %08x %08x %08x "
+                                           "// %08x %08x %08x "
+                                           "// %08x %08x %08x\n",
+                                           i,
+                                           le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i].flaglen),
+                                           le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+                                           le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+                                           le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+                                           le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+                                           le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+                       }
                }
        }
 
@@ -2649,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
                                        }
                                        /* the rest are hard errors */
                                        else {
-                                               if (flags & NV_RX_MISSEDFRAME)
-                                                       dev->stats.rx_missed_errors++;
+                                               if (flags & NV_RX_MISSEDFRAME) {
+                                                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                                                       np->stat_rx_missed_errors++;
+                                                       u64_stats_update_end(&np->swstats_rx_syncp);
+                                               }
                                                dev_kfree_skb(skb);
                                                goto next_pkt;
                                        }
@@ -2693,8 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, dev);
                napi_gro_receive(&np->napi, skb);
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += len;
+               u64_stats_update_begin(&np->swstats_rx_syncp);
+               np->stat_rx_packets++;
+               np->stat_rx_bytes += len;
+               u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
                if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
                        np->get_rx.orig = np->first_rx.orig;
@@ -2777,8 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
                                __vlan_hwaccel_put_tag(skb, vid);
                        }
                        napi_gro_receive(&np->napi, skb);
-                       dev->stats.rx_packets++;
-                       dev->stats.rx_bytes += len;
+                       u64_stats_update_begin(&np->swstats_rx_syncp);
+                       np->stat_rx_packets++;
+                       np->stat_rx_bytes += len;
+                       u64_stats_update_end(&np->swstats_rx_syncp);
                } else {
                        dev_kfree_skb(skb);
                }
@@ -3021,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
        }
 }
 
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       u8 __iomem *base = get_hwbase(dev);
+       u32 phyreg, txreg;
+       int mii_status;
+
+       np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+       np->duplex = duplex;
+
+       /* see if gigabit phy */
+       mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+       if (mii_status & PHY_GIGABIT) {
+               np->gigabit = PHY_GIGABIT;
+               phyreg = readl(base + NvRegSlotTime);
+               phyreg &= ~(0x3FF00);
+               if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+                       phyreg |= NVREG_SLOTTIME_10_100_FULL;
+               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+                       phyreg |= NVREG_SLOTTIME_10_100_FULL;
+               else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+                       phyreg |= NVREG_SLOTTIME_1000_FULL;
+               writel(phyreg, base + NvRegSlotTime);
+       }
+
+       phyreg = readl(base + NvRegPhyInterface);
+       phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+       if (np->duplex == 0)
+               phyreg |= PHY_HALF;
+       if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+               phyreg |= PHY_100;
+       else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+                                                       NVREG_LINKSPEED_1000)
+               phyreg |= PHY_1000;
+       writel(phyreg, base + NvRegPhyInterface);
+
+       if (phyreg & PHY_RGMII) {
+               if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+                                                       NVREG_LINKSPEED_1000)
+                       txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+               else
+                       txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+       } else {
+               txreg = NVREG_TX_DEFERRAL_DEFAULT;
+       }
+       writel(txreg, base + NvRegTxDeferral);
+
+       if (np->desc_ver == DESC_VER_1) {
+               txreg = NVREG_TX_WM_DESC1_DEFAULT;
+       } else {
+               if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+                                        NVREG_LINKSPEED_1000)
+                       txreg = NVREG_TX_WM_DESC2_3_1000;
+               else
+                       txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+       }
+       writel(txreg, base + NvRegTxWatermark);
+
+       writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+                       base + NvRegMisc1);
+       pci_push(base);
+       writel(np->linkspeed, base + NvRegLinkSpeed);
+       pci_push(base);
+
+       return;
+}
+
 /**
  * nv_update_linkspeed: Setup the MAC according to the link partner
  * @dev: Network device to be configured
@@ -3042,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev)
        int newls = np->linkspeed;
        int newdup = np->duplex;
        int mii_status;
+       u32 bmcr;
        int retval = 0;
        u32 control_1000, status_1000, phyreg, pause_flags, txreg;
        u32 txrxFlags = 0;
        u32 phy_exp;
 
+       /* If device loopback is enabled, set carrier on and enable max link
+        * speed.
+        */
+       bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+       if (bmcr & BMCR_LOOPBACK) {
+               if (netif_running(dev)) {
+                       nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+                       if (!netif_carrier_ok(dev))
+                               netif_carrier_on(dev);
+               }
+               return 1;
+       }
+
        /* BMSR_LSTATUS is latched, read it twice:
         * we want the current value.
         */
@@ -3729,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                writel(0, base + NvRegMSIXMap0);
                                writel(0, base + NvRegMSIXMap1);
                        }
+                       netdev_info(dev, "MSI-X enabled\n");
                }
        }
        if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3750,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                        writel(0, base + NvRegMSIMap1);
                        /* enable msi vector 0 */
                        writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+                       netdev_info(dev, "MSI enabled\n");
                }
        }
        if (ret != 0) {
@@ -3904,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev)
 #endif
 
 static void nv_do_stats_poll(unsigned long data)
+       __acquires(&netdev_priv(dev)->hwstats_lock)
+       __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
 
-       nv_get_hw_stats(dev);
+       /* If lock is currently taken, the stats are being refreshed
+        * and hence fresh enough */
+       if (spin_trylock(&np->hwstats_lock)) {
+               nv_update_stats(dev);
+               spin_unlock(&np->hwstats_lock);
+       }
 
        if (!np->in_shutdown)
                mod_timer(&np->stats_poll,
@@ -3918,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data)
 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct fe_priv *np = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, FORCEDETH_VERSION);
-       strcpy(info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4473,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
        return 0;
 }
 
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+       struct fe_priv *np = netdev_priv(dev);
+       unsigned long flags;
+       u32 miicontrol;
+       int err, retval = 0;
+
+       spin_lock_irqsave(&np->lock, flags);
+       miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+       if (features & NETIF_F_LOOPBACK) {
+               if (miicontrol & BMCR_LOOPBACK) {
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       netdev_info(dev, "Loopback already enabled\n");
+                       return 0;
+               }
+               nv_disable_irq(dev);
+               /* Turn on loopback mode */
+               miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+               err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+               if (err) {
+                       retval = PHY_ERROR;
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       phy_init(dev);
+               } else {
+                       if (netif_running(dev)) {
+                               /* Force 1000 Mbps full-duplex */
+                               nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+                                                                        1);
+                               /* Force link up */
+                               netif_carrier_on(dev);
+                       }
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       netdev_info(dev,
+                               "Internal PHY loopback mode enabled.\n");
+               }
+       } else {
+               if (!(miicontrol & BMCR_LOOPBACK)) {
+                       spin_unlock_irqrestore(&np->lock, flags);
+                       netdev_info(dev, "Loopback already disabled\n");
+                       return 0;
+               }
+               nv_disable_irq(dev);
+               /* Turn off loopback */
+               spin_unlock_irqrestore(&np->lock, flags);
+               netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+               phy_init(dev);
+       }
+       msleep(500);
+       spin_lock_irqsave(&np->lock, flags);
+       nv_enable_irq(dev);
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /* vlan is dependent on rx checksum offload */
        if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4482,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
 {
        struct fe_priv *np = get_nvpriv(dev);
 
@@ -4503,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features)
        spin_unlock_irq(&np->lock);
 }
 
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
-       u32 changed = dev->features ^ features;
+       netdev_features_t changed = dev->features ^ features;
+       int retval;
+
+       if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+               retval = nv_set_loopback(dev, features);
+               if (retval != 0)
+                       return retval;
+       }
 
        if (changed & NETIF_F_RXCSUM) {
                spin_lock_irq(&np->lock);
@@ -4553,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+                                struct ethtool_stats *estats, u64 *buffer)
+       __acquires(&netdev_priv(dev)->hwstats_lock)
+       __releases(&netdev_priv(dev)->hwstats_lock)
 {
        struct fe_priv *np = netdev_priv(dev);
 
-       /* update stats */
-       nv_get_hw_stats(dev);
-
-       memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+       spin_lock_bh(&np->hwstats_lock);
+       nv_update_stats(dev);
+       memcpy(buffer, &np->estats,
+              nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+       spin_unlock_bh(&np->hwstats_lock);
 }
 
 static int nv_link_test(struct net_device *dev)
@@ -5142,6 +5424,12 @@ static int nv_open(struct net_device *dev)
 
        spin_unlock_irq(&np->lock);
 
+       /* If the loopback feature was set while the device was down, make sure
+        * that it's set correctly now.
+        */
+       if (dev->features & NETIF_F_LOOPBACK)
+               nv_set_loopback(dev, dev->features);
+
        return 0;
 out_drain:
        nv_drain_rxtx(dev);
@@ -5198,7 +5486,7 @@ static int nv_close(struct net_device *dev)
 static const struct net_device_ops nv_netdev_ops = {
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
-       .ndo_get_stats          = nv_get_stats,
+       .ndo_get_stats64        = nv_get_stats64,
        .ndo_start_xmit         = nv_start_xmit,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
@@ -5215,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = {
 static const struct net_device_ops nv_netdev_ops_optimized = {
        .ndo_open               = nv_open,
        .ndo_stop               = nv_close,
-       .ndo_get_stats          = nv_get_stats,
+       .ndo_get_stats64        = nv_get_stats64,
        .ndo_start_xmit         = nv_start_xmit_optimized,
        .ndo_tx_timeout         = nv_tx_timeout,
        .ndo_change_mtu         = nv_change_mtu,
@@ -5254,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        np->dev = dev;
        np->pci_dev = pci_dev;
        spin_lock_init(&np->lock);
+       spin_lock_init(&np->hwstats_lock);
        SET_NETDEV_DEV(dev, &pci_dev->dev);
 
        init_timer(&np->oom_kick);
@@ -5262,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        init_timer(&np->nic_poll);
        np->nic_poll.data = (unsigned long) dev;
        np->nic_poll.function = nv_do_nic_poll; /* timer handler */
-       init_timer(&np->stats_poll);
+       init_timer_deferrable(&np->stats_poll);
        np->stats_poll.data = (unsigned long) dev;
        np->stats_poll.function = nv_do_stats_poll;     /* timer handler */
 
@@ -5346,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 
        dev->features |= dev->hw_features;
 
+       /* Add loopback capability to the device. */
+       dev->hw_features |= NETIF_F_LOOPBACK;
+
        np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
        if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
            (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5621,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
        dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
                 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
-       dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+       dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
                 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
                 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
                        "csum " : "",
                 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
                        "vlan " : "",
+                dev->features & (NETIF_F_LOOPBACK) ?
+                       "loopback " : "",
                 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
                 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
                 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -6000,6 +6294,9 @@ module_param(phy_cross, int, 0);
 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
 module_param(phy_power_down, int, 0);
 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+                "Dump tx related registers and ring when tx_timeout happens");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
index 8c80271..ac4e72d 100644 (file)
@@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-       strcpy(drvinfo->driver, KBUILD_MODNAME);
-       strcpy(drvinfo->version, pch_driver_version);
-       strcpy(drvinfo->fw_version, "N/A");
-       strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+       strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
 }
 
index 48406ca..964e9c0 100644 (file)
@@ -2109,10 +2109,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
  * Returns
  *     0:              HW state updated successfully
  */
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (!(changed & NETIF_F_RXCSUM))
                return 0;
index e09ea83..8a37198 100644 (file)
@@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        u32 fw_minor = 0;
        u32 fw_build = 0;
 
-       strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
-       strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+       strlcpy(drvinfo->driver, netxen_nic_driver_name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+               sizeof(drvinfo->version));
        fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
        fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
        fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
-       sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d", fw_major, fw_minor, fw_build);
 
-       strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
        drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
 }
index 8cf3173..7dd9a4b 100644 (file)
@@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev)
        adapter->set_multi(dev);
 }
 
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        if (!(features & NETIF_F_RXCSUM)) {
                netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct netxen_adapter *adapter = netdev_priv(dev);
        int hw_lro;
index a4bdff4..7931531 100644 (file)
@@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev,
                           struct ethtool_drvinfo *drvinfo)
 {
        struct ql3_adapter *qdev = netdev_priv(ndev);
-       strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
-       strncpy(drvinfo->version, ql3xxx_driver_version, 32);
-       strncpy(drvinfo->fw_version, "N/A", 32);
-       strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+       strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, ql3xxx_driver_version,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
 }
index 7ed53db..60976fc 100644 (file)
@@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
 
 int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
 int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+       netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
 int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
 int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
index 8aa1c6e..cc228cf 100644 (file)
@@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
        fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
        fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
-       sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
-
-       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
-       strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
-       strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+               "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+       strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+               sizeof(drvinfo->bus_info));
+       strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+               sizeof(drvinfo->version));
 }
 
 static int
index bcb81e4..b528e52 100644 (file)
@@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
 }
 
 
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
-               u32 changed = features ^ netdev->features;
+               netdev_features_t changed = features ^ netdev->features;
                features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
        }
 
@@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
 }
 
 
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       u32 changed = netdev->features ^ features;
+       netdev_features_t changed = netdev->features ^ features;
        int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
 
        if (!(changed & NETIF_F_LRO))
index 0bd1638..69b8e4e 100644 (file)
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
 static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
                                struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
 
 /*  PCI Device ID Table  */
 #define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
                adapter->pvid = 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        set_bit(vid, adapter->vlans);
+       return 0;
 }
 
-static void
+static int
 qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
        clear_bit(vid, adapter->vlans);
+       return 0;
 }
 
 static void
@@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
                struct qlcnic_esw_func_cfg *esw_cfg)
 {
        struct net_device *netdev = adapter->netdev;
-       unsigned long features, vlan_features;
+       netdev_features_t features, vlan_features;
 
        features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                        NETIF_F_IPV6_CSUM | NETIF_F_GRO);
index 9b67bfe..8e2c2a7 100644 (file)
@@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev,
                           struct ethtool_drvinfo *drvinfo)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
-       strncpy(drvinfo->driver, qlge_driver_name, 32);
-       strncpy(drvinfo->version, qlge_driver_version, 32);
-       snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+       strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, qlge_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "v%d.%d.%d",
                 (qdev->fw_rev_id & 0x00ff0000) >> 16,
                 (qdev->fw_rev_id & 0x0000ff00) >> 8,
                 (qdev->fw_rev_id & 0x000000ff));
-       strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+       strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+               sizeof(drvinfo->bus_info));
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
index c92afcd..b548987 100644 (file)
@@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
 
@@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features)
        }
 }
 
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+       netdev_features_t features)
 {
        /*
         * Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features)
        return features;
 }
 
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+       netdev_features_t features)
 {
-       u32 changed = ndev->features ^ features;
+       netdev_features_t changed = ndev->features ^ features;
 
        if (changed & NETIF_F_HW_VLAN_RX)
                qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features)
        return 0;
 }
 
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
 {
        u32 enable_bit = MAC_ADDR_E;
+       int err;
 
-       if (ql_set_mac_addr_reg
-           (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to init vlan address.\n");
-       }
+       return err;
 }
 
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
+       int err;
 
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
-               return;
+               return status;
 
-       __qlge_vlan_rx_add_vid(qdev, vid);
+       err = __qlge_vlan_rx_add_vid(qdev, vid);
        set_bit(vid, qdev->active_vlans);
 
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
 }
 
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
 {
        u32 enable_bit = 0;
+       int err;
 
-       if (ql_set_mac_addr_reg
-           (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+       err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+                                 MAC_ADDR_TYPE_VLAN, vid);
+       if (err)
                netif_err(qdev, ifup, qdev->ndev,
                          "Failed to clear vlan address.\n");
-       }
+       return err;
 }
 
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;
+       int err;
 
        status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
        if (status)
-               return;
+               return status;
 
-       __qlge_vlan_rx_kill_vid(qdev, vid);
+       err = __qlge_vlan_rx_kill_vid(qdev, vid);
        clear_bit(vid, qdev->active_vlans);
 
        ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+       return err;
 }
 
 static void qlge_restore_vlan(struct ql_adapter *qdev)
index ee5da92..87cff10 100644 (file)
@@ -1319,9 +1319,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct cp_private *cp = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(cp->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
 }
 
 static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1392,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
        cp->msg_enable = value;
 }
 
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
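The get_drvinfo conversions throughout this merge replace open-coded strcpy() with strlcpy() bounded by the fixed-size fields of struct ethtool_drvinfo. A hedged sketch of the resulting idiom for a hypothetical PCI driver (struct foo_priv, DRV_NAME and DRV_VERSION stand in for the driver's own definitions):

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

struct foo_priv {
        struct pci_dev *pdev;
};

static void foo_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* ethtool_drvinfo fields are fixed-size char arrays; always pass
         * sizeof() so the copy is truncated and NUL-terminated. */
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}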
index 4d6b254..d9c7227 100644 (file)
@@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct rtl8139_private *tp = netdev_priv(dev);
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(tp->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
        info->regdump_len = tp->regs_len;
 }
 
index 67bf078..36b2a4b 100644 (file)
@@ -69,9 +69,6 @@
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
 
-/* MAC address length */
-#define MAC_ADDR_LEN   6
-
 #define MAX_READ_REQUEST_SHIFT 12
 #define TX_DMA_BURST   6       /* Maximum PCI burst, '6' is 1024 */
 #define SafeMtu                0x1c20  /* ... actually life sucks beyond ~7k */
@@ -1406,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
        struct rtl8169_private *tp = netdev_priv(dev);
        struct rtl_fw *rtl_fw = tp->rtl_fw;
 
-       strcpy(info->driver, MODULENAME);
-       strcpy(info->version, RTL8169_VERSION);
-       strcpy(info->bus_info, pci_name(tp->pci_dev));
+       strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+       strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
        BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
-       strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
-              rtl_fw->version);
+       if (!IS_ERR_OR_NULL(rtl_fw))
+               strlcpy(info->fw_version, rtl_fw->version,
+                       sizeof(info->fw_version));
 }
 
 static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1555,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        return ret;
 }
 
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
@@ -1569,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
@@ -4099,7 +4099,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        spin_lock_init(&tp->lock);
 
        /* Get MAC address */
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                dev->dev_addr[i] = RTL_R8(MAC0 + i);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
index 9b23074..ebfb682 100644 (file)
@@ -1957,18 +1957,7 @@ static struct platform_driver sh_eth_driver = {
        },
 };
 
-static int __init sh_eth_init(void)
-{
-       return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
-       platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
 
 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
index c3673f1..f955a19 100644 (file)
@@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = {
        }
 };
 
-static int __init sgiseeq_module_init(void)
-{
-       if (platform_driver_register(&sgiseeq_driver)) {
-               printk(KERN_ERR "Driver registration failed\n");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
-       platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
 
 MODULE_DESCRIPTION("SGI Seeq 8003 driver");
 MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
index d5731f1..14e134d 100644 (file)
@@ -1900,7 +1900,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
        /* Otherwise efx_start_port() will do this */
 }
 
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
index b8e251a..c49502b 100644 (file)
@@ -908,7 +908,7 @@ struct efx_nic_type {
        unsigned int phys_addr_channels;
        unsigned int tx_dc_base;
        unsigned int rx_dc_base;
-       u32 offload_features;
+       netdev_features_t offload_features;
 };
 
 /**************************************************************************
index 752d521..955b149 100644 (file)
@@ -669,7 +669,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
                  rx_queue->ptr_mask);
 
        /* Allocate RX buffers */
-       rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+       rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;
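The kzalloc(n * size) to kcalloc(n, size) conversions in the sfc hunks are about overflow safety: kcalloc() returns NULL instead of allocating a short buffer if the count times the element size would overflow. A small illustrative sketch (entries and struct foo_desc are hypothetical):

        /* risky if 'entries' can grow large enough to overflow the multiply */
        ring = kzalloc(entries * sizeof(struct foo_desc), GFP_KERNEL);

        /* same zeroed allocation, but the multiplication is overflow-checked */
        ring = kcalloc(entries, sizeof(struct foo_desc), GFP_KERNEL);
        if (!ring)
                return -ENOMEM;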
index 822f6c2..52edd24 100644 (file)
@@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
                /* Determine how many packets to send */
                state->packet_count = efx->txq_entries / 3;
                state->packet_count = min(1 << (i << 2), state->packet_count);
-               state->skbs = kzalloc(sizeof(state->skbs[0]) *
-                                     state->packet_count, GFP_KERNEL);
+               state->skbs = kcalloc(state->packet_count,
+                                     sizeof(state->skbs[0]), GFP_KERNEL);
                if (!state->skbs)
                        return -ENOMEM;
                state->flush = false;
index df88c54..72f0fbc 100644 (file)
@@ -31,7 +31,9 @@
 #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                              struct efx_tx_buffer *buffer)
+                              struct efx_tx_buffer *buffer,
+                              unsigned int *pkts_compl,
+                              unsigned int *bytes_compl)
 {
        if (buffer->unmap_len) {
                struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        }
 
        if (buffer->skb) {
+               (*pkts_compl)++;
+               (*bytes_compl) += buffer->skb->len;
                dev_kfree_skb_any((struct sk_buff *) buffer->skb);
                buffer->skb = NULL;
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        buffer->skb = skb;
        buffer->continuation = false;
 
+       netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  unwind:
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
+               unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
                insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[insert_ptr];
-               efx_dequeue_buffer(tx_queue, buffer);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->len = 0;
        }
 
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
  * specified index.
  */
 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
-                               unsigned int index)
+                               unsigned int index,
+                               unsigned int *pkts_compl,
+                               unsigned int *bytes_compl)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                        return;
                }
 
-               efx_dequeue_buffer(tx_queue, buffer);
+               efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
                buffer->continuation = true;
                buffer->len = 0;
 
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned fill_level;
        struct efx_nic *efx = tx_queue->efx;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
 
        EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
-       efx_dequeue_buffers(tx_queue, index);
+       efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+       netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
 
        /* See if we need to restart the netif queue.  This barrier
         * separates the update of read_count from the test of the
@@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
 
        /* Allocate software ring */
-       tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+       tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 
        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
+               unsigned int pkts_compl = 0, bytes_compl = 0;
                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-               efx_dequeue_buffer(tx_queue, buffer);
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
                buffer->continuation = true;
                buffer->len = 0;
 
                ++tx_queue->read_count;
        }
+       netdev_tx_reset_queue(tx_queue->core_txq);
 }
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                        goto mem_err;
        }
 
+       netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
index 60135aa..f98c6c6 100644 (file)
@@ -830,24 +830,7 @@ static struct platform_driver meth_driver = {
        }
 };
 
-static int __init meth_init_module(void)
-{
-       int err;
-
-       err = platform_driver_register(&meth_driver);
-       if (err)
-               printk(KERN_ERR "Driver registration failed\n");
-
-       return err;
-}
-
-static void __exit meth_exit_module(void)
-{
-       platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
 
 MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
 MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
index 1b4658c..5b118cd 100644 (file)
@@ -47,8 +47,6 @@
 #define sis190_rx_skb                  netif_rx
 #define sis190_rx_quota(count, quota)  count
 
-#define MAC_ADDR_LEN           6
-
 #define NUM_TX_DESC            64      /* [8..1024] */
 #define NUM_RX_DESC            64      /* [8..8192] */
 #define TX_RING_BYTES          (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
        }
 
        /* Get MAC address from EEPROM */
-       for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+       for (i = 0; i < ETH_ALEN / 2; i++) {
                u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
 
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
        udelay(50);
        pci_read_config_byte(isa_bridge, 0x48, &reg);
 
-        for (i = 0; i < MAC_ADDR_LEN; i++) {
+        for (i = 0; i < ETH_ALEN; i++) {
                 outb(0x9 + i, 0x78);
                 dev->dev_addr[i] = inb(0x79);
         }
@@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
         */
        SIS_W16(RxMacControl, ctl & ~0x0f00);
 
-       for (i = 0; i < MAC_ADDR_LEN; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
 
        SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev,
 {
        struct sis190_private *tp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(tp->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(tp->pci_dev),
+               sizeof(info->bus_info));
 }
 
 static int sis190_get_regs_len(struct net_device *dev)
index a184abc..c8efc70 100644 (file)
@@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
 {
        struct sis900_private *sis_priv = netdev_priv(net_dev);
 
-       strcpy (info->driver, SIS900_MODULE_NAME);
-       strcpy (info->version, SIS900_DRV_VERSION);
-       strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+       strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+               sizeof(info->bus_info));
 }
 
 static u32 sis900_get_msglevel(struct net_device *net_dev)
index 0a5dfb8..2c077ce 100644 (file)
@@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
 {
        struct epic_private *np = netdev_priv(dev);
 
-       strcpy (info->driver, DRV_NAME);
-       strcpy (info->version, DRV_VERSION);
-       strcpy (info->bus_info, pci_name(np->pci_dev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index 8f61fe9..313ba3b 100644 (file)
@@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = {
        },
 };
 
-static int __init smc911x_init(void)
-{
-       return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
-       platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
index cbfa981..ada927a 100644 (file)
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
 
 static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
index f47f81e..64ad3ed 100644 (file)
@@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = {
        },
 };
 
-static int __init smc_init(void)
-{
-       return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
-       platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
index 8843071..06d0df6 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
@@ -88,6 +89,8 @@ struct smsc911x_ops {
                                unsigned int *buf, unsigned int wordcount);
 };
 
+#define SMSC911X_NUM_SUPPLIES 2
+
 struct smsc911x_data {
        void __iomem *ioaddr;
 
@@ -138,6 +141,9 @@ struct smsc911x_data {
 
        /* register access functions */
        const struct smsc911x_ops *ops;
+
+       /* regulators */
+       struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
 };
 
 /* Easy access to information */
@@ -362,6 +368,76 @@ out:
        spin_unlock_irqrestore(&pdata->dev_lock, flags);
 }
 
+/*
+ * enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+       int ret = 0;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+       if (ret)
+               netdev_err(ndev, "failed to enable regulators %d\n",
+                               ret);
+       return ret;
+}
+
+/*
+ * disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+       int ret = 0;
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+       return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a. In designs where
+ * these are not always-on, we need to request that the regulators be turned
+ * on before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+       int ret = 0;
+
+       /* Request regulators */
+       pdata->supplies[0].supply = "vdd33a";
+       pdata->supplies[1].supply = "vddvario";
+       ret = regulator_bulk_get(&pdev->dev,
+                       ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+       if (ret)
+               netdev_err(ndev, "couldn't get regulators %d\n",
+                               ret);
+       return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct smsc911x_data *pdata = netdev_priv(ndev);
+
+       /* Free regulators */
+       regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+                       pdata->supplies);
+}
+
 /* waits for MAC not busy, with timeout.  Only called by smsc911x_mac_read
  * and smsc911x_mac_write, so assumes mac_lock is held */
 static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -2092,6 +2168,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
 
        iounmap(pdata->ioaddr);
 
+       (void)smsc911x_disable_resources(pdev);
+       smsc911x_free_resources(pdev);
+
        free_netdev(dev);
 
        return 0;
@@ -2218,10 +2297,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
        pdata->dev = dev;
        pdata->msg_enable = ((1 << debug) - 1);
 
+       platform_set_drvdata(pdev, dev);
+
+       retval = smsc911x_request_resources(pdev);
+       if (retval)
+               goto out_return_resources;
+
+       retval = smsc911x_enable_resources(pdev);
+       if (retval)
+               goto out_disable_resources;
+
        if (pdata->ioaddr == NULL) {
                SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
                retval = -ENOMEM;
-               goto out_free_netdev_2;
+               goto out_disable_resources;
        }
 
        retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2233,7 +2322,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
        if (retval) {
                SMSC_WARN(pdata, probe, "Error smsc911x config not found");
-               goto out_unmap_io_3;
+               goto out_disable_resources;
        }
 
        /* assume standard, non-shifted, access to HW registers */
@@ -2244,7 +2333,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
        retval = smsc911x_init(dev);
        if (retval < 0)
-               goto out_unmap_io_3;
+               goto out_disable_resources;
 
        /* configure irq polarity and type before connecting isr */
        if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2264,15 +2353,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
        if (retval) {
                SMSC_WARN(pdata, probe,
                          "Unable to claim requested irq: %d", dev->irq);
-               goto out_unmap_io_3;
+               goto out_free_irq;
        }
 
-       platform_set_drvdata(pdev, dev);
-
        retval = register_netdev(dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-               goto out_unset_drvdata_4;
+               goto out_free_irq;
        } else {
                SMSC_TRACE(pdata, probe,
                           "Network interface: \"%s\"", dev->name);
@@ -2321,12 +2408,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 
 out_unregister_netdev_5:
        unregister_netdev(dev);
-out_unset_drvdata_4:
-       platform_set_drvdata(pdev, NULL);
+out_free_irq:
        free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+       (void)smsc911x_disable_resources(pdev);
+out_return_resources:
+       smsc911x_free_resources(pdev);
+       platform_set_drvdata(pdev, NULL);
        iounmap(pdata->ioaddr);
-out_free_netdev_2:
        free_netdev(dev);
 out_release_io_1:
        release_mem_region(res->start, resource_size(res));
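The smsc911x probe/remove rework above uses the regulator bulk API, and the same get, enable, disable, free lifecycle applies generally. A minimal hedged sketch (the supply names match smsc911x's vdd33a/vddvario pins; the foo_* wrappers are illustrative):

#include <linux/regulator/consumer.h>

static struct regulator_bulk_data foo_supplies[] = {
        { .supply = "vdd33a" },
        { .supply = "vddvario" },
};

static int foo_power_on(struct device *dev)
{
        int ret;

        ret = regulator_bulk_get(dev, ARRAY_SIZE(foo_supplies), foo_supplies);
        if (ret)
                return ret;

        ret = regulator_bulk_enable(ARRAY_SIZE(foo_supplies), foo_supplies);
        if (ret)
                regulator_bulk_free(ARRAY_SIZE(foo_supplies), foo_supplies);
        return ret;
}

static void foo_power_off(void)
{
        regulator_bulk_disable(ARRAY_SIZE(foo_supplies), foo_supplies);
        regulator_bulk_free(ARRAY_SIZE(foo_supplies), foo_supplies);
}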
index edb24b0..a9efbdf 100644 (file)
@@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
 {
        struct smsc9420_pdata *pd = netdev_priv(netdev);
 
-       strcpy(drvinfo->driver, DRV_NAME);
-       strcpy(drvinfo->bus_info, pci_name(pd->pdev));
-       strcpy(drvinfo->version, DRV_VERSION);
+       strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+               sizeof(drvinfo->bus_info));
+       strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
 }
 
 static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
index 0395f9e..ed83c4c 100644 (file)
@@ -185,9 +185,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
        struct stmmac_priv *priv = netdev_priv(dev);
 
        if (priv->plat->has_gmac)
-               strcpy(info->driver, GMAC_ETHTOOL_NAME);
+               strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
        else
-               strcpy(info->driver, MAC100_ETHTOOL_NAME);
+               strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+                       sizeof(info->driver));
 
        strcpy(info->version, DRV_MODULE_VERSION);
        info->fw_version[0] = '\0';
index 72cd190..24c2bf6 100644 (file)
@@ -1470,7 +1470,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
 
@@ -2130,27 +2131,6 @@ static struct platform_driver stmmac_driver = {
        },
 };
 
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&stmmac_driver);
-       return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-       platform_driver_unregister(&stmmac_driver);
-}
-
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
@@ -2210,8 +2190,7 @@ err:
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif
 
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
+module_platform_driver(stmmac_driver);
 
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
index fd40988..f10665f 100644 (file)
@@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev)
 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct cas *cp = netdev_priv(dev);
-       strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
-       info->fw_version[0] = '\0';
-       strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
        info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
                cp->casreg_len : CAS_MAX_REGS;
        info->n_stats = CAS_NUM_STAT_KEYS;
index 73c7081..cf43393 100644 (file)
@@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p)
                supported |= SUPPORTED_1000baseT_Full;
        lp->supported = supported;
 
-       advertising = 0;
-       if (advert & ADVERTISE_10HALF)
-               advertising |= ADVERTISED_10baseT_Half;
-       if (advert & ADVERTISE_10FULL)
-               advertising |= ADVERTISED_10baseT_Full;
-       if (advert & ADVERTISE_100HALF)
-               advertising |= ADVERTISED_100baseT_Half;
-       if (advert & ADVERTISE_100FULL)
-               advertising |= ADVERTISED_100baseT_Full;
-       if (ctrl1000 & ADVERTISE_1000HALF)
-               advertising |= ADVERTISED_1000baseT_Half;
-       if (ctrl1000 & ADVERTISE_1000FULL)
-               advertising |= ADVERTISED_1000baseT_Full;
+       advertising = mii_adv_to_ethtool_adv_t(advert);
+       advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 
        if (bmcr & BMCR_ANENABLE) {
                int neg, neg1000;
@@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
        struct netdev_queue *txq;
+       unsigned int tx_bytes;
        u16 pkt_cnt, tmp;
        int cons, index;
        u64 cs;
@@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
        netif_printk(np, tx_done, KERN_DEBUG, np->dev,
                     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
 
-       while (pkt_cnt--)
+       tx_bytes = 0;
+       tmp = pkt_cnt;
+       while (tmp--) {
+               tx_bytes += rp->tx_buffs[cons].skb->len;
                cons = release_tx_packet(np, rp, cons);
+       }
 
        rp->cons = cons;
        smp_mb();
 
+       netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
 out:
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np)
                        struct tx_ring_info *rp = &np->tx_rings[i];
 
                        niu_free_tx_ring_info(np, rp);
+                       netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
                }
                kfree(np->tx_rings);
                np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                prod = NEXT_TX(rp, prod);
        }
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        if (prod < rp->prod)
                rp->wrap_bit ^= TX_RING_KICK_WRAP;
        rp->prod = prod;
@@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev,
        struct niu *np = netdev_priv(dev);
        struct niu_vpd *vpd = &np->vpd;
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       sprintf(info->fw_version, "%d.%d",
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
                vpd->fcode_major, vpd->fcode_minor);
        if (np->parent->plat_type != PLAT_TYPE_NIU)
-               strcpy(info->bus_info, pci_name(np->pdev));
+               strlcpy(info->bus_info, pci_name(np->pdev),
+                       sizeof(info->bus_info));
 }
 
 static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent,
        if (dev_id_1 < 0 || dev_id_2 < 0)
                return 0;
        if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+               /* Because of the NIU_PHY_ID_MASK being applied, the 8704
+                * test covers the 8706 as well.
+                */
                if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
                        return 0;
        } else {
                if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
index 0d8cfd9..220f724 100644 (file)
@@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = {
        .remove         = __devexit_p(bigmac_sbus_remove),
 };
 
-static int __init bigmac_init(void)
-{
-       return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
-       platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
index ceab215..31441a8 100644 (file)
@@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct gem *gp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(gp->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
 }
 
 static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index cf14ab9..eebd52f 100644 (file)
@@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct happy_meal *hp = netdev_priv(dev);
 
-       strcpy(info->driver, "sunhme");
-       strcpy(info->version, "2.02");
+       strlcpy(info->driver, "sunhme", sizeof(info->driver));
+       strlcpy(info->version, "2.02", sizeof(info->version));
        if (hp->happy_flags & HFLAG_PCI) {
                struct pci_dev *pdev = hp->happy_dev;
-               strcpy(info->bus_info, pci_name(pdev));
+               strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
        }
 #ifdef CONFIG_SBUS
        else {
@@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
                struct platform_device *op = hp->happy_dev;
                regs = of_get_property(op->dev.of_node, "regs", NULL);
                if (regs)
-                       sprintf(info->bus_info, "SBUS:%d",
+                       snprintf(info->bus_info, sizeof(info->bus_info),
+                               "SBUS:%d",
                                regs->which_io);
        }
 #endif
index 3a90af6..4b19e9b 100644 (file)
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
  * @ndev network device
  * @vid  VLAN vid to add
  */
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
 {
        __bdx_vlan_rx_vid(ndev, vid, 1);
+       return 0;
 }
 
 /*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
  * @ndev network device
  * @vid  VLAN vid to kill
  */
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
 {
        __bdx_vlan_rx_vid(ndev, vid, 0);
+       return 0;
 }
 
 /**
index a8df7ec..a9ce01b 100644 (file)
@@ -1688,18 +1688,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr)
        mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
 }
 
-static int tsi108_ether_init(void)
-{
-       int ret;
-       ret = platform_driver_register (&tsi_eth_driver);
-       if (ret < 0){
-               printk("tsi108_ether_init: error initializing ethernet "
-                      "device\n");
-               return ret;
-       }
-       return 0;
-}
-
 static int tsi108_ether_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1702,7 @@ static int tsi108_ether_remove(struct platform_device *pdev)
 
        return 0;
 }
-static void tsi108_ether_exit(void)
-{
-       platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
 
 MODULE_AUTHOR("Tundra Semiconductor Corporation");
 MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
index f34dd99..bcdbdc7 100644 (file)
@@ -488,8 +488,8 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
@@ -1261,7 +1261,7 @@ static void rhine_update_vcam(struct net_device *dev)
        rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
 }
 
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct rhine_private *rp = netdev_priv(dev);
 
@@ -1269,9 +1269,10 @@ static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
+       return 0;
 }
 
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct rhine_private *rp = netdev_priv(dev);
 
@@ -1279,6 +1280,7 @@ static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
+       return 0;
 }
 
 static void init_registers(struct net_device *dev)
@@ -2009,9 +2011,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
 {
        struct rhine_private *rp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(rp->pdev));
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
 }
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
index 4535d7c..4128d6b 100644 (file)
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
        mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 }
 
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
 
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
        set_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
+       return 0;
 }
 
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct velocity_info *vptr = netdev_priv(dev);
 
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
        clear_bit(vid, vptr->active_vlans);
        velocity_init_cam_filter(vptr);
        spin_unlock_irq(&vptr->lock);
+       return 0;
 }
 
 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -3270,9 +3272,9 @@ static int velocity_set_settings(struct net_device *dev,
 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct velocity_info *vptr = netdev_priv(dev);
-       strcpy(info->driver, VELOCITY_NAME);
-       strcpy(info->version, VELOCITY_VERSION);
-       strcpy(info->bus_info, pci_name(vptr->pdev));
+       strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+       strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
 }
 
 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
index 2681b53..903a77b 100644 (file)
@@ -237,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
        struct sk_buff *skb;
        int i;
 
-       lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+       lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
        if (!lp->rx_skb) {
                dev_err(&ndev->dev,
                                "can't allocate memory for DMA RX buffer\n");
@@ -920,12 +920,26 @@ temac_poll_controller(struct net_device *ndev)
 }
 #endif
 
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+       struct temac_local *lp = netdev_priv(ndev);
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       if (!lp->phy_dev)
+               return -EINVAL;
+
+       return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
 static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        .ndo_set_mac_address = netdev_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
+       .ndo_do_ioctl = temac_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
 #endif
@@ -1167,17 +1181,7 @@ static struct platform_driver temac_of_driver = {
        },
 };
 
-static int __init temac_init(void)
-{
-       return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
-       platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
 
 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
 MODULE_AUTHOR("Yoshio Kashiwagi");
index 8018d7d..dca6541 100644 (file)
@@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = {
        .remove         = __devexit_p(xemaclite_of_remove),
 };
 
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return:     0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
-       /* No kernel boot options used, we just need to register the driver */
-       return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
-       platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
 
 MODULE_AUTHOR("Xilinx, Inc.");
 MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
index bbe8b7d..33979c3 100644 (file)
@@ -1411,7 +1411,7 @@ do_open(struct net_device *dev)
 static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, "xirc2ps_cs");
+       strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
        sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
 }
 
index 46b5f5f..e05b645 100644 (file)
@@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
 };
 
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
                      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6      | \
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
 
index 9d4ce1a..a561ae4 100644 (file)
@@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = {
        },
 };
 
-static int __init bfin_sir_init(void)
-{
-       return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
-       platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
 
 module_param(max_rate, int, 0);
 MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
index d0851df..81d5275 100644 (file)
@@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = {
        .resume         = pxa_irda_resume,
 };
 
-static int __init pxa_irda_init(void)
-{
-       return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
-       platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pxa2xx-ir");
index d275e27..725d6b3 100644 (file)
@@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = {
        },
 };
 
-static int __init sh_irda_init(void)
-{
-       return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
-       platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
 
 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
index ed7d7d6..e6661b5 100644 (file)
@@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = {
        },
 };
 
-static int __init sh_sir_init(void)
-{
-       return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
-       platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
 
 MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
 MODULE_DESCRIPTION("SuperH IrDA driver");
index 4ce9e5f..b71998d 100644 (file)
@@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev)
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
                | NETIF_F_ALL_TSO
                | NETIF_F_UFO
-               | NETIF_F_NO_CSUM
+               | NETIF_F_HW_CSUM
                | NETIF_F_RXCSUM
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
index 7413497..f2f820c 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <linux/if_link.h>
 #include <linux/if_macvlan.h>
 #include <net/rtnetlink.h>
@@ -520,26 +521,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
        return stats;
 }
 
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
                                    unsigned short vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
-       const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-       if (ops->ndo_vlan_rx_add_vid)
-               ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+       return vlan_vid_add(lowerdev, vid);
 }
 
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
                                     unsigned short vid)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct net_device *lowerdev = vlan->lowerdev;
-       const struct net_device_ops *ops = lowerdev->netdev_ops;
 
-       if (ops->ndo_vlan_rx_kill_vid)
-               ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+       vlan_vid_del(lowerdev, vid);
+       return 0;
 }
 
 static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
index 1b7082d..7c88d13 100644 (file)
@@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
        if (vlan) {
                int index = get_slot(vlan, q);
 
-               rcu_assign_pointer(vlan->taps[index], NULL);
-               rcu_assign_pointer(q->vlan, NULL);
+               RCU_INIT_POINTER(vlan->taps[index], NULL);
+               RCU_INIT_POINTER(q->vlan, NULL);
                sock_put(&q->sk);
                --vlan->numvtaps;
        }
@@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev)
                                              lockdep_is_held(&macvtap_lock));
                if (q) {
                        qlist[j++] = q;
-                       rcu_assign_pointer(vlan->taps[i], NULL);
-                       rcu_assign_pointer(q->vlan, NULL);
+                       RCU_INIT_POINTER(vlan->taps[i], NULL);
+                       RCU_INIT_POINTER(q->vlan, NULL);
                        vlan->numvtaps--;
                }
        }
index c62e781..c70c233 100644 (file)
 
 static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
 {
-       u32 result = 0;
        int advert;
 
        advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
-       if (advert & LPA_LPACK)
-               result |= ADVERTISED_Autoneg;
-       if (advert & ADVERTISE_10HALF)
-               result |= ADVERTISED_10baseT_Half;
-       if (advert & ADVERTISE_10FULL)
-               result |= ADVERTISED_10baseT_Full;
-       if (advert & ADVERTISE_100HALF)
-               result |= ADVERTISED_100baseT_Half;
-       if (advert & ADVERTISE_100FULL)
-               result |= ADVERTISED_100baseT_Full;
-       if (advert & ADVERTISE_PAUSE_CAP)
-               result |= ADVERTISED_Pause;
-       if (advert & ADVERTISE_PAUSE_ASYM)
-               result |= ADVERTISED_Asym_Pause;
-
-       return result;
+
+       return mii_lpa_to_ethtool_lpa_t(advert);
 }
 
 /**
@@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
                ecmd->autoneg = AUTONEG_ENABLE;
 
                ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
-               if (ctrl1000 & ADVERTISE_1000HALF)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Half;
-               if (ctrl1000 & ADVERTISE_1000FULL)
-                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+               if (mii->supports_gmii)
+                       ecmd->advertising |=
+                                       mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
 
                if (bmsr & BMSR_ANEGCOMPLETE) {
                        ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
-                       if (stat1000 & LPA_1000HALF)
-                               ecmd->lp_advertising |=
-                                       ADVERTISED_1000baseT_Half;
-                       if (stat1000 & LPA_1000FULL)
-                               ecmd->lp_advertising |=
-                                       ADVERTISED_1000baseT_Full;
+                       ecmd->lp_advertising |=
+                                       mii_stat1000_to_ethtool_lpa_t(stat1000);
                } else {
                        ecmd->lp_advertising = 0;
                }
@@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
                        advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
                        tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
                }
-               if (ecmd->advertising & ADVERTISED_10baseT_Half)
-                       tmp |= ADVERTISE_10HALF;
-               if (ecmd->advertising & ADVERTISED_10baseT_Full)
-                       tmp |= ADVERTISE_10FULL;
-               if (ecmd->advertising & ADVERTISED_100baseT_Half)
-                       tmp |= ADVERTISE_100HALF;
-               if (ecmd->advertising & ADVERTISED_100baseT_Full)
-                       tmp |= ADVERTISE_100FULL;
-               if (mii->supports_gmii) {
-                       if (ecmd->advertising & ADVERTISED_1000baseT_Half)
-                               tmp2 |= ADVERTISE_1000HALF;
-                       if (ecmd->advertising & ADVERTISED_1000baseT_Full)
-                               tmp2 |= ADVERTISE_1000FULL;
-               }
+               tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+               if (mii->supports_gmii)
+                       tmp2 |=
+                             ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
                if (advert != tmp) {
                        mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
                        mii->advertising = tmp;
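The mii.c (and niu) rewrites lean on the new MII/ethtool conversion helpers such as mii_adv_to_ethtool_adv_t(), mii_ctrl1000_to_ethtool_adv_t() and ethtool_adv_to_mii_adv_t(), which replace long per-bit translation chains. A hedged sketch of reading a PHY's advertised modes with them (foo_get_advertising() is illustrative):

#include <linux/mii.h>

static u32 foo_get_advertising(struct mii_if_info *mii)
{
        int advert, ctrl1000;
        u32 advertising;

        advert = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
        ctrl1000 = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);

        /* one helper call each instead of testing ADVERTISE_10HALF/FULL,
         * ADVERTISE_100HALF/FULL and ADVERTISE_1000HALF/FULL by hand */
        advertising = mii_adv_to_ethtool_adv_t(advert);
        advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

        return advertising;
}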
index 6539189..daec9b0 100644 (file)
@@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
        return 0;
 }
 
+static int mdiobb_reset(struct mii_bus *bus)
+{
+       struct mdiobb_ctrl *ctrl = bus->priv;
+       if (ctrl->reset)
+               ctrl->reset(bus);
+       return 0;
+}
+
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
        struct mii_bus *bus;
@@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 
        bus->read = mdiobb_read;
        bus->write = mdiobb_write;
+       bus->reset = mdiobb_reset;
        bus->priv = ctrl;
 
        return bus;
index 2843c90..89c5a3e 100644 (file)
@@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
                goto out;
 
        bitbang->ctrl.ops = &mdio_gpio_ops;
+       bitbang->ctrl.reset = pdata->reset;
        bitbang->mdc = pdata->mdc;
        bitbang->mdio = pdata->mdio;
 
index 83a5a5a..f320f46 100644 (file)
@@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev)
        if (adv < 0)
                return adv;
 
-       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | 
+       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
                 ADVERTISE_PAUSE_ASYM);
-       if (advertise & ADVERTISED_10baseT_Half)
-               adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               adv |= ADVERTISE_100FULL;
-       if (advertise & ADVERTISED_Pause)
-               adv |= ADVERTISE_PAUSE_CAP;
-       if (advertise & ADVERTISED_Asym_Pause)
-               adv |= ADVERTISE_PAUSE_ASYM;
+       adv |= ethtool_adv_to_mii_adv_t(advertise);
 
        if (adv != oldadv) {
                err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev)
                        return adv;
 
                adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
-               if (advertise & SUPPORTED_1000baseT_Half)
-                       adv |= ADVERTISE_1000HALF;
-               if (advertise & SUPPORTED_1000baseT_Full)
-                       adv |= ADVERTISE_1000FULL;
+               adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
 
                if (adv != oldadv) {
                        err = phy_write(phydev, MII_CTRL1000, adv);
index f8a6853..c1c9293 100644 (file)
@@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock)
 {
        spin_lock(&chan_lock);
        clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-       rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+       RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
        spin_unlock(&chan_lock);
        synchronize_rcu();
 }
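The macvtap and pptp hunks switch rcu_assign_pointer(p, NULL) to RCU_INIT_POINTER(p, NULL): storing NULL publishes no structure for readers to dereference, so the memory barrier implied by rcu_assign_pointer() is not needed. A short hedged sketch (foo_slot and the surrounding objects are hypothetical):

        /* publishing a new object: readers may follow the pointer, so the
         * ordering guarantee of rcu_assign_pointer() is required */
        rcu_assign_pointer(foo_slot, new_obj);

        /* clearing the slot: NULL exposes no data, so the cheaper
         * RCU_INIT_POINTER() is sufficient */
        RCU_INIT_POINTER(foo_slot, NULL);
        synchronize_rcu();
        kfree(old_obj);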
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644 (file)
index 0000000..248a144
--- /dev/null
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+       tristate "Ethernet team driver support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       ---help---
+         This allows one to create virtual interfaces that team together
+         multiple ethernet devices.
+
+         Team devices can be added using the "ip" command from the
+         iproute2 package:
+
+         "ip link add link [ address MAC ] [ NAME ] type team"
+
+         To compile this driver as a module, choose M here: the module
+         will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+       tristate "Round-robin mode support"
+       depends on NET_TEAM
+       ---help---
+         Basic mode where the port used for transmitting packets is selected
+         in round-robin fashion using a packet counter.
+
+         All added ports are set up to have the team's mac address.
+
+         To compile this team mode as a module, choose M here: the module
+         will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+       tristate "Active-backup mode support"
+       depends on NET_TEAM
+       ---help---
+         Only one port is active at a time and the rest of the ports are
+         used for backup.
+
+         Mac addresses of ports are not modified. Userspace is responsible
+         for changing them when needed.
+
+         To compile this team mode as a module, choose M here: the module
+         will be called team_mode_activebackup.
+
+endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644 (file)
index 0000000..85f2028
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644 (file)
index 0000000..ed2a862
--- /dev/null
@@ -0,0 +1,1684 @@
+/*
+ * net/drivers/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+       struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+       return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+       struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+       return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change the MAC address of an open port device is
+ * tested in team_port_add, this function can be called without checking
+ * the return value.
+ */
+static int __set_port_mac(struct net_device *port_dev,
+                         const unsigned char *dev_addr)
+{
+       struct sockaddr addr;
+
+       memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+       addr.sa_family = ARPHRD_ETHER;
+       return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+       return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+       return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+       struct team_option *option;
+
+       list_for_each_entry(option, &team->option_list, list) {
+               if (strcmp(option->name, opt_name) == 0)
+                       return option;
+       }
+       return NULL;
+}
+
+int team_options_register(struct team *team,
+                         const struct team_option *option,
+                         size_t option_count)
+{
+       int i;
+       struct team_option **dst_opts;
+       int err;
+
+       dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+                          GFP_KERNEL);
+       if (!dst_opts)
+               return -ENOMEM;
+       for (i = 0; i < option_count; i++, option++) {
+               if (__team_find_option(team, option->name)) {
+                       err = -EEXIST;
+                       goto rollback;
+               }
+               dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+               if (!dst_opts[i]) {
+                       err = -ENOMEM;
+                       goto rollback;
+               }
+       }
+
+       for (i = 0; i < option_count; i++)
+               list_add_tail(&dst_opts[i]->list, &team->option_list);
+
+       kfree(dst_opts);
+       return 0;
+
+rollback:
+       for (i = 0; i < option_count; i++)
+               kfree(dst_opts[i]);
+
+       kfree(dst_opts);
+       return err;
+}
+EXPORT_SYMBOL(team_options_register);
+
+static void __team_options_change_check(struct team *team,
+                                       struct team_option *changed_option);
+
+static void __team_options_unregister(struct team *team,
+                                     const struct team_option *option,
+                                     size_t option_count)
+{
+       int i;
+
+       for (i = 0; i < option_count; i++, option++) {
+               struct team_option *del_opt;
+
+               del_opt = __team_find_option(team, option->name);
+               if (del_opt) {
+                       list_del(&del_opt->list);
+                       kfree(del_opt);
+               }
+       }
+}
+
+void team_options_unregister(struct team *team,
+                            const struct team_option *option,
+                            size_t option_count)
+{
+       __team_options_unregister(team, option, option_count);
+       __team_options_change_check(team, NULL);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+                          void *arg)
+{
+       return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+                          void *arg)
+{
+       int err;
+
+       err = option->setter(team, arg);
+       if (err)
+               return err;
+
+       __team_options_change_check(team, option);
+       return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+       struct team_mode *mode;
+
+       list_for_each_entry(mode, &mode_list, list) {
+               if (strcmp(mode->kind, kind) == 0)
+                       return mode;
+       }
+       return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+       while (*name != '\0') {
+               if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+                       return false;
+               name++;
+       }
+       return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+       int err = 0;
+
+       if (!is_good_mode_name(mode->kind) ||
+           mode->priv_size > TEAM_MODE_PRIV_SIZE)
+               return -EINVAL;
+       spin_lock(&mode_list_lock);
+       if (__find_mode(mode->kind)) {
+               err = -EEXIST;
+               goto unlock;
+       }
+       list_add_tail(&mode->list, &mode_list);
+unlock:
+       spin_unlock(&mode_list_lock);
+       return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+       spin_lock(&mode_list_lock);
+       list_del_init(&mode->list);
+       spin_unlock(&mode_list_lock);
+       return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
+static struct team_mode *team_mode_get(const char *kind)
+{
+       struct team_mode *mode;
+
+       spin_lock(&mode_list_lock);
+       mode = __find_mode(kind);
+       if (!mode) {
+               spin_unlock(&mode_list_lock);
+               request_module("team-mode-%s", kind);
+               spin_lock(&mode_list_lock);
+               mode = __find_mode(kind);
+       }
+       if (mode)
+               if (!try_module_get(mode->owner))
+                       mode = NULL;
+
+       spin_unlock(&mode_list_lock);
+       return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+       module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static rx_handler_result_t team_dummy_receive(struct team *team,
+                                             struct team_port *port,
+                                             struct sk_buff *skb)
+{
+       return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+       /*
+        * To avoid checks in rx/tx skb paths, ensure here that non-null and
+        * correct ops are always set.
+        */
+
+       if (list_empty(&team->port_list) ||
+           !team->mode || !team->mode->ops->transmit)
+               team->ops.transmit = team_dummy_transmit;
+       else
+               team->ops.transmit = team->mode->ops->transmit;
+
+       if (list_empty(&team->port_list) ||
+           !team->mode || !team->mode->ops->receive)
+               team->ops.receive = team_dummy_receive;
+       else
+               team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can rely on the fact that no port is present at the time of a mode
+ * change. Therefore no packets are in flight and there is no need to set
+ * up the mode operations in any special way.
+ */
+static int __team_change_mode(struct team *team,
+                             const struct team_mode *new_mode)
+{
+       /* Check if mode was previously set and do cleanup if so */
+       if (team->mode) {
+               void (*exit_op)(struct team *team) = team->ops.exit;
+
+               /* Clear ops area so no callback is called any longer */
+               memset(&team->ops, 0, sizeof(struct team_mode_ops));
+               team_adjust_ops(team);
+
+               if (exit_op)
+                       exit_op(team);
+               team_mode_put(team->mode);
+               team->mode = NULL;
+               /* zero private data area */
+               memset(&team->mode_priv, 0,
+                      sizeof(struct team) - offsetof(struct team, mode_priv));
+       }
+
+       if (!new_mode)
+               return 0;
+
+       if (new_mode->ops->init) {
+               int err;
+
+               err = new_mode->ops->init(team);
+               if (err)
+                       return err;
+       }
+
+       team->mode = new_mode;
+       memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+       team_adjust_ops(team);
+
+       return 0;
+}
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+       struct team_mode *new_mode;
+       struct net_device *dev = team->dev;
+       int err;
+
+       if (!list_empty(&team->port_list)) {
+               netdev_err(dev, "No ports can be present during mode change\n");
+               return -EBUSY;
+       }
+
+       if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+               netdev_err(dev, "Unable to change to the same mode the team is in\n");
+               return -EINVAL;
+       }
+
+       new_mode = team_mode_get(kind);
+       if (!new_mode) {
+               netdev_err(dev, "Mode \"%s\" not found\n", kind);
+               return -EINVAL;
+       }
+
+       err = __team_change_mode(team, new_mode);
+       if (err) {
+               netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+               team_mode_put(new_mode);
+               return err;
+       }
+
+       netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+       return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct team_port *port;
+       struct team *team;
+       rx_handler_result_t res;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return RX_HANDLER_CONSUMED;
+
+       *pskb = skb;
+
+       port = team_port_get_rcu(skb->dev);
+       team = port->team;
+
+       res = team->ops.receive(team, port, skb);
+       if (res == RX_HANDLER_ANOTHER) {
+               struct team_pcpu_stats *pcpu_stats;
+
+               pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->rx_packets++;
+               pcpu_stats->rx_bytes += skb->len;
+               if (skb->pkt_type == PACKET_MULTICAST)
+                       pcpu_stats->rx_multicast++;
+               u64_stats_update_end(&pcpu_stats->syncp);
+
+               skb->dev = team->dev;
+       } else {
+               this_cpu_inc(team->pcpu_stats->rx_dropped);
+       }
+
+       return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+                          const struct team_port *port)
+{
+       struct team_port *cur;
+
+       list_for_each_entry(cur, &team->port_list, list)
+               if (cur == port)
+                       return true;
+       return false;
+}
+
+/*
+ * Add/delete a port to/from the team port list. Writes are guarded by
+ * rtnl_lock. Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+                                   struct team_port *port)
+{
+       port->index = team->port_count++;
+       hlist_add_head_rcu(&port->hlist,
+                          team_port_index_hash(team, port->index));
+       list_add_tail_rcu(&port->list, &team->port_list);
+}
+
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+       int i;
+       struct team_port *port;
+
+       for (i = rm_index + 1; i < team->port_count; i++) {
+               port = team_get_port_by_index(team, i);
+               hlist_del_rcu(&port->hlist);
+               port->index--;
+               hlist_add_head_rcu(&port->hlist,
+                                  team_port_index_hash(team, port->index));
+       }
+}
+
+static void team_port_list_del_port(struct team *team,
+                                  struct team_port *port)
+{
+       int rm_index = port->index;
+
+       hlist_del_rcu(&port->hlist);
+       list_del_rcu(&port->list);
+       __reconstruct_port_hlist(team, rm_index);
+       team->port_count--;
+}
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+                           NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+                           NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+static void __team_compute_features(struct team *team)
+{
+       struct team_port *port;
+       u32 vlan_features = TEAM_VLAN_FEATURES;
+       unsigned short max_hard_header_len = ETH_HLEN;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               vlan_features = netdev_increment_features(vlan_features,
+                                       port->dev->vlan_features,
+                                       TEAM_VLAN_FEATURES);
+
+               if (port->dev->hard_header_len > max_hard_header_len)
+                       max_hard_header_len = port->dev->hard_header_len;
+       }
+
+       team->dev->vlan_features = vlan_features;
+       team->dev->hard_header_len = max_hard_header_len;
+
+       netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+       mutex_lock(&team->lock);
+       __team_compute_features(team);
+       mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+       int err = 0;
+
+       dev_hold(team->dev);
+       port->dev->priv_flags |= IFF_TEAM_PORT;
+       if (team->ops.port_enter) {
+               err = team->ops.port_enter(team, port);
+               if (err) {
+                       netdev_err(team->dev, "Device %s failed to enter team mode\n",
+                                  port->dev->name);
+                       goto err_port_enter;
+               }
+       }
+
+       return 0;
+
+err_port_enter:
+       port->dev->priv_flags &= ~IFF_TEAM_PORT;
+       dev_put(team->dev);
+
+       return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+       if (team->ops.port_leave)
+               team->ops.port_leave(team, port);
+       port->dev->priv_flags &= ~IFF_TEAM_PORT;
+       dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
+static int team_port_add(struct team *team, struct net_device *port_dev)
+{
+       struct net_device *dev = team->dev;
+       struct team_port *port;
+       char *portname = port_dev->name;
+       int err;
+
+       if (port_dev->flags & IFF_LOOPBACK ||
+           port_dev->type != ARPHRD_ETHER) {
+               netdev_err(dev, "Device %s is of an unsupported type\n",
+                          portname);
+               return -EINVAL;
+       }
+
+       if (team_port_exists(port_dev)) {
+               netdev_err(dev, "Device %s is already a port "
+                               "of a team device\n", portname);
+               return -EBUSY;
+       }
+
+       if (port_dev->flags & IFF_UP) {
+               netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+                          portname);
+               return -EBUSY;
+       }
+
+       port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+       if (!port)
+               return -ENOMEM;
+
+       port->dev = port_dev;
+       port->team = team;
+
+       port->orig.mtu = port_dev->mtu;
+       err = dev_set_mtu(port_dev, dev->mtu);
+       if (err) {
+               netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+               goto err_set_mtu;
+       }
+
+       memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+
+       err = team_port_enter(team, port);
+       if (err) {
+               netdev_err(dev, "Device %s failed to enter team mode\n",
+                          portname);
+               goto err_port_enter;
+       }
+
+       err = dev_open(port_dev);
+       if (err) {
+               netdev_dbg(dev, "Device %s opening failed\n",
+                          portname);
+               goto err_dev_open;
+       }
+
+       err = vlan_vids_add_by_dev(port_dev, dev);
+       if (err) {
+               netdev_err(dev, "Failed to add vlan ids to device %s\n",
+                               portname);
+               goto err_vids_add;
+       }
+
+       err = netdev_set_master(port_dev, dev);
+       if (err) {
+               netdev_err(dev, "Device %s failed to set master\n", portname);
+               goto err_set_master;
+       }
+
+       err = netdev_rx_handler_register(port_dev, team_handle_frame,
+                                        port);
+       if (err) {
+               netdev_err(dev, "Device %s failed to register rx_handler\n",
+                          portname);
+               goto err_handler_register;
+       }
+
+       team_port_list_add_port(team, port);
+       team_adjust_ops(team);
+       __team_compute_features(team);
+       __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+
+       netdev_info(dev, "Port device %s added\n", portname);
+
+       return 0;
+
+err_handler_register:
+       netdev_set_master(port_dev, NULL);
+
+err_set_master:
+       vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
+       dev_close(port_dev);
+
+err_dev_open:
+       team_port_leave(team, port);
+       team_port_set_orig_mac(port);
+
+err_port_enter:
+       dev_set_mtu(port_dev, port->orig.mtu);
+
+err_set_mtu:
+       kfree(port);
+
+       return err;
+}
+
+static int team_port_del(struct team *team, struct net_device *port_dev)
+{
+       struct net_device *dev = team->dev;
+       struct team_port *port;
+       char *portname = port_dev->name;
+
+       port = team_port_get_rtnl(port_dev);
+       if (!port || !team_port_find(team, port)) {
+               netdev_err(dev, "Device %s does not act as a port of this team\n",
+                          portname);
+               return -ENOENT;
+       }
+
+       __team_port_change_check(port, false);
+       team_port_list_del_port(team, port);
+       team_adjust_ops(team);
+       netdev_rx_handler_unregister(port_dev);
+       netdev_set_master(port_dev, NULL);
+       vlan_vids_del_by_dev(port_dev, dev);
+       dev_close(port_dev);
+       team_port_leave(team, port);
+       team_port_set_orig_mac(port);
+       dev_set_mtu(port_dev, port->orig.mtu);
+       synchronize_rcu();
+       kfree(port);
+       netdev_info(dev, "Port device %s removed\n", portname);
+       __team_compute_features(team);
+
+       return 0;
+}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+       const char **str = arg;
+
+       *str = team->mode ? team->mode->kind : team_no_mode_kind;
+       return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+       const char **str = arg;
+
+       return team_change_mode(team, *str);
+}
+
+static const struct team_option team_options[] = {
+       {
+               .name = "mode",
+               .type = TEAM_OPTION_TYPE_STRING,
+               .getter = team_mode_option_get,
+               .setter = team_mode_option_set,
+       },
+};
+
+static int team_init(struct net_device *dev)
+{
+       struct team *team = netdev_priv(dev);
+       int i;
+       int err;
+
+       team->dev = dev;
+       mutex_init(&team->lock);
+
+       team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+       if (!team->pcpu_stats)
+               return -ENOMEM;
+
+       for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
+               INIT_HLIST_HEAD(&team->port_hlist[i]);
+       INIT_LIST_HEAD(&team->port_list);
+
+       team_adjust_ops(team);
+
+       INIT_LIST_HEAD(&team->option_list);
+       err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
+       if (err)
+               goto err_options_register;
+       netif_carrier_off(dev);
+
+       return 0;
+
+err_options_register:
+       free_percpu(team->pcpu_stats);
+
+       return err;
+}
+
+static void team_uninit(struct net_device *dev)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+       struct team_port *tmp;
+
+       mutex_lock(&team->lock);
+       list_for_each_entry_safe(port, tmp, &team->port_list, list)
+               team_port_del(team, port->dev);
+
+       __team_change_mode(team, NULL); /* cleanup */
+       __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+       mutex_unlock(&team->lock);
+}
+
+static void team_destructor(struct net_device *dev)
+{
+       struct team *team = netdev_priv(dev);
+
+       free_percpu(team->pcpu_stats);
+       free_netdev(dev);
+}
+
+static int team_open(struct net_device *dev)
+{
+       netif_carrier_on(dev);
+       return 0;
+}
+
+static int team_close(struct net_device *dev)
+{
+       netif_carrier_off(dev);
+       return 0;
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct team *team = netdev_priv(dev);
+       bool tx_success = false;
+       unsigned int len = skb->len;
+
+       tx_success = team->ops.transmit(team, skb);
+       if (tx_success) {
+               struct team_pcpu_stats *pcpu_stats;
+
+               pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->tx_packets++;
+               pcpu_stats->tx_bytes += len;
+               u64_stats_update_end(&pcpu_stats->syncp);
+       } else {
+               this_cpu_inc(team->pcpu_stats->tx_dropped);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static void team_change_rx_flags(struct net_device *dev, int change)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+       int inc;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list) {
+               if (change & IFF_PROMISC) {
+                       inc = dev->flags & IFF_PROMISC ? 1 : -1;
+                       dev_set_promiscuity(port->dev, inc);
+               }
+               if (change & IFF_ALLMULTI) {
+                       inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
+                       dev_set_allmulti(port->dev, inc);
+               }
+       }
+       rcu_read_unlock();
+}
+
+static void team_set_rx_mode(struct net_device *dev)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list) {
+               dev_uc_sync(port->dev, dev);
+               dev_mc_sync(port->dev, dev);
+       }
+       rcu_read_unlock();
+}
+
+static int team_set_mac_address(struct net_device *dev, void *p)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+       struct sockaddr *addr = p;
+
+       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list)
+               if (team->ops.port_change_mac)
+                       team->ops.port_change_mac(team, port);
+       rcu_read_unlock();
+       return 0;
+}
+
+static int team_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+       int err;
+
+       /*
+        * Although this is a reader, it's guarded by the team lock. It's not
+        * possible to traverse the list in reverse under rcu_read_lock.
+        */
+       mutex_lock(&team->lock);
+       list_for_each_entry(port, &team->port_list, list) {
+               err = dev_set_mtu(port->dev, new_mtu);
+               if (err) {
+                       netdev_err(dev, "Device %s failed to change mtu",
+                                  port->dev->name);
+                       goto unwind;
+               }
+       }
+       mutex_unlock(&team->lock);
+
+       dev->mtu = new_mtu;
+
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(port, &team->port_list, list)
+               dev_set_mtu(port->dev, dev->mtu);
+       mutex_unlock(&team->lock);
+
+       return err;
+}
+
+static struct rtnl_link_stats64 *
+team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_pcpu_stats *p;
+       u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+       u32 rx_dropped = 0, tx_dropped = 0;
+       unsigned int start;
+       int i;
+
+       for_each_possible_cpu(i) {
+               p = per_cpu_ptr(team->pcpu_stats, i);
+               do {
+                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       rx_packets      = p->rx_packets;
+                       rx_bytes        = p->rx_bytes;
+                       rx_multicast    = p->rx_multicast;
+                       tx_packets      = p->tx_packets;
+                       tx_bytes        = p->tx_bytes;
+               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+               stats->rx_packets       += rx_packets;
+               stats->rx_bytes         += rx_bytes;
+               stats->multicast        += rx_multicast;
+               stats->tx_packets       += tx_packets;
+               stats->tx_bytes         += tx_bytes;
+               /*
+                * rx_dropped & tx_dropped are u32, updated
+                * without syncp protection.
+                */
+               rx_dropped      += p->rx_dropped;
+               tx_dropped      += p->tx_dropped;
+       }
+       stats->rx_dropped       = rx_dropped;
+       stats->tx_dropped       = tx_dropped;
+       return stats;
+}
+
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+       int err;
+
+       /*
+        * Although this is a reader, it's guarded by the team lock. It's not
+        * possible to traverse the list in reverse under rcu_read_lock.
+        */
+       mutex_lock(&team->lock);
+       list_for_each_entry(port, &team->port_list, list) {
+               err = vlan_vid_add(port->dev, vid);
+               if (err)
+                       goto unwind;
+       }
+       mutex_unlock(&team->lock);
+
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(port, &team->port_list, list)
+               vlan_vid_del(port->dev, vid);
+       mutex_unlock(&team->lock);
+
+       return err;
+}
+
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+       struct team *team = netdev_priv(dev);
+       struct team_port *port;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list)
+               vlan_vid_del(port->dev, vid);
+       rcu_read_unlock();
+
+       return 0;
+}
+
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+       struct team *team = netdev_priv(dev);
+       int err;
+
+       mutex_lock(&team->lock);
+       err = team_port_add(team, port_dev);
+       mutex_unlock(&team->lock);
+       return err;
+}
+
+static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+       struct team *team = netdev_priv(dev);
+       int err;
+
+       mutex_lock(&team->lock);
+       err = team_port_del(team, port_dev);
+       mutex_unlock(&team->lock);
+       return err;
+}
+
+static netdev_features_t team_fix_features(struct net_device *dev,
+                                          netdev_features_t features)
+{
+       struct team_port *port;
+       struct team *team = netdev_priv(dev);
+       netdev_features_t mask;
+
+       mask = features;
+       features &= ~NETIF_F_ONE_FOR_ALL;
+       features |= NETIF_F_ALL_FOR_ALL;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list) {
+               features = netdev_increment_features(features,
+                                                    port->dev->features,
+                                                    mask);
+       }
+       rcu_read_unlock();
+       return features;
+}
+
+static const struct net_device_ops team_netdev_ops = {
+       .ndo_init               = team_init,
+       .ndo_uninit             = team_uninit,
+       .ndo_open               = team_open,
+       .ndo_stop               = team_close,
+       .ndo_start_xmit         = team_xmit,
+       .ndo_change_rx_flags    = team_change_rx_flags,
+       .ndo_set_rx_mode        = team_set_rx_mode,
+       .ndo_set_mac_address    = team_set_mac_address,
+       .ndo_change_mtu         = team_change_mtu,
+       .ndo_get_stats64        = team_get_stats64,
+       .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
+       .ndo_add_slave          = team_add_slave,
+       .ndo_del_slave          = team_del_slave,
+       .ndo_fix_features       = team_fix_features,
+};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
+static void team_setup(struct net_device *dev)
+{
+       ether_setup(dev);
+
+       dev->netdev_ops = &team_netdev_ops;
+       dev->destructor = team_destructor;
+       dev->tx_queue_len = 0;
+       dev->flags |= IFF_MULTICAST;
+       dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+
+       /*
+        * Indicate that we support unicast address filtering. That way the
+        * core won't bring us to promiscuous mode in case a unicast address
+        * is added. Leave this up to the underlying port drivers.
+        */
+       dev->priv_flags |= IFF_UNICAST_FLT;
+
+       dev->features |= NETIF_F_LLTX;
+       dev->features |= NETIF_F_GRO;
+       dev->hw_features = NETIF_F_HW_VLAN_TX |
+                          NETIF_F_HW_VLAN_RX |
+                          NETIF_F_HW_VLAN_FILTER;
+
+       dev->features |= dev->hw_features;
+}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       int err;
+
+       if (tb[IFLA_ADDRESS] == NULL)
+               random_ether_addr(dev->dev_addr);
+
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+       return 0;
+}
+
+static struct rtnl_link_ops team_link_ops __read_mostly = {
+       .kind           = DRV_NAME,
+       .priv_size      = sizeof(struct team),
+       .setup          = team_setup,
+       .newlink        = team_newlink,
+       .validate       = team_validate,
+};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
+static struct genl_family team_nl_family = {
+       .id             = GENL_ID_GENERATE,
+       .name           = TEAM_GENL_NAME,
+       .version        = TEAM_GENL_VERSION,
+       .maxattr        = TEAM_ATTR_MAX,
+       .netnsok        = true,
+};
+
+static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
+       [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
+       [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
+       [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
+       [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+       [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
+       [TEAM_ATTR_OPTION_NAME] = {
+               .type = NLA_STRING,
+               .len = TEAM_STRING_MAX_LEN,
+       },
+       [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
+       [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
+       [TEAM_ATTR_OPTION_DATA] = {
+               .type = NLA_BINARY,
+               .len = TEAM_STRING_MAX_LEN,
+       },
+};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *msg;
+       void *hdr;
+       int err;
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+                         &team_nl_family, 0, TEAM_CMD_NOOP);
+       if (!hdr) {
+               err = -EMSGSIZE;
+               goto err_msg_put;
+       }
+
+       genlmsg_end(msg, hdr);
+
+       return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+       nlmsg_free(msg);
+
+       return err;
+}
+
+/*
+ * Netlink cmd functions should be guarded by the following two functions.
+ * Since dev gets held here, it is ensured that dev won't disappear in between.
+ */
+static struct team *team_nl_team_get(struct genl_info *info)
+{
+       struct net *net = genl_info_net(info);
+       int ifindex;
+       struct net_device *dev;
+       struct team *team;
+
+       if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
+               return NULL;
+
+       ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
+       dev = dev_get_by_index(net, ifindex);
+       if (!dev || dev->netdev_ops != &team_netdev_ops) {
+               if (dev)
+                       dev_put(dev);
+               return NULL;
+       }
+
+       team = netdev_priv(dev);
+       mutex_lock(&team->lock);
+       return team;
+}
+
+static void team_nl_team_put(struct team *team)
+{
+       mutex_unlock(&team->lock);
+       dev_put(team->dev);
+}
+
+static int team_nl_send_generic(struct genl_info *info, struct team *team,
+                               int (*fill_func)(struct sk_buff *skb,
+                                                struct genl_info *info,
+                                                int flags, struct team *team))
+{
+       struct sk_buff *skb;
+       int err;
+
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       err = fill_func(skb, info, NLM_F_ACK, team);
+       if (err < 0)
+               goto err_fill;
+
+       err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+       return err;
+
+err_fill:
+       nlmsg_free(skb);
+       return err;
+}
+
+static int team_nl_fill_options_get_changed(struct sk_buff *skb,
+                                           u32 pid, u32 seq, int flags,
+                                           struct team *team,
+                                           struct team_option *changed_option)
+{
+       struct nlattr *option_list;
+       void *hdr;
+       struct team_option *option;
+
+       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+                         TEAM_CMD_OPTIONS_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+       option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+       if (!option_list)
+               return -EMSGSIZE;
+
+       list_for_each_entry(option, &team->option_list, list) {
+               struct nlattr *option_item;
+               long arg;
+
+               option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+               if (!option_item)
+                       goto nla_put_failure;
+               NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+               if (option == changed_option)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+               switch (option->type) {
+               case TEAM_OPTION_TYPE_U32:
+                       NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+                       team_option_get(team, option, &arg);
+                       NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+                       break;
+               case TEAM_OPTION_TYPE_STRING:
+                       NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+                       team_option_get(team, option, &arg);
+                       NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+                                      (char *) arg);
+                       break;
+               default:
+                       BUG();
+               }
+               nla_nest_end(skb, option_item);
+       }
+
+       nla_nest_end(skb, option_list);
+       return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+       return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+                                   struct genl_info *info, int flags,
+                                   struct team *team)
+{
+       return team_nl_fill_options_get_changed(skb, info->snd_pid,
+                                               info->snd_seq, NLM_F_ACK,
+                                               team, NULL);
+}
+
+static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct team *team;
+       int err;
+
+       team = team_nl_team_get(info);
+       if (!team)
+               return -EINVAL;
+
+       err = team_nl_send_generic(info, team, team_nl_fill_options_get);
+
+       team_nl_team_put(team);
+
+       return err;
+}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct team *team;
+       int err = 0;
+       int i;
+       struct nlattr *nl_option;
+
+       team = team_nl_team_get(info);
+       if (!team)
+               return -EINVAL;
+
+       err = -EINVAL;
+       if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+               err = -EINVAL;
+               goto team_put;
+       }
+
+       nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+               struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+               enum team_option_type opt_type;
+               struct team_option *option;
+               char *opt_name;
+               bool opt_found = false;
+
+               if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+                       err = -EINVAL;
+                       goto team_put;
+               }
+               err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+                                      nl_option, team_nl_option_policy);
+               if (err)
+                       goto team_put;
+               if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+                   !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+                   !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+                       err = -EINVAL;
+                       goto team_put;
+               }
+               switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+               case NLA_U32:
+                       opt_type = TEAM_OPTION_TYPE_U32;
+                       break;
+               case NLA_STRING:
+                       opt_type = TEAM_OPTION_TYPE_STRING;
+                       break;
+               default:
+                       goto team_put;
+               }
+
+               opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+               list_for_each_entry(option, &team->option_list, list) {
+                       long arg;
+                       struct nlattr *opt_data_attr;
+
+                       if (option->type != opt_type ||
+                           strcmp(option->name, opt_name))
+                               continue;
+                       opt_found = true;
+                       opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+                       switch (opt_type) {
+                       case TEAM_OPTION_TYPE_U32:
+                               arg = nla_get_u32(opt_data_attr);
+                               break;
+                       case TEAM_OPTION_TYPE_STRING:
+                               arg = (long) nla_data(opt_data_attr);
+                               break;
+                       default:
+                               BUG();
+                       }
+                       err = team_option_set(team, option, &arg);
+                       if (err)
+                               goto team_put;
+               }
+               if (!opt_found) {
+                       err = -ENOENT;
+                       goto team_put;
+               }
+       }
+
+team_put:
+       team_nl_team_put(team);
+
+       return err;
+}
+
+static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
+                                             u32 pid, u32 seq, int flags,
+                                             struct team *team,
+                                             struct team_port *changed_port)
+{
+       struct nlattr *port_list;
+       void *hdr;
+       struct team_port *port;
+
+       hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+                         TEAM_CMD_PORT_LIST_GET);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+       port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+       if (!port_list)
+               return -EMSGSIZE;
+
+       list_for_each_entry(port, &team->port_list, list) {
+               struct nlattr *port_item;
+
+               port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+               if (!port_item)
+                       goto nla_put_failure;
+               NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+               if (port == changed_port)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+               if (port->linkup)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+               NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+               NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+               nla_nest_end(skb, port_item);
+       }
+
+       nla_nest_end(skb, port_list);
+       return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+       genlmsg_cancel(skb, hdr);
+       return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+                                     struct genl_info *info, int flags,
+                                     struct team *team)
+{
+       return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
+                                                 info->snd_seq, NLM_F_ACK,
+                                                 team, NULL);
+}
+
+static int team_nl_cmd_port_list_get(struct sk_buff *skb,
+                                    struct genl_info *info)
+{
+       struct team *team;
+       int err;
+
+       team = team_nl_team_get(info);
+       if (!team)
+               return -EINVAL;
+
+       err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
+
+       team_nl_team_put(team);
+
+       return err;
+}
+
+static struct genl_ops team_nl_ops[] = {
+       {
+               .cmd = TEAM_CMD_NOOP,
+               .doit = team_nl_cmd_noop,
+               .policy = team_nl_policy,
+       },
+       {
+               .cmd = TEAM_CMD_OPTIONS_SET,
+               .doit = team_nl_cmd_options_set,
+               .policy = team_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = TEAM_CMD_OPTIONS_GET,
+               .doit = team_nl_cmd_options_get,
+               .policy = team_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+       {
+               .cmd = TEAM_CMD_PORT_LIST_GET,
+               .doit = team_nl_cmd_port_list_get,
+               .policy = team_nl_policy,
+               .flags = GENL_ADMIN_PERM,
+       },
+};
+
+static struct genl_multicast_group team_change_event_mcgrp = {
+       .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+};
+
+static int team_nl_send_event_options_get(struct team *team,
+                                         struct team_option *changed_option)
+{
+       struct sk_buff *skb;
+       int err;
+       struct net *net = dev_net(team->dev);
+
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
+                                              changed_option);
+       if (err < 0)
+               goto err_fill;
+
+       err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+                                     GFP_KERNEL);
+       return err;
+
+err_fill:
+       nlmsg_free(skb);
+       return err;
+}
+
+static int team_nl_send_event_port_list_get(struct team_port *port)
+{
+       struct sk_buff *skb;
+       int err;
+       struct net *net = dev_net(port->team->dev);
+
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
+                                                port->team, port);
+       if (err < 0)
+               goto err_fill;
+
+       err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+                                     GFP_KERNEL);
+       return err;
+
+err_fill:
+       nlmsg_free(skb);
+       return err;
+}
+
+static int team_nl_init(void)
+{
+       int err;
+
+       err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
+                                           ARRAY_SIZE(team_nl_ops));
+       if (err)
+               return err;
+
+       err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
+       if (err)
+               goto err_change_event_grp_reg;
+
+       return 0;
+
+err_change_event_grp_reg:
+       genl_unregister_family(&team_nl_family);
+
+       return err;
+}
+
+static void team_nl_fini(void)
+{
+       genl_unregister_family(&team_nl_family);
+}
+
+
+/******************
+ * Change checkers
+ ******************/
+
+static void __team_options_change_check(struct team *team,
+                                       struct team_option *changed_option)
+{
+       int err;
+
+       err = team_nl_send_event_options_get(team, changed_option);
+       if (err)
+               netdev_warn(team->dev, "Failed to send options change via netlink\n");
+}
+
+/* rtnl lock is held */
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+       int err;
+
+       if (port->linkup == linkup)
+               return;
+
+       port->linkup = linkup;
+       if (linkup) {
+               struct ethtool_cmd ecmd;
+
+               err = __ethtool_get_settings(port->dev, &ecmd);
+               if (!err) {
+                       port->speed = ethtool_cmd_speed(&ecmd);
+                       port->duplex = ecmd.duplex;
+                       goto send_event;
+               }
+       }
+       port->speed = 0;
+       port->duplex = 0;
+
+send_event:
+       err = team_nl_send_event_port_list_get(port);
+       if (err)
+               netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
+                           port->dev->name);
+
+}
+
+static void team_port_change_check(struct team_port *port, bool linkup)
+{
+       struct team *team = port->team;
+
+       mutex_lock(&team->lock);
+       __team_port_change_check(port, linkup);
+       mutex_unlock(&team->lock);
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+                            unsigned long event, void *ptr)
+{
+       struct net_device *dev = (struct net_device *) ptr;
+       struct team_port *port;
+
+       port = team_port_get_rtnl(dev);
+       if (!port)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (netif_carrier_ok(dev))
+                       team_port_change_check(port, true);
+               break;
+       case NETDEV_DOWN:
+               team_port_change_check(port, false);
+               break;
+       case NETDEV_CHANGE:
+               if (netif_running(port->dev))
+                       team_port_change_check(port,
+                                              !!netif_carrier_ok(port->dev));
+               break;
+       case NETDEV_UNREGISTER:
+               team_del_slave(port->team->dev, dev);
+               break;
+       case NETDEV_FEAT_CHANGE:
+               team_compute_features(port->team);
+               break;
+       case NETDEV_CHANGEMTU:
+               /* Forbid changing the MTU of an underlying port device */
+               return NOTIFY_BAD;
+       case NETDEV_PRE_TYPE_CHANGE:
+               /* Forbid changing the type of an underlying port device */
+               return NOTIFY_BAD;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block team_notifier_block __read_mostly = {
+       .notifier_call = team_device_event,
+};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init team_module_init(void)
+{
+       int err;
+
+       register_netdevice_notifier(&team_notifier_block);
+
+       err = rtnl_link_register(&team_link_ops);
+       if (err)
+               goto err_rtnl_reg;
+
+       err = team_nl_init();
+       if (err)
+               goto err_nl_init;
+
+       return 0;
+
+err_nl_init:
+       rtnl_link_unregister(&team_link_ops);
+
+err_rtnl_reg:
+       unregister_netdevice_notifier(&team_notifier_block);
+
+       return err;
+}
+
+static void __exit team_module_exit(void)
+{
+       team_nl_fini();
+       rtnl_link_unregister(&team_link_ops);
+       unregister_netdevice_notifier(&team_notifier_block);
+}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644 (file)
index 0000000..f4d960e
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+       struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+       return (struct ab_priv *) &team->mode_priv;
+}
+
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+                                     struct sk_buff *skb)
+{
+       struct team_port *active_port;
+
+       active_port = rcu_dereference(ab_priv(team)->active_port);
+       if (active_port != port)
+               return RX_HANDLER_EXACT;
+       return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct team_port *active_port;
+
+       active_port = rcu_dereference(ab_priv(team)->active_port);
+       if (unlikely(!active_port))
+               goto drop;
+       skb->dev = active_port->dev;
+       if (dev_queue_xmit(skb))
+               return false;
+       return true;
+
+drop:
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+       if (ab_priv(team)->active_port == port)
+               RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+       u32 *ifindex = arg;
+
+       *ifindex = 0;
+       if (ab_priv(team)->active_port)
+               *ifindex = ab_priv(team)->active_port->dev->ifindex;
+       return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+       u32 *ifindex = arg;
+       struct team_port *port;
+
+       list_for_each_entry_rcu(port, &team->port_list, list) {
+               if (port->dev->ifindex == *ifindex) {
+                       rcu_assign_pointer(ab_priv(team)->active_port, port);
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+       {
+               .name = "activeport",
+               .type = TEAM_OPTION_TYPE_U32,
+               .getter = ab_active_port_get,
+               .setter = ab_active_port_set,
+       },
+};
+
+static int ab_init(struct team *team)
+{
+       return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static void ab_exit(struct team *team)
+{
+       team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+       .init                   = ab_init,
+       .exit                   = ab_exit,
+       .receive                = ab_receive,
+       .transmit               = ab_transmit,
+       .port_leave             = ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+       .kind           = "activebackup",
+       .owner          = THIS_MODULE,
+       .priv_size      = sizeof(struct ab_priv),
+       .ops            = &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+       return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+       team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644 (file)
index 0000000..a0e8f80
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+       unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+       return (struct rr_priv *) &team->mode_priv;
+}
+
+static struct team_port *__get_first_port_up(struct team *team,
+                                            struct team_port *port)
+{
+       struct team_port *cur;
+
+       if (port->linkup)
+               return port;
+       cur = port;
+       list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+               if (cur->linkup)
+                       return cur;
+       list_for_each_entry_rcu(cur, &team->port_list, list) {
+               if (cur == port)
+                       break;
+               if (cur->linkup)
+                       return cur;
+       }
+       return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+       struct team_port *port;
+       int port_index;
+
+       port_index = rr_priv(team)->sent_packets++ % team->port_count;
+       port = team_get_port_by_index_rcu(team, port_index);
+       port = __get_first_port_up(team, port);
+       if (unlikely(!port))
+               goto drop;
+       skb->dev = port->dev;
+       if (dev_queue_xmit(skb))
+               return false;
+       return true;
+
+drop:
+       dev_kfree_skb_any(skb);
+       return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+       return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+       team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+       .transmit               = rr_transmit,
+       .port_enter             = rr_port_enter,
+       .port_change_mac        = rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+       .kind           = "roundrobin",
+       .owner          = THIS_MODULE,
+       .priv_size      = sizeof(struct rr_priv),
+       .ops            = &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+       return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+       team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
index 7bea9c6..93c5d72 100644 (file)
@@ -123,7 +123,7 @@ struct tun_struct {
        gid_t                   group;
 
        struct net_device       *dev;
-       u32                     set_features;
+       netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
                          NETIF_F_TSO6|NETIF_F_UFO)
        struct fasync_struct    *fasync;
@@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
@@ -1196,7 +1197,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
  * privs required. */
 static int set_offload(struct tun_struct *tun, unsigned long arg)
 {
-       u32 features = 0;
+       netdev_features_t features = 0;
 
        if (arg & TUN_F_CSUM) {
                features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1590,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
 {
        struct tun_struct *tun = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
        switch (tun->flags & TUN_TYPE_MASK) {
        case TUN_TUN_DEV:
-               strcpy(info->bus_info, "tun");
+               strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
                break;
        case TUN_TAP_DEV:
-               strcpy(info->bus_info, "tap");
+               strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
                break;
        }
 }
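[Editor's note] The tun.c hunks above illustrate two tree-wide conversions in this merge: feature masks move from u32 to the new netdev_features_t type, and ethtool drvinfo strings are filled with bounded strlcpy() calls (fw_version is no longer set to "N/A" by the driver). A minimal sketch of the resulting driver-side pattern; the example_* names are placeholders, not code from this commit.

#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Feature fixup now takes and returns netdev_features_t instead of u32. */
static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* e.g. drop TSO when hardware checksumming is not offered */
	if (!(features & NETIF_F_HW_CSUM))
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	return features;
}

/* drvinfo strings are bounded copies; no manual NUL termination needed. */
static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, "virtual", sizeof(info->bus_info));
}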
index a60d006..331e440 100644 (file)
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
        struct page *page;
        int err;
 
-       page = __netdev_alloc_page(dev, gfp_flags);
+       page = alloc_page(gfp_flags);
        if (!page)
                return -ENOMEM;
 
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
        err = usb_submit_urb(req, gfp_flags);
        if (unlikely(err)) {
                dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
-               netdev_free_page(dev, page);
+               put_page(page);
        }
        return err;
 }
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
        dev->stats.rx_errors++;
 resubmit:
        if (page)
-               netdev_free_page(dev, page);
+               put_page(page);
        if (req)
-               rx_submit(pnd, req, GFP_ATOMIC);
+               rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
        for (i = 0; i < rxq_size; i++) {
                struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
 
-               if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+               if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
                        usbpn_close(dev);
                        return -ENOMEM;
                }
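[Editor's note] The cdc-phonet hunks drop the old netdev page allocator helpers: RX pages now come straight from alloc_page() (with __GFP_COLD hinting that the CPU will not touch the payload soon) and are released with a plain put_page(). A small hedged sketch of that pairing; the example_* helpers are illustrative only.

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *example_rx_page_get(gfp_t gfp_flags)
{
	/* Cold page: payload is destined for DMA, not immediate CPU use. */
	return alloc_page(gfp_flags | __GFP_COLD);
}

static void example_rx_page_put(struct page *page)
{
	/* Plain page reference drop replaces netdev_free_page(). */
	if (page)
		put_page(page);
}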
index f06fb78..009dd0f 100644 (file)
@@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
        int temp;
        u8 iface_no;
 
-       ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return -ENODEV;
 
-       memset(ctx, 0, sizeof(*ctx));
-
        init_timer(&ctx->tx_timer);
        spin_lock_init(&ctx->mtx);
        ctx->netdev = dev->net;
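[Editor's note] The cdc_ncm hunk is a small cleanup: kzalloc() already returns zeroed memory, so the separate memset() is redundant. A minimal illustration under an assumed example_ctx structure (not from this commit):

#include <linux/slab.h>

struct example_ctx {
	int state;
};

static struct example_ctx *example_ctx_alloc(void)
{
	/* kzalloc() == kmalloc() + memset(0), in a single call */
	return kzalloc(sizeof(struct example_ctx), GFP_KERNEL);
}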
index a5b9b12..7d62c39 100644 (file)
@@ -728,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct usbnet *dev = netdev_priv(netdev);
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
index eff6767..56f3894 100644 (file)
@@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
 }
 
 /* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+       netdev_features_t features)
 {
        struct usbnet *dev = netdev_priv(netdev);
        u32 read_buf;
index ef883e9..49f4667 100644 (file)
@@ -27,8 +27,8 @@
 
 struct veth_net_stats {
        u64                     rx_packets;
-       u64                     tx_packets;
        u64                     rx_bytes;
+       u64                     tx_packets;
        u64                     tx_bytes;
        u64                     rx_dropped;
        struct u64_stats_sync   syncp;
@@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->fw_version, "N/A");
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev)
        dev->features |= NETIF_F_LLTX;
        dev->destructor = veth_dev_free;
 
-       dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+       dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
 }
 
 /*
index 6ee8410..609c51f 100644 (file)
@@ -39,6 +39,7 @@ module_param(gso, bool, 0444);
 #define GOOD_COPY_LEN  128
 
 #define VIRTNET_SEND_COMMAND_SG_MAX    2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
 
 struct virtnet_stats {
        struct u64_stats_sync syncp;
@@ -699,6 +700,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
        }
 
        tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -853,7 +855,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        kfree(buf);
 }
 
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -863,9 +865,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+       return 0;
 }
 
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -875,6 +878,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+       return 0;
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -889,7 +893,21 @@ static void virtnet_get_ringparam(struct net_device *dev,
 
 }
 
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+                               struct ethtool_drvinfo *info)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct virtio_device *vdev = vi->vdev;
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
+       .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = virtnet_get_ringparam,
 };
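[Editor's note] virtio_net adopts the new ndo_vlan_rx_add_vid()/ndo_vlan_rx_kill_vid() signatures, which now return an int so the core can see filter-programming failures, and gains an ethtool get_drvinfo handler. A minimal sketch of the new VLAN callback shape; example_* names are placeholders.

#include <linux/netdevice.h>

static int example_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	/* Program the VLAN filter; report failure instead of only warning. */
	if (vid >= 4096)
		return -EINVAL;
	/* ... tell the device about vid ... */
	return 0;
}

static int example_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	/* ... remove vid from the device filter ... */
	return 0;
}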
index d96bfb1..1c2ae11 100644 (file)
@@ -1926,7 +1926,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1943,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
        }
 
        set_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 
-static void
+static int
 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1965,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        }
 
        clear_bit(vid, adapter->active_vlans);
+
+       return 0;
 }
 
 
index e662cbc..b492ee1 100644 (file)
@@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
-       drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
 
        strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
                sizeof(drvinfo->version));
-       drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
-       strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
-       drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
        }
 }
 
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
 {
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        unsigned long flags;
-       u32 changed = features ^ netdev->features;
+       netdev_features_t changed = features ^ netdev->features;
 
        if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
                if (features & NETIF_F_RXCSUM)
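[Editor's note] vmxnet3_set_features() shows the companion idiom to the netdev_features_t conversion: XOR the requested mask against netdev->features to find which bits actually toggled before touching hardware. A small hedged sketch of that idiom (example_* is a placeholder):

#include <linux/netdevice.h>

static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_RXCSUM) {
		/* reprogram RX checksum offload only when the bit flipped */
	}
	return 0;
}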
index b18eac1..ed54797 100644 (file)
@@ -401,7 +401,7 @@ void
 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
 
 int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
 
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
index 0a304b0..98db761 100644 (file)
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
 obj-$(CONFIG_MWL8K)    += mwl8k.o
 
 obj-$(CONFIG_IWLWIFI)  += iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY)   += iwlegacy/
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
 obj-$(CONFIG_RT2X00)   += rt2x00/
 
 obj-$(CONFIG_P54_COMMON)       += p54/
@@ -58,6 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA)    += wl12xx/
 obj-$(CONFIG_IWM)      += iwmc3200wifi/
 
 obj-$(CONFIG_MWIFIEX)  += mwifiex/
-obj-$(CONFIG_BRCMFMAC) += brcm80211/
-obj-$(CONFIG_BRCMUMAC) += brcm80211/
-obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_BRCMFMAC) += brcm80211/
+obj-$(CONFIG_BRCMSMAC) += brcm80211/
index ac1176a..1c008c6 100644 (file)
@@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
        emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
        emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
        emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
-       emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+       emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
        emmh32_final(&context->seed, (u8*)&mic->mic);
 
        /*    New Type/length ?????????? */
@@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
                emmh32_update(&context->seed, eth->da, ETH_ALEN*2); 
                emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap)); 
                emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));        
-               emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);     
+               emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);  
                //Calculate MIC
                emmh32_final(&context->seed, digest);
        
index 0f9ee46..4596c33 100644 (file)
@@ -239,6 +239,7 @@ enum ATH_DEBUG {
        ATH_DBG_BTCOEX          = 0x00002000,
        ATH_DBG_WMI             = 0x00004000,
        ATH_DBG_BSTUCK          = 0x00008000,
+       ATH_DBG_MCI             = 0x00010000,
        ATH_DBG_ANY             = 0xffffffff
 };
 
index e5be7e7..ee7ea57 100644 (file)
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
                if (to_platform_device(ah->dev)->id == 0 &&
                    (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
                     (BD_WLAN1 | BD_WLAN0))
-                       __set_bit(ATH_STAT_2G_DISABLED, ah->status);
+                       ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+               else
+                       ah->ah_capabilities.cap_needs_2GHz_ovr = false;
        }
 
        ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
index bea90e6..bf67416 100644 (file)
  * or reducing sensitivity as necessary.
  *
  * The parameters are:
+ *
  *   - "noise immunity"
+ *
  *   - "spur immunity"
+ *
  *   - "firstep level"
+ *
  *   - "OFDM weak signal detection"
+ *
  *   - "CCK weak signal detection"
  *
  * Basically we look at the amount of ODFM and CCK timing errors we get and then
  * raise or lower immunity accordingly by setting one or more of these
  * parameters.
+ *
  * Newer chipsets have PHY error counters in hardware which will generate a MIB
 * interrupt when they overflow. Older hardware has to enable PHY error frames
 * by setting an RX flag and then count every single PHY error. When a specified
  */
 
 
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
 
 /**
  * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
  */
 void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
  * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @max_spur_level (the maximum level is dependent
- *     on the chip revision).
+ * on the chip revision).
  */
 void
 ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
  * ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
  * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
  */
 void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
 }
 
-
 /**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- *                                             detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
  * @on: turn on or off
  */
 void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
                          on ? "on" : "off");
 }
 
-
 /**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
  * @on: turn on or off
  */
 void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
 }
 
 
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
 
 /**
  * ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- *     the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters then.
  *
  * Try to raise noise immunity (=decrease sensitivity) in several steps
  * depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
        */
 }
 
-
 /**
  * ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Try to lower noise immunity (=increase sensitivity) in several steps
  * depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
        }
 }
 
-
 /**
  * ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Return an approximation of the time spent "listening" in milliseconds (ms)
  * since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
        return listen;
 }
 
-
 /**
  * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
  *
  * Clear the PHY error counters as soon as possible, since this might be called
  * from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
        return 1;
 }
 
-
 /**
  * ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
  *
  * Just reset counters, so they are clear for the next "ani period".
  */
 static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
 {
        /* keep last values for debugging */
        as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
        as->listen_time = 0;
 }
 
-
 /**
  * ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
  *
  * We count OFDM and CCK errors relative to the time where we did not send or
  * receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
                /* too many PHY errors - we have to raise immunity */
                bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
                ath5k_ani_raise_immunity(ah, as, ofdm_flag);
-               ath5k_ani_period_restart(ah, as);
+               ath5k_ani_period_restart(as);
 
        } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
                /* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
                if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
                        ath5k_ani_lower_immunity(ah, as);
 
-               ath5k_ani_period_restart(ah, as);
+               ath5k_ani_period_restart(as);
        }
 }
 
 
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
 
 /**
  * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
  *
  * Just read & reset the registers quickly, so they don't generate more
  * interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
                tasklet_schedule(&ah->ani_tasklet);
 }
 
-
 /**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
  *
  * This is used by hardware without PHY error counters to report PHY errors
  * on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
 }
 
 
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
 
 /**
  * ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
  *
  * Enable PHY error counters for OFDM and CCK timing errors.
  */
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
        ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
 }
 
-
 /**
  * ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
  *
  * Disable PHY error counters for OFDM and CCK timing errors.
  */
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
        ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
 }
 
-
 /**
  * ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
  *
  * Initialize ANI according to mode.
  */
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
 }
 
 
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
 
 #ifdef CONFIG_ATH5K_DEBUG
 
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
 void
 ath5k_ani_print_counters(struct ath5k_hw *ah)
 {
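[Editor's note] The ani.c changes above are mostly kernel-doc additions plus the ath5k_ani_period_restart() signature cleanup; the control loop itself is unchanged. As a reading aid, here is a hedged condensation of the decision made in ath5k_ani_calibration(), using only names visible in the hunks above; the example_ helper name and the threshold parameters are illustrative.

/* Sketch of the ANI decision, not a drop-in replacement. */
static void example_ani_decide(struct ath5k_hw *ah, struct ath5k_ani_state *as,
			       int ofdm_high, int cck_high,
			       int ofdm_low, int cck_low)
{
	if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
		/* too many PHY errors: raise immunity (less sensitive) */
		ath5k_ani_raise_immunity(ah, as, as->ofdm_errors > ofdm_high);
		ath5k_ani_period_restart(as);
	} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD &&
		   as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low) {
		/* quiet for several periods: lower immunity (more sensitive) */
		ath5k_ani_lower_immunity(ah, as);
		ath5k_ani_period_restart(as);
	}
}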
index 7358b6c..21aa355 100644 (file)
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
  * enum ath5k_ani_mode - mode for ANI / noise sensitivity
  *
  * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- *     algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- *     maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- *     minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- *     amount of OFDM and CCK frame errors (default).
+ *                     algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ *                     maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ *                     minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ *                     amount of OFDM and CCK frame errors (default).
  */
 enum ath5k_ani_mode {
        ATH5K_ANI_MODE_OFF              = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
 
 /**
  * struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
  */
 struct ath5k_ani_state {
        enum ath5k_ani_mode     ani_mode;
index fecbcd9..e564e58 100644 (file)
 #define AR5K_TUNE_MAX_TXPOWER                  63
 #define AR5K_TUNE_DEFAULT_TXPOWER              25
 #define AR5K_TUNE_TPC_TXPOWER                  false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    10000   /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL    60000   /* 60 sec */
+#define        ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT   10000   /* 10 sec */
 #define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI    1000    /* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF     60000   /* 60 sec */
-
 #define ATH5K_TX_COMPLETE_POLL_INT             3000    /* 3 sec */
 
 #define AR5K_INIT_CARR_SENSE_EN                        1
 #define        AR5K_AGC_SETTLING_TURBO                 37
 
 
-/* GENERIC CHIPSET DEFINITIONS */
 
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
 enum ath5k_version {
        AR5K_AR5210     = 0,
        AR5K_AR5211     = 1,
        AR5K_AR5212     = 2,
 };
 
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
 enum ath5k_radio {
        AR5K_RF5110     = 0,
        AR5K_RF5111     = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
 #define AR5K_SREV_AR5213A      0x59 /* Hainan */
 #define AR5K_SREV_AR2413       0x78 /* Griffin lite */
 #define AR5K_SREV_AR2414       0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6    0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7    0x87 /* AP51-Full */
 #define AR5K_SREV_AR5424       0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1    0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2    0x91 /* AP61-Full */
 #define AR5K_SREV_AR5413       0xa4 /* Eagle lite */
 #define AR5K_SREV_AR5414       0xa0 /* Eagle */
 #define AR5K_SREV_AR2415       0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
 
 /* TODO add support to mac80211 for vendor-specific rates and modes */
 
-/*
+/**
+ * DOC: Atheros XR
+ *
  * Some of this information is based on Documentation from:
  *
  * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
  *
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device with an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
  *
 * Please note that you can either use XR or TURBO but you cannot use both,
  * they are exclusive.
  *
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
  */
-#define MODULATION_XR          0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide a throughput transmission speed up to 40Mbit/s
+ * -60Mbit/s at a 108Mbit/s signaling rate achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
  * There is also a distinction between "static" and "dynamic" turbo modes:
  *
  * - Static: is the dumb version: devices set to this mode stick to it until
  *     the mode is turned off.
+ *
  * - Dynamic: is the intelligent version, the network decides itself if it
  *     is ok to use turbo. As soon as traffic is detected on adjacent channels
  *     (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
  *
  * http://www.pcworld.com/article/id,113428-page,1/article.html
  *
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
  *
  * - Bursting: allows multiple frames to be sent at once, rather than pausing
  *     after each frame. Bursting is a standards-compliant feature that can be
  *     used with any Access Point.
+ *
  * - Fast frames: increases the amount of information that can be sent per
  *     frame, also resulting in a reduction of transmission overhead. It is a
  *     proprietary feature that needs to be supported by the Access Point.
+ *
  * - Compression: data frames are compressed in real time using a Lempel Ziv
  *     algorithm. This is done transparently. Once this feature is enabled,
  *     compression and decompression takes place inside the chipset, without
  *     putting additional load on the host CPU.
  *
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
  */
-#define MODULATION_TURBO       0x00000080
 
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here, we use these as
+ * array indices and it also maps EEPROM structures.
+ */
 enum ath5k_driver_mode {
        AR5K_MODE_11A           =       0,
        AR5K_MODE_11B           =       1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
        AR5K_MODE_MAX           =       3
 };
 
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode -A -> Rx, B-> Tx-
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control check out phy.c
+ */
 enum ath5k_ant_mode {
-       AR5K_ANTMODE_DEFAULT    = 0,    /* default antenna setup */
-       AR5K_ANTMODE_FIXED_A    = 1,    /* only antenna A is present */
-       AR5K_ANTMODE_FIXED_B    = 2,    /* only antenna B is present */
-       AR5K_ANTMODE_SINGLE_AP  = 3,    /* sta locked on a single ap */
-       AR5K_ANTMODE_SECTOR_AP  = 4,    /* AP with tx antenna set on tx desc */
-       AR5K_ANTMODE_SECTOR_STA = 5,    /* STA with tx antenna set on tx desc */
-       AR5K_ANTMODE_DEBUG      = 6,    /* Debug mode -A -> Rx, B-> Tx- */
+       AR5K_ANTMODE_DEFAULT    = 0,
+       AR5K_ANTMODE_FIXED_A    = 1,
+       AR5K_ANTMODE_FIXED_B    = 2,
+       AR5K_ANTMODE_SINGLE_AP  = 3,
+       AR5K_ANTMODE_SECTOR_AP  = 4,
+       AR5K_ANTMODE_SECTOR_STA = 5,
+       AR5K_ANTMODE_DEBUG      = 6,
        AR5K_ANTMODE_MAX,
 };
 
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
 enum ath5k_bw_mode {
-       AR5K_BWMODE_DEFAULT     = 0,    /* 20MHz, default operation */
-       AR5K_BWMODE_5MHZ        = 1,    /* Quarter rate */
-       AR5K_BWMODE_10MHZ       = 2,    /* Half rate */
-       AR5K_BWMODE_40MHZ       = 3     /* Turbo */
+       AR5K_BWMODE_DEFAULT     = 0,
+       AR5K_BWMODE_5MHZ        = 1,
+       AR5K_BWMODE_10MHZ       = 2,
+       AR5K_BWMODE_40MHZ       = 3
 };
 
+
+
 /****************\
   TX DEFINITIONS
 \****************/
 
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
  */
 struct ath5k_tx_status {
        u16     ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
  * enum ath5k_tx_queue - Queue types used to classify tx queues.
  * @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
  * @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
  * @AR5K_TX_QUEUE_BEACON: The beacon queue
  * @AR5K_TX_QUEUE_CAB: The after-beacon queue
  * @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
 enum ath5k_tx_queue {
        AR5K_TX_QUEUE_INACTIVE = 0,
        AR5K_TX_QUEUE_DATA,
-       AR5K_TX_QUEUE_XR_DATA,
        AR5K_TX_QUEUE_BEACON,
        AR5K_TX_QUEUE_CAB,
        AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
 #define        AR5K_NUM_TX_QUEUES              10
 #define        AR5K_NUM_TX_QUEUES_NOQCU        2
 
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
  * These are the 4 Access Categories as defined in
  * WME spec. 0 is the lowest priority and 4 is the
  * highest. Normal data that hasn't been classified
  * goes to the Best Effort AC.
  */
 enum ath5k_tx_queue_subtype {
-       AR5K_WME_AC_BK = 0,     /*Background traffic*/
-       AR5K_WME_AC_BE,         /*Best-effort (normal) traffic*/
-       AR5K_WME_AC_VI,         /*Video traffic*/
-       AR5K_WME_AC_VO,         /*Voice traffic*/
+       AR5K_WME_AC_BK = 0,
+       AR5K_WME_AC_BE,
+       AR5K_WME_AC_VI,
+       AR5K_WME_AC_VO,
 };
 
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
  */
 enum ath5k_tx_queue_id {
        AR5K_TX_QUEUE_ID_NOQCU_DATA     = 0,
        AR5K_TX_QUEUE_ID_NOQCU_BEACON   = 1,
-       AR5K_TX_QUEUE_ID_DATA_MIN       = 0, /*IEEE80211_TX_QUEUE_DATA0*/
-       AR5K_TX_QUEUE_ID_DATA_MAX       = 3, /*IEEE80211_TX_QUEUE_DATA3*/
-       AR5K_TX_QUEUE_ID_DATA_SVP       = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
-       AR5K_TX_QUEUE_ID_CAB            = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
-       AR5K_TX_QUEUE_ID_BEACON         = 7, /*IEEE80211_TX_QUEUE_BEACON*/
-       AR5K_TX_QUEUE_ID_UAPSD          = 8,
-       AR5K_TX_QUEUE_ID_XR_DATA        = 9,
+       AR5K_TX_QUEUE_ID_DATA_MIN       = 0,
+       AR5K_TX_QUEUE_ID_DATA_MAX       = 3,
+       AR5K_TX_QUEUE_ID_UAPSD          = 7,
+       AR5K_TX_QUEUE_ID_CAB            = 8,
+       AR5K_TX_QUEUE_ID_BEACON         = 9,
 };
 
 /*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
 #define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS                0x1000  /* Disable backoff while bursting */
 #define AR5K_TXQ_FLAG_COMPRESSION_ENABLE       0x2000  /* Enable hw compression -not implemented-*/
 
-/*
- * Data transmit queue state.  One of these exists for each
- * hardware transmit queue.  Packets sent to us from above
- * are assigned to queues based on their priority.  Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority.  Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
  */
 struct ath5k_txq {
-       unsigned int            qnum;   /* hardware q number */
-       u32                     *link;  /* link ptr in last TX desc */
-       struct list_head        q;      /* transmit queue */
-       spinlock_t              lock;   /* lock on q and link */
+       unsigned int            qnum;
+       u32                     *link;
+       struct list_head        q;
+       spinlock_t              lock;
        bool                    setup;
-       int                     txq_len; /* number of queued buffers */
-       int                     txq_max; /* max allowed num of queued buffers */
+       int                     txq_len;
+       int                     txq_max;
        bool                    txq_poll_mark;
-       unsigned int            txq_stuck;      /* informational counter */
+       unsigned int            txq_stuck;
 };
 
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
  */
 struct ath5k_txq_info {
        enum ath5k_tx_queue tqi_type;
        enum ath5k_tx_queue_subtype tqi_subtype;
-       u16     tqi_flags;      /* Tx queue flags (see above) */
-       u8      tqi_aifs;       /* Arbitrated Interframe Space */
-       u16     tqi_cw_min;     /* Minimum Contention Window */
-       u16     tqi_cw_max;     /* Maximum Contention Window */
-       u32     tqi_cbr_period; /* Constant bit rate period */
+       u16     tqi_flags;
+       u8      tqi_aifs;
+       u16     tqi_cw_min;
+       u16     tqi_cw_max;
+       u32     tqi_cbr_period;
        u32     tqi_cbr_overflow_limit;
        u32     tqi_burst_time;
-       u32     tqi_ready_time; /* Time queue waits after an event */
+       u32     tqi_ready_time;
 };
 
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
  */
 enum ath5k_pkt_type {
        AR5K_PKT_TYPE_NORMAL            = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
        (ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v)     \
 )
 
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
-       AR5K_DMASIZE_4B = 0,
-       AR5K_DMASIZE_8B,
-       AR5K_DMASIZE_16B,
-       AR5K_DMASIZE_32B,
-       AR5K_DMASIZE_64B,
-       AR5K_DMASIZE_128B,
-       AR5K_DMASIZE_256B,
-       AR5K_DMASIZE_512B
-};
 
 
 /****************\
   RX DEFINITIONS
 \****************/
 
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dbm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
  */
 struct ath5k_rx_status {
        u16     rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
 #define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
 
 
+
 /*******************************\
   GAIN OPTIMIZATION DEFINITIONS
 \*******************************/
 
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
 enum ath5k_rfgain {
        AR5K_RFGAIN_INACTIVE = 0,
        AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
        AR5K_RFGAIN_NEED_CHANGE,
 };
 
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
 struct ath5k_gain {
        u8                      g_step_idx;
        u8                      g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
        u8                      g_state;
 };
 
+
+
 /********************\
   COMMON DEFINITIONS
 \********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
 #define AR5K_SLOT_TIME_20      880
 #define AR5K_SLOT_TIME_MAX     0xffff
 
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on 2111 frequency converter
+ * that comes together with RF5111
  * TODO: Clean up
  */
 struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
        u16     a2_athchan;
 };
 
+/**
+ * enum ath5k_dmasize -  DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4Bytes, so
+ * be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+       AR5K_DMASIZE_4B = 0,
+       AR5K_DMASIZE_8B,
+       AR5K_DMASIZE_16B,
+       AR5K_DMASIZE_32B,
+       AR5K_DMASIZE_64B,
+       AR5K_DMASIZE_128B,
+       AR5K_DMASIZE_256B,
+       AR5K_DMASIZE_512B
+};
+
+
 
 /******************\
   RATE DEFINITIONS
 \******************/
 
 /**
+ * DOC: Rate codes
+ *
  * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
  *
  * The rate code is used to get the RX rate or set the TX rate on the
  * hardware descriptors. It is also used for internal modulation control
  * and settings.
  *
- * This is the hardware rate map we are aware of:
- *
- * rate_code   0x01    0x02    0x03    0x04    0x05    0x06    0x07    0x08
- * rate_kbps   3000    1000    ?       ?       ?       2000    500     48000
- *
- * rate_code   0x09    0x0A    0x0B    0x0C    0x0D    0x0E    0x0F    0x10
- * rate_kbps   24000   12000   6000    54000   36000   18000   9000    ?
+ * This is the hardware rate map we are aware of (html unfriendly):
  *
- * rate_code   17      18      19      20      21      22      23      24
- * rate_kbps   ?       ?       ?       ?       ?       ?       ?       11000
+ * Rate code   Rate (Kbps)
+ * ---------   -----------
+ * 0x01                 3000 (XR)
+ * 0x02                 1000 (XR)
+ * 0x03                  250 (XR)
+ * 0x04 - 05   -Reserved-
+ * 0x06                 2000 (XR)
+ * 0x07                  500 (XR)
+ * 0x08                48000 (OFDM)
+ * 0x09                24000 (OFDM)
+ * 0x0A                12000 (OFDM)
+ * 0x0B                 6000 (OFDM)
+ * 0x0C                54000 (OFDM)
+ * 0x0D                36000 (OFDM)
+ * 0x0E                18000 (OFDM)
+ * 0x0F                 9000 (OFDM)
+ * 0x10 - 17   -Reserved-
+ * 0x18                11000L (CCK)
+ * 0x19                 5500L (CCK)
+ * 0x1A                 2000L (CCK)
+ * 0x1B                 1000L (CCK)
+ * 0x1C                11000S (CCK)
+ * 0x1D                 5500S (CCK)
+ * 0x1E                 2000S (CCK)
+ * 0x1F                -Reserved-
  *
- * rate_code   25      26      27      28      29      30      31      32
- * rate_kbps   5500    2000    1000    11000S  5500S   2000S   ?       ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
  *
  * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
  * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
  * We handle this in ath5k_setup_bands().
  */
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
 #define ATH5K_RATE_CODE_36M    0x0D
 #define ATH5K_RATE_CODE_48M    0x08
 #define ATH5K_RATE_CODE_54M    0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K        0x07
-#define ATH5K_RATE_CODE_XR_1M  0x02
-#define ATH5K_RATE_CODE_XR_2M  0x06
-#define ATH5K_RATE_CODE_XR_3M  0x01
 
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
 #define AR5K_SET_SHORT_PREAMBLE 0x04
 
 /*
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
 
 /**
  * enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ *             not always fatal, on some chips we can continue operation
+ *             without resetting the card, that's why %AR5K_INT_FATAL is not
+ *             common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ *             Queue Control Unit (QCU) signals an EOL interrupt only if a
+ *             descriptor's LinkPtr is NULL. For more details, refer to:
+ *             "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such case we should
+ *             increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
  *
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- *     AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- *     Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- *     LinkPtr is NULL. For more details, refer to:
- *     http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- *     Note that Rx overrun is not always fatal, on some chips we can continue
- *     operation without resetting the card, that's why int_fatal is not
- *     common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- *     AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- *     We currently do increments on interrupt by
- *     (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
  * @AR5K_INT_MIB: Indicates the either Management Information Base counters or
- *     one of the PHY error counters reached the maximum value and should be
- *     read and cleared.
+ *             one of the PHY error counters reached the maximum value and
+ *             should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
  * @AR5K_INT_RXPHY: RX PHY Error
  * @AR5K_INT_RXKCM: RX Key cache miss
  * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- *     beacon that must be handled in software. The alternative is if you
- *     have VEOL support, in that case you let the hardware deal with things.
+ *             beacon that must be handled in software. The alternative is if
+ *             you have VEOL support, in that case you let the hardware deal
+ *             with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
  * @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- *     beacons from the AP have associated with, we should probably try to
- *     reassociate. When in IBSS mode this might mean we have not received
- *     any beacons from any local stations. Note that every station in an
- *     IBSS schedules to send beacons at the Target Beacon Transmission Time
- *     (TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- *     until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- *     errors. These types of errors we can enable seem to be of type
- *     AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ *             beacons from the AP we have associated with, we should probably
+ *             try to reassociate. When in IBSS mode this might mean we have
+ *             not received any beacons from any local stations. Note that
+ *             every station in an IBSS schedules to send beacons at the
+ *             Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ *             our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ *             nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue's CBR counter has expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ *             errors. Indicates we need to reset the card.
  * @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- *     bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ *             bit value
  *
  * These are mapped to take advantage of some common bits
  * between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
        AR5K_INT_GPIO   =       0x01000000,
        AR5K_INT_BCN_TIMEOUT =  0x02000000, /* Non common */
        AR5K_INT_CAB_TIMEOUT =  0x04000000, /* Non common */
-       AR5K_INT_RX_DOPPLER =   0x08000000, /* Non common */
-       AR5K_INT_QCBRORN =      0x10000000, /* Non common */
-       AR5K_INT_QCBRURN =      0x20000000, /* Non common */
-       AR5K_INT_QTRIG  =       0x40000000, /* Non common */
+       AR5K_INT_QCBRORN =      0x08000000, /* Non common */
+       AR5K_INT_QCBRURN =      0x10000000, /* Non common */
+       AR5K_INT_QTRIG  =       0x20000000, /* Non common */
        AR5K_INT_GLOBAL =       0x80000000,
 
        AR5K_INT_TX_ALL = AR5K_INT_TXOK
                | AR5K_INT_TXDESC
                | AR5K_INT_TXERR
+               | AR5K_INT_TXNOFRM
                | AR5K_INT_TXEOL
                | AR5K_INT_TXURN,
 
@@ -891,15 +1074,32 @@ enum ath5k_int {
        AR5K_INT_NOCARD = 0xffffffff
 };
 
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
 enum ath5k_calibration_mask {
        AR5K_CALIBRATION_FULL = 0x01,
        AR5K_CALIBRATION_SHORT = 0x02,
-       AR5K_CALIBRATION_ANI = 0x04,
+       AR5K_CALIBRATION_NF = 0x04,
+       AR5K_CALIBRATION_ANI = 0x08,
 };
 
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used; FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are known to have problems on some cards. This is not a big
+ * problem though, because we can get almost the same effect as
+ * FULL_SLEEP by putting the card in warm reset (it's almost powered down).
  */
 enum ath5k_power_mode {
        AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
        } cap_queues;
 
        bool cap_has_phyerr_counters;
+       bool cap_has_mrr_support;
+       bool cap_needs_2GHz_ovr;
 };
 
 /* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
        dma_addr_t              desc_daddr;     /* DMA (physical) address */
        size_t                  desc_len;       /* size of TX/RX descriptors */
 
-       DECLARE_BITMAP(status, 6);
+       DECLARE_BITMAP(status, 4);
 #define ATH_STAT_INVALID       0               /* disable hardware accesses */
-#define ATH_STAT_MRRETRY       1               /* multi-rate retry support */
-#define ATH_STAT_PROMISC       2
-#define ATH_STAT_LEDSOFT       3               /* enable LED gpio status */
-#define ATH_STAT_STARTED       4               /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED   5               /* multiband radio without 2G */
+#define ATH_STAT_PROMISC       1
+#define ATH_STAT_LEDSOFT       2               /* enable LED gpio status */
+#define ATH_STAT_STARTED       3               /* opened & irqs enabled */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
        struct ieee80211_channel *curchan;      /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
                                led_on;         /* pin setting for LED on */
 
        struct work_struct      reset_work;     /* deferred chip reset */
+       struct work_struct      calib_work;     /* deferred phy calibration */
 
        struct list_head        rxbuf;          /* receive buffer */
        spinlock_t              rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
 
        struct ath5k_rfkill     rf_kill;
 
-       struct tasklet_struct   calib;          /* calibration tasklet */
-
        spinlock_t              block;          /* protects beacon */
        struct tasklet_struct   beacontq;       /* beacon intr tasklet */
        struct list_head        bcbuf;          /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
        enum ath5k_int          ah_imr;
 
        struct ieee80211_channel *ah_current_channel;
-       bool                    ah_calibration;
+       bool                    ah_iq_cal_needed;
        bool                    ah_single_chip;
 
        enum ath5k_version      ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
        u32                     ah_txq_imr_cbrurn;
        u32                     ah_txq_imr_qtrig;
        u32                     ah_txq_imr_nofrm;
-       u32                     ah_txq_isr;
+
+       u32                     ah_txq_isr_txok_all;
+       u32                     ah_txq_isr_txurn;
+       u32                     ah_txq_isr_qcborn;
+       u32                     ah_txq_isr_qcburn;
+       u32                     ah_txq_isr_qtrig;
+
        u32                     *ah_rf_banks;
        size_t                  ah_rf_banks_size;
        size_t                  ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
 
        /* Calibration timestamp */
        unsigned long           ah_cal_next_full;
+       unsigned long           ah_cal_next_short;
        unsigned long           ah_cal_next_ani;
-       unsigned long           ah_cal_next_nf;
 
        /* Calibration mask */
        u8                      ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
 u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
 void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
 void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+                                                       u32 interval);
 bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
 /* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
-                                                               u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
 
 /* Queue Control Unit, DFS Control Unit Functions */
 int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
index 91627dd..d7114c7 100644 (file)
@@ -27,8 +27,7 @@
 #include "debug.h"
 
 /**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
  * @ah: The &struct ath5k_hw
  */
 static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
  * @ah: The &struct ath5k_hw associated with the device
  *
  * Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
 
                /* Reset SERDES to load new settings */
                ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
-               mdelay(1);
+               usleep_range(1000, 1500);
        }
 
        /* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
                goto err;
        }
 
-       if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
-               __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
-               __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
-       }
-
        /* Crypto settings */
        common->keymax = (ah->ah_version == AR5K_AR5210 ?
                          AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
 }
 
 /**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
  * @ah: The &struct ath5k_hw
  */
 void ath5k_hw_deinit(struct ath5k_hw *ah)
index b346d04..178a4dd 100644 (file)
@@ -80,6 +80,11 @@ static int modparam_fastchanswitch;
 module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
 MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
 
+static int ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+                                                               bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
 
 /* Module info */
 MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
        { .bitrate = 540,
          .hw_value = ATH5K_RATE_CODE_54M,
          .flags = 0 },
-       /* XR missing */
 };
 
 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        if (ret)
                goto err_unmap;
 
-       memset(mrr_rate, 0, sizeof(mrr_rate));
-       memset(mrr_tries, 0, sizeof(mrr_tries));
-       for (i = 0; i < 3; i++) {
-               rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
-               if (!rate)
-                       break;
+       /* Set up MRR descriptor */
+       if (ah->ah_capabilities.cap_has_mrr_support) {
+               memset(mrr_rate, 0, sizeof(mrr_rate));
+               memset(mrr_tries, 0, sizeof(mrr_tries));
+               for (i = 0; i < 3; i++) {
+                       rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+                       if (!rate)
+                               break;
 
-               mrr_rate[i] = rate->hw_value;
-               mrr_tries[i] = info->control.rates[i + 1].count;
-       }
+                       mrr_rate[i] = rate->hw_value;
+                       mrr_tries[i] = info->control.rates[i + 1].count;
+               }
 
-       ath5k_hw_setup_mrr_tx_desc(ah, ds,
-               mrr_rate[0], mrr_tries[0],
-               mrr_rate[1], mrr_tries[1],
-               mrr_rate[2], mrr_tries[2]);
+               ath5k_hw_setup_mrr_tx_desc(ah, ds,
+                       mrr_rate[0], mrr_tries[0],
+                       mrr_rate[1], mrr_tries[1],
+                       mrr_rate[2], mrr_tries[2]);
+       }
 
        ds->ds_link = 0;
        ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
        struct ath5k_hw *ah = (void *)data;
 
        for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
-               if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+               if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
                        ath5k_tx_processq(ah, &ah->txqs[i]);
 
        ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
        ah->nexttbtt = nexttbtt;
 
        intval |= AR5K_BEACON_ENA;
-       ath5k_hw_init_beacon(ah, nexttbtt, intval);
+       ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
 
        /*
         * debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
 ath5k_intr_calibration_poll(struct ath5k_hw *ah)
 {
        if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
-           !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
-               /* run ANI only when full calibration is not active */
+          !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+          !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+               /* Run ANI only when calibration is not active */
+
                ah->ah_cal_next_ani = jiffies +
                        msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
                tasklet_schedule(&ah->ani_tasklet);
 
-       } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
-               ah->ah_cal_next_full = jiffies +
-                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
-               tasklet_schedule(&ah->calib);
+       } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+               !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+               !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+               /* Run calibration only when another calibration
+                * is not running.
+                *
+                * Note: This is for both full/short calibration; if it's
+                * time for a full one, ath5k_calibrate_work will deal
+                * with it. */
+
+               ah->ah_cal_next_short = jiffies +
+                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+               ieee80211_queue_work(ah->hw, &ah->calib_work);
        }
        /* we could use SWI to generate enough interrupts to meet our
         * calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
        enum ath5k_int status;
        unsigned int counter = 1000;
 
+
+       /*
+        * If hw is not ready (or detached) and we get an
+        * interrupt, or if there are no interrupts pending
+        * (which means it's not for us), skip it.
+        *
+        * NOTE: Group 0/1 PCI interface registers are not
+        * supported on WiSOCs, so we can't check for pending
+        * interrupts (ISR belongs to another register group
+        * so we are ok).
+        */
        if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
-               ((ath5k_get_bus_type(ah) != ATH_AHB) &&
-                               !ath5k_hw_is_intr_pending(ah))))
+                       ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+                       !ath5k_hw_is_intr_pending(ah))))
                return IRQ_NONE;
 
+       /** Main loop **/
        do {
-               ath5k_hw_get_isr(ah, &status);          /* NB: clears IRQ too */
+               ath5k_hw_get_isr(ah, &status);  /* NB: clears IRQ too */
+
                ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
                                status, ah->imask);
+
+               /*
+                * Fatal hw error -> Log and reset
+                *
+                * Fatal errors are unrecoverable so we have to
+                * reset the card. These errors include bus and
+                * dma errors.
+                */
                if (unlikely(status & AR5K_INT_FATAL)) {
-                       /*
-                        * Fatal errors are unrecoverable.
-                        * Typically these are caused by DMA errors.
-                        */
+
                        ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                                  "fatal int, resetting\n");
                        ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+               /*
+                * RX Overrun -> Count and reset if needed
+                *
+                * Receive buffers are full. Either the bus is busy or
+                * the CPU is not fast enough to process all received
+                * frames.
+                */
                } else if (unlikely(status & AR5K_INT_RXORN)) {
+
                        /*
-                        * Receive buffers are full. Either the bus is busy or
-                        * the CPU is not fast enough to process all received
-                        * frames.
                         * Older chipsets need a reset to come out of this
                         * condition, but we treat it as RX for newer chips.
-                        * We don't know exactly which versions need a reset -
+                        * We don't know exactly which versions need a reset;
                         * this guess is copied from the HAL.
                         */
                        ah->stats.rxorn_intr++;
+
                        if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
                                ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
                                          "rx overrun, resetting\n");
                                ieee80211_queue_work(ah->hw, &ah->reset_work);
                        } else
                                ath5k_schedule_rx(ah);
+
                } else {
+
+                       /* Software Beacon Alert -> Schedule beacon tasklet */
                        if (status & AR5K_INT_SWBA)
                                tasklet_hi_schedule(&ah->beacontq);
 
-                       if (status & AR5K_INT_RXEOL) {
-                               /*
-                               * NB: the hardware should re-read the link when
-                               *     RXE bit is written, but it doesn't work at
-                               *     least on older hardware revs.
-                               */
+                       /*
+                        * No more RX descriptors -> Just count
+                        *
+                        * NB: the hardware should re-read the link when
+                        *     RXE bit is written, but it doesn't work at
+                        *     least on older hardware revs.
+                        */
+                       if (status & AR5K_INT_RXEOL)
                                ah->stats.rxeol_intr++;
-                       }
-                       if (status & AR5K_INT_TXURN) {
-                               /* bump tx trigger level */
+
+
+                       /* TX Underrun -> Bump tx trigger level */
+                       if (status & AR5K_INT_TXURN)
                                ath5k_hw_update_tx_triglevel(ah, true);
-                       }
+
+                       /* RX -> Schedule rx tasklet */
                        if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
                                ath5k_schedule_rx(ah);
-                       if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
-                                       | AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+                       /* TX -> Schedule tx tasklet */
+                       if (status & (AR5K_INT_TXOK
+                                       | AR5K_INT_TXDESC
+                                       | AR5K_INT_TXERR
+                                       | AR5K_INT_TXEOL))
                                ath5k_schedule_tx(ah);
-                       if (status & AR5K_INT_BMISS) {
-                               /* TODO */
-                       }
+
+                       /* Missed beacon -> TODO
+                       if (status & AR5K_INT_BMISS)
+                       */
+
+                       /* MIB event -> Update counters and notify ANI */
                        if (status & AR5K_INT_MIB) {
                                ah->stats.mib_intr++;
                                ath5k_hw_update_mib_counters(ah);
                                ath5k_ani_mib_intr(ah);
                        }
+
+                       /* GPIO -> Notify RFKill layer */
                        if (status & AR5K_INT_GPIO)
                                tasklet_schedule(&ah->rf_kill.toggleq);
 
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
 
        } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
 
+       /*
+        * Until we handle the rx/tx interrupts, mask them on IMR.
+        *
+        * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+        * and unset after we've handled the interrupts.
+        */
        if (ah->rx_pending || ah->tx_pending)
                ath5k_set_current_imask(ah);
 
        if (unlikely(!counter))
                ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
 
+       /* Fire up calibration poll */
        ath5k_intr_calibration_poll(ah);
 
        return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
  * for temperature/environment changes.
  */
 static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
 {
-       struct ath5k_hw *ah = (void *)data;
+       struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+               calib_work);
+
+       /* Should we run a full calibration ? */
+       if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+               ah->ah_cal_next_full = jiffies +
+                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+               ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+               ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+                               "running full calibration\n");
+
+               if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+                       /*
+                        * Rfgain is out of bounds, reset the chip
+                        * to load new gain values.
+                        */
+                       ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+                                       "got new rfgain, resetting\n");
+                       ieee80211_queue_work(ah->hw, &ah->reset_work);
+               }
+
+               /* TODO: On full calibration we should stop TX here,
+                * so that it doesn't interfere (mostly due to gain_f
+                * calibration that messes with tx packets -see phy.c).
+                *
+                * NOTE: Stopping the queues from above is not enough
+                * to stop TX but saves us from disconnecting (at least
+                * we don't lose packets). */
+               ieee80211_stop_queues(ah->hw);
+       } else
+               ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
 
-       /* Only full calibration for now */
-       ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
 
        ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
                ieee80211_frequency_to_channel(ah->curchan->center_freq),
                ah->curchan->hw_value);
 
-       if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
-               /*
-                * Rfgain is out of bounds, reset the chip
-                * to load new gain values.
-                */
-               ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
-               ieee80211_queue_work(ah->hw, &ah->reset_work);
-       }
        if (ath5k_hw_phy_calibrate(ah, ah->curchan))
                ATH5K_ERR(ah, "calibration of channel %u failed\n",
                        ieee80211_frequency_to_channel(
                                ah->curchan->center_freq));
 
-       /* Noise floor calibration interrupts rx/tx path while I/Q calibration
-        * doesn't.
-        * TODO: We should stop TX here, so that it doesn't interfere.
-        * Note that stopping the queues is not enough to stop TX! */
-       if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
-               ah->ah_cal_next_nf = jiffies +
-                       msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
-               ath5k_hw_update_noise_floor(ah);
-       }
-
-       ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+       /* Clear calibration flags */
+       if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+               ieee80211_wake_queues(ah->hw);
+               ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+       } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+               ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
 }
 
 
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
        if (ret)
                goto err_irq;
 
-       /* set up multi-rate retry capabilities */
-       if (ah->ah_version == AR5K_AR5212) {
+       /* Set up multi-rate retry capabilities */
+       if (ah->ah_capabilities.cap_has_mrr_support) {
                hw->max_rates = 4;
                hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
                                         AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
         * and then setup of the interrupt mask.
         */
        ah->curchan = ah->hw->conf.channel;
-       ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
-               AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
-               AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+       ah->imask = AR5K_INT_RXOK
+               | AR5K_INT_RXERR
+               | AR5K_INT_RXEOL
+               | AR5K_INT_RXORN
+               | AR5K_INT_TXDESC
+               | AR5K_INT_TXEOL
+               | AR5K_INT_FATAL
+               | AR5K_INT_GLOBAL
+               | AR5K_INT_MIB;
 
        ret = ath5k_reset(ah, NULL, false);
        if (ret)
                goto done;
 
-       ath5k_rfkill_hw_start(ah);
+       if (!ath5k_modparam_no_hw_rfkill_switch)
+               ath5k_rfkill_hw_start(ah);
 
        /*
         * Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
        ah->tx_pending = false;
        tasklet_kill(&ah->rxtq);
        tasklet_kill(&ah->txtq);
-       tasklet_kill(&ah->calib);
        tasklet_kill(&ah->beacontq);
        tasklet_kill(&ah->ani_tasklet);
 }
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
 
        cancel_delayed_work_sync(&ah->tx_complete_work);
 
-       ath5k_rfkill_hw_stop(ah);
+       if (!ath5k_modparam_no_hw_rfkill_switch)
+               ath5k_rfkill_hw_stop(ah);
 }
 
 /*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
 
        ath5k_ani_init(ah, ani_mode);
 
-       ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
-       ah->ah_cal_next_ani = jiffies;
-       ah->ah_cal_next_nf = jiffies;
+       /*
+        * Set calibration intervals
+        *
+        * Note: We don't need to run calibration immediately
+        * since some initial calibration is done on reset
+        * even for fast channel switching. Also, when scanning,
+        * these timestamps get set again and again, so calibration
+        * won't get executed unless we connect somewhere and spend
+        * some time on the channel (which is what calibration needs
+        * anyway to be accurate).
+        */
+       ah->ah_cal_next_full = jiffies +
+               msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+       ah->ah_cal_next_ani = jiffies +
+               msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+       ah->ah_cal_next_short = jiffies +
+               msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
        ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
 
        /* clear survey data and cycle counters */
@@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw)
 
 
        /*
-        * Check if the MAC has multi-rate retry support.
-        * We do this by trying to setup a fake extended
-        * descriptor.  MACs that don't have support will
-        * return false w/o doing anything.  MACs that do
-        * support it will return true w/o doing anything.
-        */
-       ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
-       if (ret < 0)
-               goto err;
-       if (ret > 0)
-               __set_bit(ATH_STAT_MRRETRY, ah->status);
-
-       /*
         * Collect the channel list.  The 802.11 layer
         * is responsible for filtering this list based
         * on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
 
        tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
        tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
-       tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
        tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
        tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
 
        INIT_WORK(&ah->reset_work, ath5k_reset_work);
+       INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
        INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
 
        ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
index 810fba9..994169a 100644 (file)
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
                        caps->cap_range.range_2ghz_min = 2412;
                        caps->cap_range.range_2ghz_max = 2732;
 
-                       if (AR5K_EEPROM_HDR_11B(ee_header))
-                               __set_bit(AR5K_MODE_11B, caps->cap_mode);
-
-                       if (AR5K_EEPROM_HDR_11G(ee_header) &&
-                           ah->ah_version != AR5K_AR5211)
-                               __set_bit(AR5K_MODE_11G, caps->cap_mode);
+                       /* Override 2GHz modes on SoCs that need it
+                        * NOTE: cap_needs_2GHz_ovr gets set from
+                        * ath_ahb_probe */
+                       if (!caps->cap_needs_2GHz_ovr) {
+                               if (AR5K_EEPROM_HDR_11B(ee_header))
+                                       __set_bit(AR5K_MODE_11B,
+                                                       caps->cap_mode);
+
+                               if (AR5K_EEPROM_HDR_11G(ee_header) &&
+                               ah->ah_version != AR5K_AR5211)
+                                       __set_bit(AR5K_MODE_11G,
+                                                       caps->cap_mode);
+                       }
                }
        }
 
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
        else
                caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
 
-       /* newer hardware has PHY error counters */
+       /* Newer hardware has PHY error counters */
        if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
                caps->cap_has_phyerr_counters = true;
        else
                caps->cap_has_phyerr_counters = false;
 
+       /* MACs since AR5212 have MRR support */
+       if (ah->ah_version == AR5K_AR5212)
+               caps->cap_has_mrr_support = true;
+       else
+               caps->cap_has_mrr_support = false;
+
        return 0;
 }
 
index 7e88dda..f8bfa3a 100644 (file)
 #include "debug.h"
 
 
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kinds of
+ * descriptors for RX and TX: control descriptors, which tell the hw how to
+ * send or receive a packet, where to read/write it from/to etc., and status
+ * descriptors, which contain information about how the packet was sent or
+ * received (errors included).
+ *
+ * Descriptor format is not exactly the same for each MAC chip version, so we
+ * have function pointers on &struct ath5k_hw that we initialize at runtime
+ * based on the chip used.
+ */
+
+
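
A minimal sketch (illustrative only; ah_proc_rx_desc is assumed to be one of those runtime-initialized pointers, and the helper name is hypothetical) of how callers are expected to go through this abstraction instead of picking a chip-specific helper directly:

	static int ath5k_rx_desc_done_sketch(struct ath5k_hw *ah,
					     struct ath5k_desc *ds,
					     struct ath5k_rx_status *rs)
	{
		/* Resolves to the 5210/5211 or 5212 variant that
		 * ath5k_hw_init_desc_functions() (see below) installed */
		return ah->ah_proc_rx_desc(ah, ds, rs);
	}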
 /************************\
 * TX Control descriptors *
 \************************/
 
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
 static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-       unsigned int pkt_len, unsigned int hdr_len, int padsize,
-       enum ath5k_pkt_type type,
-       unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
-       unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
-       unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       unsigned int pkt_len, unsigned int hdr_len,
+                       int padsize,
+                       enum ath5k_pkt_type type,
+                       unsigned int tx_power,
+                       unsigned int tx_rate0, unsigned int tx_tries0,
+                       unsigned int key_index,
+                       unsigned int antenna_mode,
+                       unsigned int flags,
+                       unsigned int rtscts_rate, unsigned int rtscts_duration)
 {
        u32 frame_type;
        struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
        return 0;
 }
 
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MAC chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
-       struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
-       int padsize,
-       enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
-       unsigned int tx_tries0, unsigned int key_index,
-       unsigned int antenna_mode, unsigned int flags,
-       unsigned int rtscts_rate,
-       unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       unsigned int pkt_len, unsigned int hdr_len,
+                       int padsize,
+                       enum ath5k_pkt_type type,
+                       unsigned int tx_power,
+                       unsigned int tx_rate0, unsigned int tx_tries0,
+                       unsigned int key_index,
+                       unsigned int antenna_mode,
+                       unsigned int flags,
+                       unsigned int rtscts_rate, unsigned int rtscts_duration)
 {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
        unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs; they are part of the normal 4-word tx control descriptor (see above),
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
  */
 int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-       unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
-       u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       u_int tx_rate1, u_int tx_tries1,
+                       u_int tx_rate2, u_int tx_tries2,
+                       u_int tx_rate3, u_int tx_tries3)
 {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
 
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
 * TX Status descriptors *
 \***********************/
 
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
  */
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
-               struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_tx_status *ts)
 {
        struct ath5k_hw_2w_tx_ctl *tx_ctl;
        struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
  */
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
-               struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_tx_status *ts)
 {
        struct ath5k_hw_4w_tx_ctl *tx_ctl;
        struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
 * RX Descriptors *
 \****************/
 
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
  */
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
-                          u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+                       struct ath5k_desc *desc,
+                       u32 size, unsigned int flags)
 {
        struct ath5k_hw_rx_ctl *rx_ctl;
 
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
        return 0;
 }
 
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
  */
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
-               struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_rx_status *rs)
 {
        struct ath5k_hw_rx_status *rx_status;
 
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MAC.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
  */
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
-                                       struct ath5k_desc *desc,
-                                       struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+                               struct ath5k_desc *desc,
+                               struct ath5k_rx_status *rs)
 {
        struct ath5k_hw_rx_status *rx_status;
        u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
 * Attach *
 \********/
 
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, used
+ * from above. This is used as an abstraction layer to handle the various chips
+ * the same way.
  */
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
 {
        if (ah->ah_version == AR5K_AR5212) {
                ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
index cfd529b..8d6c01a 100644 (file)
  * RX/TX descriptor structures
  */
 
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
  */
 struct ath5k_hw_rx_ctl {
-       u32     rx_control_0; /* RX control word 0 */
-       u32     rx_control_1; /* RX control word 1 */
+       u32     rx_control_0;
+       u32     rx_control_1;
 } __packed __aligned(4);
 
 /* RX control word 1 fields/flags */
 #define AR5K_DESC_RX_CTL1_BUF_LEN              0x00000fff /* data buffer length */
 #define AR5K_DESC_RX_CTL1_INTREQ               0x00002000 /* RX interrupt request */
 
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
  * 5210, 5211 and 5212 differ only in the fields and flags defined below
  */
 struct ath5k_hw_rx_status {
-       u32     rx_status_0; /* RX status word 0 */
-       u32     rx_status_1; /* RX status word 1 */
+       u32     rx_status_0;
+       u32     rx_status_1;
 } __packed __aligned(4);
 
 /* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
 
 /**
  * enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
  */
 enum ath5k_phy_error_code {
-       AR5K_RX_PHY_ERROR_UNDERRUN              = 0,    /* Transmit underrun, [5210] No error */
-       AR5K_RX_PHY_ERROR_TIMING                = 1,    /* Timing error */
-       AR5K_RX_PHY_ERROR_PARITY                = 2,    /* Illegal parity */
-       AR5K_RX_PHY_ERROR_RATE                  = 3,    /* Illegal rate */
-       AR5K_RX_PHY_ERROR_LENGTH                = 4,    /* Illegal length */
-       AR5K_RX_PHY_ERROR_RADAR                 = 5,    /* Radar detect, [5210] 64 QAM rate */
-       AR5K_RX_PHY_ERROR_SERVICE               = 6,    /* Illegal service */
-       AR5K_RX_PHY_ERROR_TOR                   = 7,    /* Transmit override receive */
-       /* these are specific to the 5212 */
+       AR5K_RX_PHY_ERROR_UNDERRUN              = 0,
+       AR5K_RX_PHY_ERROR_TIMING                = 1,
+       AR5K_RX_PHY_ERROR_PARITY                = 2,
+       AR5K_RX_PHY_ERROR_RATE                  = 3,
+       AR5K_RX_PHY_ERROR_LENGTH                = 4,
+       AR5K_RX_PHY_ERROR_RADAR                 = 5,
+       AR5K_RX_PHY_ERROR_SERVICE               = 6,
+       AR5K_RX_PHY_ERROR_TOR                   = 7,
        AR5K_RX_PHY_ERROR_OFDM_TIMING           = 17,
        AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY    = 18,
        AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL     = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
        AR5K_RX_PHY_ERROR_CCK_RESTART           = 31,
 };
 
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl  - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
  */
 struct ath5k_hw_2w_tx_ctl {
-       u32     tx_control_0; /* TX control word 0 */
-       u32     tx_control_1; /* TX control word 1 */
+       u32     tx_control_0;
+       u32     tx_control_1;
 } __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
 #define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS    4
 #define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP   4
 
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
  */
 struct ath5k_hw_4w_tx_ctl {
-       u32     tx_control_0; /* TX control word 0 */
-       u32     tx_control_1; /* TX control word 1 */
-       u32     tx_control_2; /* TX control word 2 */
-       u32     tx_control_3; /* TX control word 3 */
+       u32     tx_control_0;
+       u32     tx_control_1;
+       u32     tx_control_2;
+       u32     tx_control_3;
 } __packed __aligned(4);
 
 /* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE      0x01f00000 /* RTS or CTS rate */
 #define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S    20
 
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
  */
 struct ath5k_hw_tx_status {
-       u32     tx_status_0; /* TX status word 0 */
-       u32     tx_status_1; /* TX status word 1 */
+       u32     tx_status_0;
+       u32     tx_status_1;
 } __packed __aligned(4);
 
 /* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
 #define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
 #define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
 
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
  */
 struct ath5k_hw_5210_tx_desc {
        struct ath5k_hw_2w_tx_ctl       tx_ctl;
        struct ath5k_hw_tx_status       tx_stat;
 } __packed __aligned(4);
 
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
  */
 struct ath5k_hw_5212_tx_desc {
        struct ath5k_hw_4w_tx_ctl       tx_ctl;
        struct ath5k_hw_tx_status       tx_stat;
 } __packed __aligned(4);
 
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
  */
 struct ath5k_hw_all_rx_desc {
        struct ath5k_hw_rx_ctl          rx_ctl;
        struct ath5k_hw_rx_status       rx_stat;
 } __packed __aligned(4);
 
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
  * This is read and written to by the hardware
  */
 struct ath5k_desc {
-       u32     ds_link;        /* physical address of the next descriptor */
-       u32     ds_data;        /* physical address of data buffer (skb) */
+       u32     ds_link;
+       u32     ds_data;
 
        union {
                struct ath5k_hw_5210_tx_desc    ds_tx5210;
index 2481f9c..5cc9aa8 100644 (file)
 * DMA and interrupt masking functions *
 \*************************************/
 
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
  *
  * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
  * handle queue setup for 5210 chipset (rest are handled on qcu.c).
  * Also we setup interrupt mask register (IMR) and read the various interrupt
  * status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
  */
 
 #include "ath5k.h"
 \*********/
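
A minimal sketch (illustrative only; the helper name is hypothetical) of the RX bring-up order implied by the helpers below: program the descriptor pointer while the RX engine is stopped, then enable it, since ath5k_hw_set_rxdp() returns -EIO while RX is active:

	static int ath5k_rx_enable_sketch(struct ath5k_hw *ah, u32 first_desc)
	{
		int ret;

		ret = ath5k_hw_set_rxdp(ah, first_desc);
		if (ret)
			return ret;	/* RX engine is still running */

		ath5k_hw_start_rx_dma(ah);	/* sets AR5K_CR_RXE */
		return 0;
	}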
 
 /**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
  * @ah:        The &struct ath5k_hw
  */
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
 {
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
 }
 
 /**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
  * @ah:        The &struct ath5k_hw
  */
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
 {
        unsigned int i;
 
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
  * @ah: The &struct ath5k_hw
  */
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
 {
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
 }
 
 /**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
  * @ah: The &struct ath5k_hw
  * @phys_addr: RX descriptor address
  *
  * Returns -EIO if rx is active
  */
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
 {
        if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
                ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
 \**********/
 
 /**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
  * NOTE: Must be called after setting up tx control descriptor for that
  * queue (see below).
  */
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 {
        u32 tx_queue;
 
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
  * Stop DMA transmit on a specific hw queue and drain queue so we don't
  * have any pending frames. Returns -EBUSY if we still have pending frames,
  * -EINVAL if queue number is out of range or inactive.
- *
  */
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 {
        unsigned int i = 40;
        u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
  *
  * Returns -EIO if queue didn't stop
  */
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
 {
        int ret;
        ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
  *
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
  *
  * XXX: Is TXDP read and clear ?
  */
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
 {
        u16 tx_reg;
 
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
 }
 
 /**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
  * @ah: The &struct ath5k_hw
  * @queue: The hw queue number
+ * @phys_addr: The physical address
  *
  * Set TX descriptor's address for a specific queue. For 5210 we ignore
  * the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
  * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
  * active.
  */
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
 {
        u16 tx_reg;
 
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
 }
 
 /**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
  * @ah: The &struct ath5k_hw
  * @increase: Flag to force increase of trigger level
  *
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
  * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
  * the buffer and transmits its data. Lowering this results sending small
  * frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * (64 bytes) and if we get a tx underrun we increase it using the increase
+ * flag. Returns -EIO if we have reached maximum/minimum.
  *
  * XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
  */
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
 {
        u32 trigger_level, imr;
        int ret = -EIO;
@@ -498,21 +494,20 @@ done:
 \*******************/
 
 /**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
  * @ah: The &struct ath5k_hw
  *
  * Check if we have pending interrupts to process. Returns 1 if we
  * have pending interrupts and 0 if we haven't.
  */
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
 {
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
 }
 
 /**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
  * @ah: The @struct ath5k_hw
  * @interrupt_mask: Driver's interrupt mask used to filter out
  * interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
  * being mapped on some standard non hw-specific positions
  * (check out &ath5k_int).
  *
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
  */
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
 {
-       u32 data;
+       u32 data = 0;
 
        /*
-        * Read interrupt status from the Interrupt Status register
-        * on 5210
+        * Read interrupt status from Primary Interrupt
+        * Register.
+        *
+        * Note: PISR/SISR Not available on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
-               data = ath5k_hw_reg_read(ah, AR5K_ISR);
-               if (unlikely(data == AR5K_INT_NOCARD)) {
-                       *interrupt_mask = data;
+               u32 isr = 0;
+               isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+               if (unlikely(isr == AR5K_INT_NOCARD)) {
+                       *interrupt_mask = isr;
                        return -ENODEV;
                }
-       } else {
+
                /*
-                * Read interrupt status from Interrupt
-                * Status Register shadow copy (Read And Clear)
-                *
-                * Note: PISR/SISR Not available on 5210
+                * Filter out the non-common bits from the interrupt
+                * status.
                 */
-               data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
-               if (unlikely(data == AR5K_INT_NOCARD)) {
-                       *interrupt_mask = data;
+               *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+               /* Handle INT_FATAL */
+               if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+                                               | AR5K_ISR_DPERR)))
+                       *interrupt_mask |= AR5K_INT_FATAL;
+
+               /*
+                * XXX: BMISS interrupts may occur after association.
+                * I found this on 5210 code but it needs testing. If this is
+                * true we should disable them before assoc and re-enable them
+                * after a successful assoc + some jiffies.
+                       interrupt_mask &= ~AR5K_INT_BMISS;
+                */
+
+               data = isr;
+       } else {
+               u32 pisr = 0;
+               u32 pisr_clear = 0;
+               u32 sisr0 = 0;
+               u32 sisr1 = 0;
+               u32 sisr2 = 0;
+               u32 sisr3 = 0;
+               u32 sisr4 = 0;
+
+               /* Read PISR and SISRs... */
+               pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+               if (unlikely(pisr == AR5K_INT_NOCARD)) {
+                       *interrupt_mask = pisr;
                        return -ENODEV;
                }
-       }
 
-       /*
-        * Get abstract interrupt mask (driver-compatible)
-        */
-       *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+               sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+               sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+               sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+               sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+               sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
 
-       if (ah->ah_version != AR5K_AR5210) {
-               u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+               /*
+                * PISR holds the logical OR of interrupt bits
+                * from SISR registers:
+                *
+                * TXOK and TXDESC  -> Logical OR of TXOK and TXDESC
+                *                      per-queue bits on SISR0
+                *
+                * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+                *                      per-queue bits on SISR1
+                *
+                * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+                *
+                * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
+                *
+                * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
+                *              BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+                *              (and TSFOOR ?) bits on SISR2
+                *
+                * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+                *                      QCBRURN per-queue bits on SISR3
+                * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+                *
+                * If we clear these bits on PISR we'll also clear all
+                * related bits from SISRs, e.g. if we write the TXOK bit on
+                * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
+                * interrupt fired for another queue while we were reading
+                * the interrupt registers and we write back the TXOK bit on
+                * PISR we'll lose it. So make sure that we don't write back
+                * on PISR any bits that come from SISRs. Clearing them from
+                * SISRs will also clear PISR so no need to worry here.
+                */
 
-               /*HIU = Host Interface Unit (PCI etc)*/
-               if (unlikely(data & (AR5K_ISR_HIUERR)))
-                       *interrupt_mask |= AR5K_INT_FATAL;
+               pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
 
-               /*Beacon Not Ready*/
-               if (unlikely(data & (AR5K_ISR_BNR)))
-                       *interrupt_mask |= AR5K_INT_BNR;
+               /*
+                * Write to clear them...
+                * Note: This means that each bit we write back
+                * to the registers will get cleared, leaving the
+                * rest unaffected. So this won't affect new interrupts
+                * we didn't catch while reading/processing, we'll get
+                * them next time get_isr gets called.
+                */
+               ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+               ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+               ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+               ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+               ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+               ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+               /* Flush previous write */
+               ath5k_hw_reg_read(ah, AR5K_PISR);
 
-               if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
-                                       AR5K_SISR2_DPERR |
-                                       AR5K_SISR2_MCABT)))
-                       *interrupt_mask |= AR5K_INT_FATAL;
+               /*
+                * Filter out the non-common bits from the interrupt
+                * status.
+                */
+               *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+               /* We treat TXOK, TXDESC, TXERR and TXEOL
+                * the same way (schedule the tx tasklet)
+                * so we track them all together per queue */
+               if (pisr & AR5K_ISR_TXOK)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+                                               AR5K_SISR0_QCU_TXOK);
 
-               if (data & AR5K_ISR_TIM)
+               if (pisr & AR5K_ISR_TXDESC)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+                                               AR5K_SISR0_QCU_TXDESC);
+
+               if (pisr & AR5K_ISR_TXERR)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+                                               AR5K_SISR1_QCU_TXERR);
+
+               if (pisr & AR5K_ISR_TXEOL)
+                       ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+                                               AR5K_SISR1_QCU_TXEOL);
+
+               /* Currently this is not very useful since we treat
+                * all queues the same way if we get a TXURN (update
+                * tx trigger level) but we might need it later on */
+               if (pisr & AR5K_ISR_TXURN)
+                       ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+                                               AR5K_SISR2_QCU_TXURN);
+
+               /* Misc Beacon related interrupts */
+
+               /* For AR5211 */
+               if (pisr & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;
 
-               if (data & AR5K_ISR_BCNMISC) {
+               /* For AR5212+ */
+               if (pisr & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }
 
-               if (data & AR5K_ISR_RXDOPPLER)
-                       *interrupt_mask |= AR5K_INT_RX_DOPPLER;
-               if (data & AR5K_ISR_QCBRORN) {
+               /* Below interrupts are unlikely to happen */
+
+               /* HIU = Host Interface Unit (PCI etc)
+                * Can be one of MCABT, SSERR, DPERR from SISR2 */
+               if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+                       *interrupt_mask |= AR5K_INT_FATAL;
+
+               /*Beacon Not Ready*/
+               if (unlikely(pisr & (AR5K_ISR_BNR)))
+                       *interrupt_mask |= AR5K_INT_BNR;
+
+               /* A queue got CBR overrun */
+               if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
                        *interrupt_mask |= AR5K_INT_QCBRORN;
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
-                                       AR5K_SISR3_QCBRORN);
+                       ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+                                               AR5K_SISR3_QCBRORN);
                }
-               if (data & AR5K_ISR_QCBRURN) {
+
+               /* A queue got CBR underrun */
+               if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
                        *interrupt_mask |= AR5K_INT_QCBRURN;
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
-                                       AR5K_SISR3_QCBRURN);
+                       ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+                                               AR5K_SISR3_QCBRURN);
                }
-               if (data & AR5K_ISR_QTRIG) {
+
+               /* A queue got triggered */
+               if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
                        *interrupt_mask |= AR5K_INT_QTRIG;
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
-                                       AR5K_SISR4_QTRIG);
+                       ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+                                               AR5K_SISR4_QTRIG);
                }
 
-               if (data & AR5K_ISR_TXOK)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
-                                       AR5K_SISR0_QCU_TXOK);
-
-               if (data & AR5K_ISR_TXDESC)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
-                                       AR5K_SISR0_QCU_TXDESC);
-
-               if (data & AR5K_ISR_TXERR)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
-                                       AR5K_SISR1_QCU_TXERR);
-
-               if (data & AR5K_ISR_TXEOL)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
-                                       AR5K_SISR1_QCU_TXEOL);
-
-               if (data & AR5K_ISR_TXURN)
-                       ah->ah_txq_isr |= AR5K_REG_MS(
-                                       ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
-                                       AR5K_SISR2_QCU_TXURN);
-       } else {
-               if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
-                               | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
-                       *interrupt_mask |= AR5K_INT_FATAL;
-
-               /*
-                * XXX: BMISS interrupts may occur after association.
-                * I found this on 5210 code but it needs testing. If this is
-                * true we should disable them before assoc and re-enable them
-                * after a successful assoc + some jiffies.
-                       interrupt_mask &= ~AR5K_INT_BMISS;
-                */
+               data = pisr;
        }
 
        /*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
 }
 
 /**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
  * @ah: The &struct ath5k_hw
  * @new_mask: The new interrupt mask to be set
  *
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
  * ath5k_int bits to hw-specific bits to remove abstraction and writing
  * Interrupt Mask Register.
  */
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
 {
        enum ath5k_int old_mask, int_mask;
 
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;
 
+               /* Fatal interrupt abstraction for 5211+ */
                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }
 
-               /*Beacon Not Ready*/
-               if (new_mask & AR5K_INT_BNR)
-                       int_mask |= AR5K_INT_BNR;
-
+               /* Misc beacon related interrupts */
                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;
 
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;
 
-               if (new_mask & AR5K_INT_RX_DOPPLER)
-                       int_mask |= AR5K_IMR_RXDOPPLER;
+               /*Beacon Not Ready*/
+               if (new_mask & AR5K_INT_BNR)
+                       int_mask |= AR5K_INT_BNR;
 
                /* Note: Per queue interrupt masks
                 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
 
        } else {
+               /* Fatal interrupt abstraction for 5210 */
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
 
+               /* Only common interrupts left for 5210 (no SIMRs) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }
 
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
 \********************/
 
 /**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
  * @ah: The &struct ath5k_hw
  *
  * Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
  *
  * XXX: Save/restore RXDP/TXDP registers ?
  */
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
 {
        /*
         * Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
  * @ah: The &struct ath5k_hw
  *
  * Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
  * stuck frames on tx queues, only a reset
  * can fix that.
  */
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
 {
        int i, qmax, err;
        err = 0;
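To show how the interrupt helpers above are meant to be consumed, here is a minimal, hypothetical top-half sketch (example_isr() and its registration are assumptions made for illustration; the real driver's handler does considerably more):

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct ath5k_hw *ah = dev_id;
		enum ath5k_int status;

		/* Cheap pending check before the heavier read-and-clear */
		if (!ath5k_hw_is_intr_pending(ah))
			return IRQ_NONE;

		/* Read PISR/SISRs, clear them and get the abstract mask */
		if (ath5k_hw_get_isr(ah, &status))
			return IRQ_HANDLED;	/* -ENODEV, card went away */

		if (status & AR5K_INT_FATAL)
			/* schedule a hw reset here */;

		return IRQ_HANDLED;
	}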
index 8592978..73d3dd8 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output pin on GPIO control
+ * register and then read or set its status from GPIO data input/output
+ * registers.
+ *
+ * We also control the two LED pins provided by the hw: LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED, but the hw supports
+ * other scenarios as well. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle them through the LED subsystem in led.c
+ */
+
+
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LED connected to the LED_0, LED_1 pins,
+ * not the GPIO based.
  */
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
 {
        u32 led;
        /*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
                AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
 }
 
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
  */
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
 {
        if (gpio >= AR5K_NUM_GPIO)
                return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
        return 0;
 }
 
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
  */
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
 {
        if (gpio >= AR5K_NUM_GPIO)
                return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
        return 0;
 }
 
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
  */
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
 {
        if (gpio >= AR5K_NUM_GPIO)
                return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
                0x1;
 }
 
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
  */
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
 {
        u32 data;
 
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
        return 0;
 }
 
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in the EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/Wireless to save power (we also get that from EEPROM).
  */
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
                u32 interrupt_level)
 {
        u32 data;
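A minimal usage sketch for the GPIO accessors above (pin number 0 and the helper name example_gpio_led_set() are assumptions made purely for illustration; real pin assignments come from the EEPROM/vendor):

	/* Hypothetical: drive a vendor LED wired to GPIO pin 0 */
	static int example_gpio_led_set(struct ath5k_hw *ah, bool on)
	{
		int ret;

		ret = ath5k_hw_set_gpio_output(ah, 0);
		if (ret)
			return ret;	/* -EINVAL for pins >= AR5K_NUM_GPIO */

		return ath5k_hw_set_gpio(ah, 0, on ? 1 : 0);
	}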
index 1ffecc0..a1ea78e 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
  */
-
 struct ath5k_ini {
        u16     ini_register;
        u32     ini_value;
 
        enum {
                AR5K_INI_WRITE = 0,     /* Default */
-               AR5K_INI_READ = 1,      /* Cleared on read */
+               AR5K_INI_READ = 1,
        } ini_mode;
 };
 
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
  */
-
 struct ath5k_ini_mode {
        u16     mode_register;
        u32     mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
 
 /* Initial mode-specific settings for AR5211
  * 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it ! */
 static const struct ath5k_ini_mode ar5211_ini_mode[] = {
        { AR5K_TXCFG,
-       /*      A/XR          B           G       */
+       /*      A          B           G       */
           { 0x00000015, 0x0000001d, 0x00000015 } },
        { AR5K_QUEUE_DFS_LOCAL_IFS(0),
           { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
           { 0x00000010, 0x00000010, 0x00000010 } },
 };
 
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
 static const struct ath5k_ini ar5212_ini_common_start[] = {
        { AR5K_RXDP,            0x00000000 },
        { AR5K_RXCFG,           0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
           { 0x00000000, 0x00000000, 0x00000108 } },
 };
 
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
        { AR5K_TXCFG,
        /*      A/XR          B           G       */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
           { 0x1883800a, 0x1873800a, 0x1883800a } },
 };
 
+/* Common for all modes */
 static const struct ath5k_ini rf5111_ini_common_end[] = {
        { AR5K_DCU_FP,          0x00000000 },
        { AR5K_PHY_AGC,         0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
        { 0xa23c,               0x13c889af },
 };
 
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
        { AR5K_TXCFG,
        /*      A/XR          B           G       */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
        { 0xa23c,               0x13c889af },
 };
 
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
 static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
        { AR5K_TXCFG,
        /*      A/XR          B           G       */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
        { 0xa384, 0xf3307ff0 },
 };
 
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
 /* XXX: a mode ? */
 static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
        { AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
        { 0xa384, 0xf3307ff0 },
 };
 
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
 /* XXX: a mode ? */
 static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
        { AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
 };
 
 
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
  */
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
                const struct ath5k_ini *ini_regs, bool skip_pcu)
 {
        unsigned int i;
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
        }
 }
 
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
                unsigned int size, const struct ath5k_ini_mode *ini_mode,
                u8 mode)
 {
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
 
 }
 
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset on a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
 {
        /*
         * Write initial register settings
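Conceptually, replaying one of these tables is just a loop over {register, value} pairs; a rough sketch of what ath5k_hw_ini_registers() boils down to (example_write_ini() is a made-up name and the skip_pcu handling is ignored, so this is illustrative only):

	static void example_write_ini(struct ath5k_hw *ah,
				      const struct ath5k_ini *ini_regs,
				      unsigned int size)
	{
		unsigned int i;

		for (i = 0; i < size; i++) {
			if (ini_regs[i].ini_mode == AR5K_INI_READ)
				/* read-and-clear entry: the read itself is the side effect */
				ath5k_hw_reg_read(ah, ini_regs[i].ini_register);
			else
				ath5k_hw_reg_write(ah, ini_regs[i].ini_value,
						   ini_regs[i].ini_register);
		}
	}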
index dfa48eb..849fa06 100644 (file)
@@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
                                        0xffff);
                        return true;
                }
-               udelay(15);
+               usleep_range(15, 20);
        }
 
        return false;
index a7eafa3..cebfd6f 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The Protocol Control Unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from
+ * the baseband. To be more specific, the PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in software above for debugging or research. For more info check out
+ * the PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
  * AR5212+ can use higher rates for ack transmission
  * based on current tx rate instead of the base rate.
  * It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
  * CCK and OFDM) and ack rates that we use when setting
  * rate -> duration table. This mapping is hw-based so
  * don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
 \*******************/
 
 /**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
  * @ah: The &struct ath5k_hw
  * @len: Frame's length in bytes
  * @rate: The @struct ieee80211_rate
+ * @shortpre: Indicate short preamble
  *
  * Calculate tx duration of a frame given its rate and length
  * It extends ieee80211_generic_frame_duration for non standard
  * bwmodes.
  */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
                int len, struct ieee80211_rate *rate, bool shortpre)
 {
        int sifs, preamble, plcp_bits, sym_time;
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
 }
 
 /**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
  * @ah: The &struct ath5k_hw
  */
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
  * @ah: The &struct ath5k_hw
  */
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
  * @ah: The &struct ath5k_hw
  *
  * Reads MIB counters from PCU and updates sw statistics. Is called after a
  * MIB interrupt, because one of these counters might have reached their maximum
  * and triggered the MIB interrupt, to let us read and clear the counter.
  *
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
  */
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
 {
        struct ath5k_statistics *stats = &ah->stats;
 
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
 \******************/
 
 /**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
  *
  * Write the rate code to duration table upon hw reset. This is a helper for
  * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
  * that include all OFDM and CCK rates.
  *
  */
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
 {
        struct ieee80211_rate *rate;
        unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
  * @ah: The &struct ath5k_hw
  * @timeout: Timeout in usec
  */
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
        if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
                        <= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
 }
 
 /**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
  * @ah: The &struct ath5k_hw
  * @timeout: Timeout in usec
  */
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 {
        if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
                        <= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
 \*******************/
 
 /**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
  * @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
  *
  * Set station id on hw using the provided mac address
  */
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 }
 
 /**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
  * @ah: The &struct ath5k_hw
  *
  * Sets the current BSSID and BSSID mask we have from the
  * common struct into the hardware
  */
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
        ath5k_hw_enable_pspoll(ah, NULL, 0);
 }
 
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Filter out BSSIDs we listen to
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
 {
        struct ath_common *common = ath5k_hw_common(ah);
 
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
                ath_hw_setbssidmask(common);
 }
 
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32 bits of multicast filter
+ * @filter1: Higher 16 bits of multicast filter
  */
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
 {
        ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
        ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
 }
 
 /**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
  * @ah: The &struct ath5k_hw
  *
  * Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
  * and pass to the driver. For a list of frame types
  * check out reg.h.
  */
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
 {
        u32 data, filter = 0;
 
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
 }
 
 /**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
  * @ah: The &struct ath5k_hw
  * @filter: RX filter mask (see reg.h)
  *
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
  * register on 5212 and newer chips so that we have proper PHY
  * error reporting.
  */
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
 {
        u32 data = 0;
 
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
 #define ATH5K_MAX_TSF_READ 10
 
 /**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
  * @ah: The &struct ath5k_hw
  *
  * Returns the current TSF
  */
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
 {
        u32 tsf_lower, tsf_upper1, tsf_upper2;
        int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
        return ((u64)tsf_upper1 << 32) | tsf_lower;
 }
 
+#undef ATH5K_MAX_TSF_READ
+
 /**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
  * @ah: The &struct ath5k_hw
  * @tsf64: The new 64bit TSF
  *
  * Sets the new TSF
  */
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
 {
        ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
        ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
 }
 
 /**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
  * @ah: The &struct ath5k_hw
  *
  * Forces a TSF reset on PCU
  */
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
 {
        u32 val;
 
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
        ath5k_hw_reg_write(ah, val, AR5K_BEACON);
 }
 
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
  */
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
 {
        u32 timer1, timer2, timer3;
 
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
 }
 
 /**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
  * @a: timer a (before b)
  * @b: timer b (after a)
  * @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
 }
 
 /**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
  * @ah: The &struct ath5k_hw
  * @intval: beacon interval
  *
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
  *
  * The need for this function arises from the fact that we have 4 separate
  * HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
 }
 
 /**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
  * @ah: The &struct ath5k_hw
  * @coverage_class: IEEE 802.11 coverage class number
  *
  * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
  */
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
 {
        /* As defined by IEEE 802.11-2007 17.3.8.6 */
        int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
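For a quick sanity check of the visible arithmetic: assuming the standard 9 usec OFDM slot from ath5k_hw_get_default_slottime(), coverage class 2 yields a slot time of 9 + 3 * 2 = 15 usec; the ACK/CTS timeouts this function also programs grow with the coverage class to cover the longer air propagation time.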
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
 \***************************/
 
 /**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
  * @ah: The &struct ath5k_hw
  *
  * Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
  *
  * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
  */
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
 {
        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
 /**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * at5k_hw_stop_rx_pcu() - Stop RX engine
  * @ah: The &struct ath5k_hw
  *
  * Stops RX engine on PCU
  */
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
 {
        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
 }
 
 /**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
  * @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
  *
  * Configure PCU for the various operating modes (AP/STA etc)
  */
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
        return 0;
 }
 
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
-                                                               u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
 {
        /* Set bssid and bssid mask */
        ath5k_hw_set_bssid(ah);
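To make the BSSID-mask description in ath5k_hw_set_bssid_mask() above concrete: the mask keeps a 1 in every bit position where all of our addresses agree, so the PCU only compares those positions. A hedged two-address sketch (ath_hw_setbssidmask() in the common ath module is what finally programs the hw; example_calc_bssid_mask() is made up for illustration):

	static void example_calc_bssid_mask(const u8 *addr_a, const u8 *addr_b,
					    u8 *mask)
	{
		int i;

		/* Bits where the two addresses differ are cleared, i.e.
		 * ignored by the hardware when matching RX frames. */
		for (i = 0; i < ETH_ALEN; i++)
			mask[i] = ~(addr_a[i] ^ addr_b[i]);
	}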
index 01cb72d..e1f8613 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * PHY functions
- *
  * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
  * Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
  * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
  *
  */
 
+/***********************\
+* PHY related functions *
+\***********************/
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 #include "../regd.h"
 
 
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also keep in mind that we never got documentation for most of these
+ * functions; what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
  */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
 {
        unsigned int i;
        u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
                return 0;
        }
 
-       mdelay(2);
+       usleep_range(2000, 2500);
 
        /* ...wait until PHY is ready and read the selected radio revision */
        ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
        return ret;
 }
 
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
  */
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 {
        u16 freq = channel->center_freq;
 
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
        return false;
 }
 
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
                                struct ieee80211_channel *channel)
 {
        u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
                return false;
 }
 
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: Indicate we need to swap data
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * infos.
  */
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
-                                       const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
                                        u32 val, u8 reg_id, bool set)
 {
        const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
 }
 
 /**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
  * @ah: the &struct ath5k_hw
  * @channel: the currently set channel upon reset
  *
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
  * mantissa and provide these values on hw.
  *
  * For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
  */
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
-       struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+                               struct ieee80211_channel *channel)
 {
        /* Get exponent and mantissa and set it */
        u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
        return 0;
 }
 
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
 int ath5k_hw_phy_disable(struct ath5k_hw *ah)
 {
        /*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
        return 0;
 }
 
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
                        struct ieee80211_channel *channel)
 {
        /*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
                        delay = delay << 2;
                /* XXX: /2 on turbo ? Let's be safe
                 * for now */
-               udelay(100 + delay);
+               usleep_range(100 + delay, 100 + (2 * delay));
        } else {
-               mdelay(1);
+               usleep_range(1000, 1500);
        }
 }
 
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
 * RF Gain optimization *
 \**********************/
 
-/*
+/**
+ * DOC: RF Gain optimization
+ *
  * This code is used to optimize RF gain on different environments
  * (temperature mostly) based on feedback from a power detector.
  *
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
  * no gain optimization ladder-.
  *
  * For more infos check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
  *
  * This paper describes power drops as seen on the receiver due to
  * probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
  *
  * And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
  * with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
  */
 
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
 int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
 {
        /* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
        return 0;
 }
 
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
  * That means our next packet is going to be sent with lower
  * tx power and a Peak to Average Power Detector (PAPD) will try
  * to measure the gain.
  *
- * XXX:  How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
  * just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
  */
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
 {
 
        /* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
 
 }
 
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
 {
        u32 mix, step;
        u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
        return ah->ah_gain.g_f_corr;
 }
 
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
  * power detector windows. If we get a measurement outside range
  * we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
 {
        const struct ath5k_rf_reg *rf_regs;
        u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
                        ah->ah_gain.g_current <= level[3]);
 }
 
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
 {
        const struct ath5k_gain_opt *go;
        const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
        return ret;
 }
 
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
  * Check for a new gain reading and schedule an adjustment
  * if needed.
  *
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
 {
        u32 data, type;
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
        return ah->ah_gain.g_state;
 }
 
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
 {
        const struct ath5k_ini_rfgain *ath5k_rfg;
        unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
 }
 
 
-
 /********************\
 * RF Registers setup *
 \********************/
 
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more infos on this, check out rfbuffer.h
  */
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
-       struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+                       struct ieee80211_channel *channel,
+                       unsigned int mode)
 {
        const struct ath5k_rf_reg *rf_regs;
        const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
   PHY/RF channel functions
 \**************************/
 
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
  */
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
 {
        u32 athchan;
 
-       /*
-        * Convert IEEE channel/MHz to an internal channel value used
-        * by the AR5210 chipset. This has not been verified with
-        * newer chipsets like the AR5212A who have a completely
-        * different RF/PHY part.
-        */
        athchan = (ath5k_hw_bitswap(
                        (ieee80211_frequency_to_channel(
                                channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
        return athchan;
 }
 
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
        data = ath5k_hw_rf5110_chan2athchan(channel);
        ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
        ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        return 0;
 }
 
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
  */
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
                struct ath5k_athchan_2ghz *athchan)
 {
        int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
        return 0;
 }
 
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
  */
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value after a few modifications to the
+ * chip directly.
+ *
+ * NOTE: Make sure the channel frequency given is within our range or else
+ * we might damage the chip! Use ath5k_channel_ok before calling this one.
  */
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
        data = data0 = data1 = data2 = 0;
        c = channel->center_freq;
 
+       /* My guess based on code:
+        * 2GHz RF has 2 synth modes, one with a Local Oscillator
+        * at 2224MHz and one with a LO at 2192MHz. IF is 1520MHz
+        * (3040/2). data0 is used to set the PLL divider and data1
+        * selects synth mode. */
        if (c < 4800) {
+               /* Channel 14 and all frequencies with 2MHz spacing
+                * below/above (non-standard channels) */
                if (!((c - 2224) % 5)) {
+                       /* Same as (c - 2224) / 5 */
                        data0 = ((2 * (c - 704)) - 3040) / 10;
                        data1 = 1;
+               /* Channel 1 and all frequencies with 5MHz spacing
+                * below/above (standard channels without channel 14) */
                } else if (!((c - 2192) % 5)) {
+                       /* Same as (c - 2192) / 5 */
                        data0 = ((2 * (c - 672)) - 3040) / 10;
                        data1 = 0;
                } else
                        return -EINVAL;
 
                data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+       /* This is more complex, we have a single synthesizer with
+        * 4 reference clock settings (?) based on frequency spacing
+        * and set using data2. LO is at 4800MHz and data0 is again used
+        * to set some divider.
+        *
+        * NOTE: There is an old atheros presentation at Stanford
+        * that mentions a method called dual direct conversion
+        * with 1GHz sliding IF for RF5110. Maybe that's what we
+        * have here, or an updated version. */
        } else if ((c % 5) != 2 || c > 5435) {
                if (!(c % 20) && c >= 5120) {
                        data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
        return 0;
 }
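
A quick worked example of the 2GHz branch above, using values taken from the
code shown (illustrative only):

	/* channel 1,  c = 2412: (c - 2224) % 5 = 3, (c - 2192) % 5 = 0
	 *   -> data0 = ((2 * (2412 - 672)) - 3040) / 10 = 44   (= (c - 2192) / 5)
	 *   -> data1 = 0
	 * channel 14, c = 2484: (c - 2224) % 5 = 0
	 *   -> data0 = ((2 * (2484 - 704)) - 3040) / 10 = 52   (= (c - 2224) / 5)
	 *   -> data1 = 1
	 * data0 is then shifted and bitswapped before being written to the
	 * RF buffer. */
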
 
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF so the code differs
+ * a little from RF5112.
  */
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
  */
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
        return 0;
 }
 
+
 /*****************\
   PHY calibration
 \*****************/
 
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL bit, hw disconnects the antennas and does
+ * a calibration on the DC offsets of ADCs. During this period
+ * rx/tx gets disabled so we have to deal with it on the driver
+ * part.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more info on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
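
Condensed into code, the noise floor flow described above looks roughly like
the following sketch of the sequence implemented by ath5k_hw_update_noise_floor
further down (EEPROM threshold checks and debug output are left out):

	s16 nf;

	/* wait for the measurement we previously started to complete */
	if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
				      AR5K_PHY_AGCCTL_NF, 0, false))
		return;		/* still running, try again on the next pass */

	nf = ath5k_hw_read_measured_noise_floor(ah);
	ath5k_hw_update_nfcal_hist(ah, nf);
	nf = ath5k_hw_get_median_noise_floor(ah);
	/* the median is then written back to the PHY for CCA/RSSI use
	 * and a new measurement is started */
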
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
 {
        s32 val;
 
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
        return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
 }
 
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
 {
        int i;
 
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
                ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
 }
 
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
 static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
 {
        struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
        hist->nfval[hist->index] = noise_floor;
 }
 
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
 {
        s16 sort[ATH5K_NF_CAL_HIST_MAX];
        s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
        return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
 }
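
For example, with a hypothetical 5-entry history (the real buffer holds
ATH5K_NF_CAL_HIST_MAX entries) the median pick works out as:

	/* nfval[] = { -95, -90, -98, -102, -92 }   (dBm, most recent last)
	 * sorted  = { -102, -98, -95, -92, -90 }
	 * median  = sort[(5 - 1) / 2] = sort[2] = -95 dBm
	 * -> -95 dBm is what ends up being programmed into the hardware. */
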
 
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
  *
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration;
+ * it reads the NF from hardware, calculates the median and updates
+ * the NF on hw.
  */
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
                return;
        }
 
+       ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
        ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
 
        /* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
 
        ah->ah_noise_floor = nf;
 
+       ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
        ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
                "noise floor calibrated: %d\n", nf);
 }
 
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
  */
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        u32 phy_sig, phy_agc, phy_sat, beacon;
        int ret;
 
+       if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+               return 0;
+
        /*
         * Disable beacons and RX/TX queues, wait
         */
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
        beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
        ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
 
-       mdelay(2);
+       usleep_range(2000, 2500);
 
        /*
         * Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
         * Activate PHY and wait
         */
        ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
 
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
        ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
        AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
 
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        /*
         * Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
        return 0;
 }
 
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
  */
 static int
 ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
        s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
        int i;
 
-       if (!ah->ah_calibration ||
-               ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
-               return 0;
+       /* Skip if I/Q calibration is not needed or if it's still running */
+       if (!ah->ah_iq_cal_needed)
+               return -EINVAL;
+       else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+               ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+                               "I/Q calibration still running");
+               return -EBUSY;
+       }
 
        /* Calibration has finished, get the results and re-run */
-       /* work around empty results which can apparently happen on 5212 */
+
+       /* Workaround for empty results which can apparently happen on 5212:
+        * Read registers up to 10 times until we get both i_pwr and q_pwr */
        for (i = 0; i <= 10; i++) {
                iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
                i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
        else
                q_coffd = q_pwr >> 7;
 
-       /* protect against divide by 0 and loss of sign bits */
+       /* In case i_coffd became zero, cancel calibration;
+        * not only is it too small, it would also result in a divide
+        * by zero later on. */
        if (i_coffd == 0 || q_coffd < 2)
-               return 0;
+               return -ECANCELED;
+
+       /* Protect against loss of sign bits */
 
        i_coff = (-iq_corr) / i_coffd;
        i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
        return 0;
 }
 
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
  */
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
                return ath5k_hw_rf5110_calibrate(ah, channel);
 
        ret = ath5k_hw_rf511x_iq_calibrate(ah);
+       if (ret) {
+               ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+                       "No I/Q correction performed (%uMHz)\n",
+                       channel->center_freq);
+
+               /* Happens all the time if there is not much
+                * traffic, consider it normal behaviour. */
+               ret = 0;
+       }
+
+       /* On full calibration do an AGC calibration and
+        * request a PAPD probe for gainf calibration if
+        * needed */
+       if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
 
-       if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
-           (channel->hw_value != AR5K_MODE_11B))
-               ath5k_hw_request_rfgain_probe(ah);
+               AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+                                       AR5K_PHY_AGCCTL_CAL);
+
+               ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+                       AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+                       0, false);
+               if (ret) {
+                       ATH5K_ERR(ah,
+                               "gain calibration timeout (%uMHz)\n",
+                               channel->center_freq);
+               }
+
+               if ((ah->ah_radio == AR5K_RF5111 ||
+                       ah->ah_radio == AR5K_RF5112)
+                       && (channel->hw_value != AR5K_MODE_11B))
+                       ath5k_hw_request_rfgain_probe(ah);
+       }
+
+       /* Update noise floor
+        * XXX: Only do this after AGC calibration */
+       if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+               ath5k_hw_update_noise_floor(ah);
 
        return ret;
 }
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
 * Spur mitigation functions *
 \***************************/
 
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects; for more information on this
+ * method check out patent US7643810
+ */
 static void
 ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
                                struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
 * Antenna control *
 \*****************/
 
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas! I haven't found any card that implements
+ * that. The maximum number of antennas I've seen is 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A        -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B        -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> STA locked on a single AP
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode: A -> Rx, B -> Tx
+ *
+ * Also note that when setting the antenna to F on the tx descriptor, the card
+ * inverts the current tx antenna.
+ */
+
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
 ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
 {
        if (ah->ah_version != AR5K_AR5210)
                ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
 }
 
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() -  Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
  */
 static void
 ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
        }
 }
 
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
 void
 ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
 {
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
                AR5K_PHY_ANT_SWITCH_TABLE_1);
 }
 
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() -  Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
  */
 void
 ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
  * Helper functions
  */
 
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
  */
 static s16
 ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
        return result;
 }
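
The interpolation itself is the usual two-point linear form; a minimal
standalone sketch (interp_sketch is an illustrative name, and the driver's
guard against x_left == x_right is omitted):

	static inline s16
	interp_sketch(s16 target, s16 x_left, s16 x_right, s16 y_left, s16 y_right)
	{
		/* y grows from y_left with the slope between the two points,
		 * scaled by how far target sits from x_left */
		return y_left + (((y_right - y_left) * (target - x_left)) /
							(x_right - x_left));
	}
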
 
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
  *
  * Since we have the top of the curve and we draw the line below
  * until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
  */
 static s16
 ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
        return max(min_pwrL, min_pwrR);
 }
 
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
  * Interpolate (pwr,vpd) points to create a Power to PDADC or a
  * Power to PCDAC curve.
  *
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
        }
 }
 
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
  * Get the surrounding per-channel power calibration piers
  * for a given frequency so that we can interpolate between
  * them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
        *pcinfo_r = &pcinfo[idx_r];
 }
 
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
  * Get the surrounding per-rate power calibration data
  * for a given frequency and interpolate between power
  * values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
  */
 static void
 ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
                                        rpinfo[idx_r].target_power_54);
 }
 
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
  * Get the max edge power for this channel if
  * we have such data from EEPROM's Conformance Test
  * Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
  * Power to PCDAC table functions
  */
 
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we get the calibrated points (piers) for it or
+ * -if we don't have calibration data for this specific channel- from the
+ * available surrounding channels we have calibration data for, after we do a
+ * linear interpolation between them. Then since we have our calibrated points
+ * for this channel, we do again a linear interpolation between them to get the
+ * whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
  *
  * No further processing is needed for RF5111, the only thing we have to
  * do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
 
 }
 
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
  *
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112.
  * RFX112 can have up to 2 curves (one for low txpower range and one for
  * higher txpower range). We need to put them both on pcdac_out and place
  * them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
        }
 }
 
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
 static void
 ath5k_write_pcdac_table(struct ath5k_hw *ah)
 {
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
  * Power to PDADC table functions
  */
 
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5dB steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
  *
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
  * We can have up to 4 pd curves, we need to do a similar process
  * as we do for RF5112. This time we don't have an edge_flag but we
  * set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
 
 }
 
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
 static void
 ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
 {
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
  * Common code for PCDAC/PDADC tables
  */
 
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
  * This is the main function that uses all of the above
  * to set PCDAC/PDADC table on hw for the current channel.
  * This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
        return 0;
 }
 
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
 static void
 ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
 {
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
                ath5k_write_pcdac_table(ah);
 }
 
-/*
- * Per-rate tx power setting
+
+/**
+ * DOC: Per-rate tx power setting
  *
- * This is the code that sets the desired tx power (below
+ * This is the code that sets the desired tx power limit (below
  * maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
-
-/*
- * Set rate power table
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
  *
  * For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
  *
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
  * rates[0] - rates[7] -> OFDM rates
  * rates[8] - rates[14] -> CCK rates
  * rates[15] -> XR rates (they all have the same power)
  */
+
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
 static void
 ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
                        struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 }
 
 
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
  */
 static int
 ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        return 0;
 }
 
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case the user or an application changes it while the PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
 {
        ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
                "changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
        return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
 }
 
+
 /*************\
  Init function
 \*************/
 
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver, it assumes PHY is in a
+ * warm reset state!
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                      u8 mode, bool fast)
 {
        struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                if (ret)
                        return ret;
 
-               mdelay(1);
+               usleep_range(1000, 1500);
 
                /*
                 * Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
                }
 
        } else if (ah->ah_version == AR5K_AR5210) {
-               mdelay(1);
+               usleep_range(1000, 1500);
                /* Disable phy and wait */
                ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
-               mdelay(1);
+               usleep_range(1000, 1500);
        }
 
        /* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
        for (i = 0; i <= 20; i++) {
                if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
                        break;
-               udelay(200);
+               usleep_range(200, 250);
        }
        ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
 
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 
        /* At the same time start I/Q calibration for QAM constellation
         * -no need for CCK- */
-       ah->ah_calibration = false;
+       ah->ah_iq_cal_needed = false;
        if (!(mode == AR5K_MODE_11B)) {
-               ah->ah_calibration = true;
+               ah->ah_iq_cal_needed = true;
                AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
                                AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
                AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
index 7766542..30b50f9 100644 (file)
  */
 
 /********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
 \********************************************/
 
 #include "ath5k.h"
 #include "reg.h"
 #include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we set up parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DCF parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs allowing us to have different DCF settings for each queue.
+ *
+ * When a frame goes into a TX queue, QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have inside
+ * its buffer or -if it's a beacon queue- if it's time to fire up the queue
+ * based on TSF etc), DCU adds backoff, IFSes etc and then a scheduler
+ * (arbitrator) decides the priority of each QCU based on its configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
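
A hypothetical caller of the queue API below would look roughly like this
(the tqi_* field names are assumed from ath5k.h and only illustrate the flow):

	struct ath5k_txq_info qinfo = {
		.tqi_aifs	= 2,
		.tqi_cw_min	= 15,
		.tqi_cw_max	= 1023,
	};
	int qnum;

	/* grab a data queue with these DCF settings */
	qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qinfo);
	if (qnum < 0)
		return qnum;	/* no free queue or invalid arguments */
	/* qnum can then be passed to the per-queue helpers below */
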
 
 
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  */
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
 {
        u32 pending;
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
        return pending;
 }
 
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  */
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
        if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
                return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
        AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
 }
 
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
  * Make sure cw is a power of 2 minus 1 and smaller than 1024
  */
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
 {
-       u32 cw = 1;
        cw_req = min(cw_req, (u16)1023);
 
-       while (cw < cw_req)
-               cw = (cw << 1) | 1;
+       /* Check if cw_req + 1 is a power of 2 */
+       if (is_power_of_2(cw_req + 1))
+               return cw_req;
 
-       return cw;
+       /* Check if cw_req is a power of 2 */
+       if (is_power_of_2(cw_req))
+               return cw_req - 1;
+
+       /* If neither of the above holds,
+        * find the closest power of 2 */
+       cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+       return cw_req;
 }
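
A few example values show how the three cases above behave:

	/*   ath5k_cw_validate(15)   -> 15    (already 2^4 - 1)
	 *   ath5k_cw_validate(16)   -> 15    (a power of 2, step down one)
	 *   ath5k_cw_validate(20)   -> 31    (round up to the next 2^n - 1)
	 *   ath5k_cw_validate(2000) -> 1023  (clamped to 1023 first) */
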
 
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
  */
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
                struct ath5k_txq_info *queue_info)
 {
        memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
        return 0;
 }
 
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
  */
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
                                const struct ath5k_txq_info *qinfo)
 {
        struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
        return 0;
 }
 
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
  */
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
                struct ath5k_txq_info *queue_info)
 {
        unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
 * Single QCU/DCU initialization *
 \*******************************/
 
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
  */
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
                                  unsigned int queue)
 {
        /* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
 }
 
 /**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
  *
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
  * and configures all queue-specific parameters.
  */
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
        struct ath5k_txq_info *tq = &ah->ah_txq[queue];
 
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 \**************************/
 
 /**
- * ath5k_hw_set_ifs_intervals  - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
  *
  * Sets the global IFS intervals on DCU (also works on AR5210) for
  * the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
 }
 
 
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on the information in
+ * ah->ah_txq* set by the driver
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
 {
        int i, ret;
 
index f5c1000..0ea1608 100644 (file)
  * 5211/5212 we have one primary and 4 secondary registers.
  * So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
  * Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR from per-queue interrupt bits found on SISR registers
+ * (see below).
  */
 #define AR5K_ISR               0x001c                  /* Register Address [5210] */
 #define AR5K_PISR              0x0080                  /* Register Address [5211+] */
 #define AR5K_ISR_TXOK          0x00000040      /* Frame successfully transmitted */
 #define AR5K_ISR_TXDESC                0x00000080      /* TX descriptor request */
 #define AR5K_ISR_TXERR         0x00000100      /* Transmit error */
-#define AR5K_ISR_TXNOFRM       0x00000200      /* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM       0x00000200      /* No frame transmitted (transmit timeout)
+                                                * NOTE: We don't have per-queue info for this
+                                                * one, but we can enable it per-queue through
+                                                * TXNOFRM_QCU field on TXNOFRM register */
 #define AR5K_ISR_TXEOL         0x00000400      /* Empty TX descriptor */
 #define AR5K_ISR_TXURN         0x00000800      /* Transmit FIFO underrun */
 #define AR5K_ISR_MIB           0x00001000      /* Update MIB counters */
 #define AR5K_ISR_SWBA          0x00010000      /* Software beacon alert */
 #define AR5K_ISR_BRSSI         0x00020000      /* Beacon rssi below threshold (?) */
 #define AR5K_ISR_BMISS         0x00040000      /* Beacon missed */
-#define AR5K_ISR_HIUERR                0x00080000      /* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR                0x00080000      /* Host Interface Unit error [5211+]
+                                                * 'or' of MCABT, SSERR, DPERR from SISR2 */
 #define AR5K_ISR_BNR           0x00100000      /* Beacon not ready [5211+] */
 #define AR5K_ISR_MCABT         0x00100000      /* Master Cycle Abort [5210] */
 #define AR5K_ISR_RXCHIRP       0x00200000      /* CHIRP Received [5212+] */
 #define AR5K_ISR_SSERR         0x00200000      /* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR         0x00400000      /* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR         0x00400000      /* Bus parity error [5210] */
 #define AR5K_ISR_RXDOPPLER     0x00400000      /* Doppler chirp received [5212+] */
 #define AR5K_ISR_TIM           0x00800000      /* [5211+] */
-#define AR5K_ISR_BCNMISC       0x00800000      /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
-                                               CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC       0x00800000      /* Misc beacon related interrupt
+                                                * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+                                                * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
 #define AR5K_ISR_GPIO          0x01000000      /* GPIO (rf kill) */
 #define AR5K_ISR_QCBRORN       0x02000000      /* QCU CBR overrun [5211+] */
 #define AR5K_ISR_QCBRURN       0x04000000      /* QCU CBR underrun [5211+] */
 #define AR5K_ISR_QTRIG         0x08000000      /* QCU scheduling trigger [5211+] */
 
+#define        AR5K_ISR_BITS_FROM_SISRS        (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+                                       AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+                                       AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+                                       AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+                                       AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
 /*
  * Secondary status registers [5211+] (0 - 4)
  *
 #define        AR5K_SISR2_BCN_TIMEOUT  0x08000000      /* Beacon Timeout [5212+] */
 #define        AR5K_SISR2_CAB_TIMEOUT  0x10000000      /* CAB Timeout [5212+] */
 #define        AR5K_SISR2_DTIM         0x20000000      /* [5212+] */
-#define        AR5K_SISR2_TSFOOR       0x80000000      /* TSF OOR (?) */
+#define        AR5K_SISR2_TSFOOR       0x80000000      /* TSF Out of range */
 
 #define AR5K_SISR3             0x0090                  /* Register Address [5211+] */
 #define AR5K_SISR3_QCBRORN     0x000003ff      /* Mask for QCBRORN */
index 2abac25..4aed3a3 100644 (file)
@@ -19,9 +19,9 @@
  *
  */
 
-/*****************************\
-  Reset functions and helpers
-\*****************************/
+/****************************\
+  Reset function and helpers
+\****************************/
 
 #include <asm/unaligned.h>
 
 #include "debug.h"
 
 
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state and ready to receive. We also handle routines
+ * that don't fit in other places, such as clock, sleep and power control
+ */
+
+
 /******************\
 * Helper functions *
 \******************/
 
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns 0 on success or -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us
  */
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
                              bool is_set)
 {
        int i;
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
 \*************************/
 
 /**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
  * @ah: The &struct ath5k_hw
  * @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
  */
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        return usec * common->clockrate;
 }
 
 /**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
  * @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
  */
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
 {
        struct ath_common *common = ath5k_hw_common(ah);
        return clock / common->clockrate;
 }
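
As a quick sanity check of the two helpers above: with a hypothetical 40MHz
core clock (clockrate == 40), ath5k_hw_htoclock(ah, 10) gives 400 clock units
and ath5k_hw_clocktoh(ah, 800) gives back 20us.
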
 
 /**
- * ath5k_hw_init_core_clock - Initialize core clock
- *
- * @ah The &struct ath5k_hw
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
  *
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
  */
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
 {
        struct ieee80211_channel *channel = ah->ah_current_channel;
        struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
        }
 }
 
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
  * If there is an external 32KHz crystal available, use it
  * as ref. clock instead of 32/40MHz clock and baseband clocks
  * to save power during sleep or restore normal 32/40MHz
  * operation.
  *
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- *     123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
  */
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
        u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
 * Reset/Sleep control *
 \*********************/
 
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
  */
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
 {
        int ret;
        u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
        ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
 
        /* Wait at least 128 PCI clocks */
-       udelay(15);
+       usleep_range(15, 20);
 
        if (ah->ah_version == AR5K_AR5210) {
                val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
        return ret;
 }
 
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() -  Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
  */
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
 {
        u32 mask = flags ? flags : ~0U;
        u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
        regval = __raw_readl(reg);
        __raw_writel(regval | val, reg);
        regval = __raw_readl(reg);
-       udelay(100);
+       usleep_range(100, 150);
 
        /* Bring BB/MAC out of reset */
        __raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
        return 0;
 }
 
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check out the sleep control
+ * register on reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
  */
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
                              bool set_chip, u16 sleep_duration)
 {
        unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
 
                ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
                                                        AR5K_SLEEP_CTL);
-               udelay(15);
+               usleep_range(15, 20);
 
                for (i = 200; i > 0; i--) {
                        /* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
                                break;
 
                        /* Wait a bit and retry */
-                       udelay(50);
+                       usleep_range(50, 75);
                        ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
                                                        AR5K_SLEEP_CTL);
                }
@@ -523,17 +589,20 @@ commit:
        return 0;
 }
 
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
  *
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clean sleep control register). After this MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
  * without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
  */
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
 {
        struct pci_dev *pdev = ah->pdev;
        u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
                return 0;
 
        /* Make sure device is awake */
-       ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+       ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
        if (ret) {
                ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
                return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
                        AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-                       mdelay(2);
+                       usleep_range(2000, 2500);
        } else {
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
        }
 
        /* ...wakeup again!*/
-       ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+       ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
        if (ret) {
                ATH5K_ERR(ah, "failed to put device on hold\n");
                return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
        return ret;
 }
 
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
  * Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
  */
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 {
        struct pci_dev *pdev = ah->pdev;
        u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 
        if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
                /* Wakeup the device */
-               ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+               ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
                if (ret) {
                        ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
                        return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
                        AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
                        AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
-                       mdelay(2);
+                       usleep_range(2000, 2500);
        } else {
                if (ath5k_get_bus_type(ah) == ATH_AHB)
                        ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
        }
 
        /* ...wakeup again!...*/
-       ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+       ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
        if (ret) {
                ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
                return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
                /* ...update PLL if needed */
                if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
                        ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
-                       udelay(300);
+                       usleep_range(300, 350);
                }
 
                /* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
 * Post-initvals register modifications *
 \**************************************/
 
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled by initvals, e.g. bwmode
+ * settings, some PHY settings, workarounds etc. that in general
+ * don't fit anywhere else or are too small to introduce a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
                                struct ieee80211_channel *channel)
 {
        if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
        }
 }
 
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored in EEPROM to properly initialize the card
+ * based on various information and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
                struct ieee80211_channel *channel)
 {
        struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
 * Main reset function *
 \*********************/
 
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
                struct ieee80211_channel *channel, bool fast, bool skip_pcu)
 {
        u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
        /*
         * Initialize PCU
         */
-       ath5k_hw_pcu_init(ah, op_mode, mode);
+       ath5k_hw_pcu_init(ah, op_mode);
 
        /*
         * Initialize PHY
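
The hunks above replace busy-wait delays (udelay()/mdelay()) with usleep_range(), which lets the scheduler actually sleep and coalesce timer wakeups; the trade-off is that it may only be used in process context. A minimal sketch of the resulting polling pattern follows; the status register and "awake" bit are hypothetical placeholders, not the driver's actual names.

#include <linux/delay.h>
#include <linux/io.h>

/* Poll a (hypothetical) status register for an "awake" bit, sleeping
 * between reads instead of spinning as udelay() would. */
static int example_wait_awake(void __iomem *status_reg)
{
	int tries;

	for (tries = 200; tries > 0; tries--) {
		if (ioread32(status_reg) & 0x1)
			return 0;		/* chip woke up */
		usleep_range(50, 75);		/* sleep, don't busy-wait */
	}

	return -EIO;				/* never woke up */
}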
index 5d11c23..aed34d9 100644 (file)
@@ -18,7 +18,9 @@
  */
 
 
-/*
+/**
+ * DOC: RF Buffer registers
+ *
  * There are some special registers on the RF chip
  * that control various operation settings related mostly to
  * the analog parts (channel, gain adjustment etc).
  */
 
 
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
  * Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
  */
 struct ath5k_ini_rfbuffer {
-       u8      rfb_bank;               /* RF Bank number */
-       u16     rfb_ctrl_register;      /* RF Buffer control register */
-       u32     rfb_mode_data[3];       /* RF Buffer data for each mode */
+       u8      rfb_bank;
+       u16     rfb_ctrl_register;
+       u32     rfb_mode_data[3];
 };
 
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
  * Struct to hold RF Buffer field
  * infos used to access certain RF
  * analog registers
  */
 struct ath5k_rfb_field {
-       u8      len;    /* Field length */
-       u16     pos;    /* Offset on the raw packet */
-       u8      col;    /* Column -used for shifting */
+       u8      len;
+       u16     pos;
+       u8      col;
 };
 
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index in &enum ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity: we can refer to an RF
+ * register by its index no matter what chip we work with,
+ * as long as it has that register.
  */
 struct ath5k_rf_reg {
-       u8                      bank;   /* RF Buffer Bank number */
-       u8                      index;  /* Register's index on rf_regs_idx */
-       struct ath5k_rfb_field  field;  /* RF Buffer field for this register */
+       u8                      bank;
+       u8                      index;
+       struct ath5k_rfb_field  field;
 };
 
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
  * We do this to handle common bits and make our
  * life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
 enum ath5k_rf_regs_idx {
        /* BANK 2 */
        AR5K_RF_TURBO = 0,
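
For reference, a simplified illustration of what a pos/len field descriptor lets you do: mask a value into a bit range of an RF bank buffer. This is only a sketch; the real ath5k helpers also use @col and a different bit ordering, neither of which is shown here.

#include <linux/types.h>

/* Write the low 'len' bits of 'val' at bit offset 'pos' of a u32 array.
 * Fields crossing a 32-bit word boundary are not handled in this sketch. */
static void example_rfb_write_field(u32 *buf, u16 pos, u8 len, u32 val)
{
	u32 mask = (len >= 32) ? ~0U : ((1U << len) - 1);
	u32 word = pos / 32;
	u32 shift = pos % 32;

	buf[word] &= ~(mask << shift);
	buf[word] |= (val & mask) << shift;
}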
index ebfae05..4d21df0 100644 (file)
  *
  */
 
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5GHz and 2GHz
+ *
  * Mode-specific RF Gain table (64bytes) for RF5111/5112
  * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
  * RF Gain values are included in AR5K_AR5210_INI)
  */
 struct ath5k_ini_rfgain {
-       u16     rfg_register;   /* RF Gain register address */
+       u16     rfg_register;
        u32     rfg_value[2];   /* [freq (see below)] */
 };
 
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
 #define AR5K_GAIN_CHECK_ADJUST(_g)             \
        ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
 
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
 struct ath5k_gain_opt_step {
        s8                              gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
        s8                              gos_gain;
 };
 
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
 struct ath5k_gain_opt {
        u8                              go_default;
        u8                              go_steps_count;
        const struct ath5k_gain_opt_step        go_step[AR5K_GAIN_STEP_COUNT];
 };
 
+
 /*
+ * RF5111
  * Parameters on gos_param:
  * 1) Tx clip PHY register
  * 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
 };
 
 /*
+ * RF5112
  * Parameters on gos_param:
  * 1) Mixgain ovr RF register
  * 2) PWD 138 RF register
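
A rough sketch of how a gain ladder like the one above can be used together with AR5K_GAIN_CHECK_ADJUST(): when the current gain leaves the [g_low, g_high] window, step along go_step[] and take the new gos_gain. The struct example_gain and the step direction are illustrative assumptions, not the driver's actual state or algorithm (that lives in phy.c).

#include <linux/types.h>

struct example_gain {			/* hypothetical gain state */
	s8 g_current;
	s8 g_low;
	s8 g_high;
	u8 g_step_idx;
};

static void example_gain_adjust(struct example_gain *g,
				const struct ath5k_gain_opt *go)
{
	if (g->g_current >= g->g_high && g->g_step_idx > 0)
		g->g_step_idx--;	/* assumed: lower index = less gain */
	else if (g->g_current <= g->g_low &&
		 g->g_step_idx < go->go_steps_count - 1)
		g->g_step_idx++;

	g->g_current = go->go_step[g->g_step_idx].gos_gain;
}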
index 8f7a0d1..7070693 100644 (file)
@@ -23,7 +23,7 @@
 
 obj-$(CONFIG_ATH6KL) := ath6kl.o
 ath6kl-y += debug.o
-ath6kl-y += htc_hif.o
+ath6kl-y += hif.o
 ath6kl-y += htc.o
 ath6kl-y += bmi.o
 ath6kl-y += cfg80211.o
index c5d11cc..a962fe4 100644 (file)
@@ -196,8 +196,6 @@ int ath6kl_bmi_done(struct ath6kl *ar)
                return ret;
        }
 
-       ath6kl_bmi_cleanup(ar);
-
        return 0;
 }
 
@@ -672,6 +670,11 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
        return ret;
 }
 
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+       ar->bmi.done_sent = false;
+}
+
 int ath6kl_bmi_init(struct ath6kl *ar)
 {
        ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
index 96851d5..009e8f6 100644 (file)
@@ -230,6 +230,8 @@ struct ath6kl_bmi_target_info {
 
 int ath6kl_bmi_init(struct ath6kl *ar);
 void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
 int ath6kl_bmi_done(struct ath6kl *ar);
 int ath6kl_bmi_get_target_info(struct ath6kl *ar,
                               struct ath6kl_bmi_target_info *targ_info);
index f517eb8..0252604 100644 (file)
 #include "testmode.h"
 
 static unsigned int ath6kl_p2p;
+static unsigned int multi_norm_if_support;
 
 module_param(ath6kl_p2p, uint, 0644);
+module_param(multi_norm_if_support, uint, 0644);
 
 #define RATETAB_ENT(_rate, _rateid, _flags) {   \
        .bitrate    = (_rate),                  \
@@ -123,17 +125,19 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
        .bitrates = ath6kl_a_rates,
 };
 
-static int ath6kl_set_wpa_version(struct ath6kl *ar,
+#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
+
+static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
                                  enum nl80211_wpa_versions wpa_version)
 {
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
 
        if (!wpa_version) {
-               ar->auth_mode = NONE_AUTH;
+               vif->auth_mode = NONE_AUTH;
        } else if (wpa_version & NL80211_WPA_VERSION_2) {
-               ar->auth_mode = WPA2_AUTH;
+               vif->auth_mode = WPA2_AUTH;
        } else if (wpa_version & NL80211_WPA_VERSION_1) {
-               ar->auth_mode = WPA_AUTH;
+               vif->auth_mode = WPA_AUTH;
        } else {
                ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
                return -ENOTSUPP;
@@ -142,25 +146,24 @@ static int ath6kl_set_wpa_version(struct ath6kl *ar,
        return 0;
 }
 
-static int ath6kl_set_auth_type(struct ath6kl *ar,
+static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
                                enum nl80211_auth_type auth_type)
 {
-
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
 
        switch (auth_type) {
        case NL80211_AUTHTYPE_OPEN_SYSTEM:
-               ar->dot11_auth_mode = OPEN_AUTH;
+               vif->dot11_auth_mode = OPEN_AUTH;
                break;
        case NL80211_AUTHTYPE_SHARED_KEY:
-               ar->dot11_auth_mode = SHARED_AUTH;
+               vif->dot11_auth_mode = SHARED_AUTH;
                break;
        case NL80211_AUTHTYPE_NETWORK_EAP:
-               ar->dot11_auth_mode = LEAP_AUTH;
+               vif->dot11_auth_mode = LEAP_AUTH;
                break;
 
        case NL80211_AUTHTYPE_AUTOMATIC:
-               ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+               vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
                break;
 
        default:
@@ -171,11 +174,11 @@ static int ath6kl_set_auth_type(struct ath6kl *ar,
        return 0;
 }
 
-static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
 {
-       u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
-       u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
-               &ar->grp_crypto_len;
+       u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
+       u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
+               &vif->grp_crypto_len;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
                   __func__, cipher, ucast);
@@ -210,28 +213,35 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
        return 0;
 }
 
-static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
 {
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
 
        if (key_mgmt == WLAN_AKM_SUITE_PSK) {
-               if (ar->auth_mode == WPA_AUTH)
-                       ar->auth_mode = WPA_PSK_AUTH;
-               else if (ar->auth_mode == WPA2_AUTH)
-                       ar->auth_mode = WPA2_PSK_AUTH;
+               if (vif->auth_mode == WPA_AUTH)
+                       vif->auth_mode = WPA_PSK_AUTH;
+               else if (vif->auth_mode == WPA2_AUTH)
+                       vif->auth_mode = WPA2_PSK_AUTH;
+       } else if (key_mgmt == 0x00409600) {
+               if (vif->auth_mode == WPA_AUTH)
+                       vif->auth_mode = WPA_AUTH_CCKM;
+               else if (vif->auth_mode == WPA2_AUTH)
+                       vif->auth_mode = WPA2_AUTH_CCKM;
        } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
-               ar->auth_mode = NONE_AUTH;
+               vif->auth_mode = NONE_AUTH;
        }
 }
 
-static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
 {
+       struct ath6kl *ar = vif->ar;
+
        if (!test_bit(WMI_READY, &ar->flag)) {
                ath6kl_err("wmi is not ready\n");
                return false;
        }
 
-       if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+       if (!test_bit(WLAN_ENABLED, &vif->flags)) {
                ath6kl_err("wlan disabled\n");
                return false;
        }
@@ -239,15 +249,143 @@ static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
        return true;
 }
 
+static bool ath6kl_is_wpa_ie(const u8 *pos)
+{
+       return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+               pos[2] == 0x00 && pos[3] == 0x50 &&
+               pos[4] == 0xf2 && pos[5] == 0x01;
+}
+
+static bool ath6kl_is_rsn_ie(const u8 *pos)
+{
+       return pos[0] == WLAN_EID_RSN;
+}
+
+static bool ath6kl_is_wps_ie(const u8 *pos)
+{
+       return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+               pos[1] >= 4 &&
+               pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
+               pos[5] == 0x04);
+}
+
+static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
+                                   size_t ies_len)
+{
+       struct ath6kl *ar = vif->ar;
+       const u8 *pos;
+       u8 *buf = NULL;
+       size_t len = 0;
+       int ret;
+
+       /*
+        * Clear previously set flag
+        */
+
+       ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+       /*
+        * Filter out RSN/WPA IE(s)
+        */
+
+       if (ies && ies_len) {
+               buf = kmalloc(ies_len, GFP_KERNEL);
+               if (buf == NULL)
+                       return -ENOMEM;
+               pos = ies;
+
+               while (pos + 1 < ies + ies_len) {
+                       if (pos + 2 + pos[1] > ies + ies_len)
+                               break;
+                       if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
+                               memcpy(buf + len, pos, 2 + pos[1]);
+                               len += 2 + pos[1];
+                       }
+
+                       if (ath6kl_is_wps_ie(pos))
+                               ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
+
+                       pos += 2 + pos[1];
+               }
+       }
+
+       ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+                                      WMI_FRAME_ASSOC_REQ, buf, len);
+       kfree(buf);
+       return ret;
+}
+
+static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
+{
+       switch (type) {
+       case NL80211_IFTYPE_STATION:
+               *nw_type = INFRA_NETWORK;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               *nw_type = ADHOC_NETWORK;
+               break;
+       case NL80211_IFTYPE_AP:
+               *nw_type = AP_NETWORK;
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               *nw_type = INFRA_NETWORK;
+               break;
+       case NL80211_IFTYPE_P2P_GO:
+               *nw_type = AP_NETWORK;
+               break;
+       default:
+               ath6kl_err("invalid interface type %u\n", type);
+               return -ENOTSUPP;
+       }
+
+       return 0;
+}
+
+static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
+                                  u8 *if_idx, u8 *nw_type)
+{
+       int i;
+
+       if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
+               return false;
+
+       if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
+           ar->num_vif))
+               return false;
+
+       if (type == NL80211_IFTYPE_STATION ||
+           type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
+               for (i = 0; i < MAX_NUM_VIF; i++) {
+                       if ((ar->avail_idx_map >> i) & BIT(0)) {
+                               *if_idx = i;
+                               return true;
+                       }
+               }
+       }
+
+       if (type == NL80211_IFTYPE_P2P_CLIENT ||
+           type == NL80211_IFTYPE_P2P_GO) {
+               for (i = ar->max_norm_iface; i < MAX_NUM_VIF; i++) {
+                       if ((ar->avail_idx_map >> i) & BIT(0)) {
+                               *if_idx = i;
+                               return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
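
The helpers above all parse 802.11 information elements, which are simple TLVs: one byte of element ID, one byte of length, then the payload. A standalone sketch of the same walk that ath6kl_set_assoc_req_ies() performs (the function and its names here are illustrative only):

#include <linux/kernel.h>

static void example_walk_ies(const u8 *ies, size_t ies_len)
{
	const u8 *pos = ies;

	while (pos + 1 < ies + ies_len) {
		u8 id = pos[0];
		u8 len = pos[1];

		if (pos + 2 + len > ies + ies_len)
			break;			/* truncated element */

		pr_debug("IE id %u, len %u\n", id, len);

		pos += 2 + len;			/* next element */
	}
}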
 static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                                   struct cfg80211_connect_params *sme)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
        int status;
 
-       ar->sme_state = SME_CONNECTING;
+       vif->sme_state = SME_CONNECTING;
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -287,12 +425,19 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                }
        }
 
-       if (test_bit(CONNECTED, &ar->flag) &&
-           ar->ssid_len == sme->ssid_len &&
-           !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
-               ar->reconnect_flag = true;
-               status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
-                                                 ar->ch_hint);
+       if (sme->ie && (sme->ie_len > 0)) {
+               status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+               if (status)
+                       return status;
+       }
+
+       if (test_bit(CONNECTED, &vif->flags) &&
+           vif->ssid_len == sme->ssid_len &&
+           !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+               vif->reconnect_flag = true;
+               status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
+                                                 vif->req_bssid,
+                                                 vif->ch_hint);
 
                up(&ar->sem);
                if (status) {
@@ -300,42 +445,43 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                        return -EIO;
                }
                return 0;
-       } else if (ar->ssid_len == sme->ssid_len &&
-                  !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
-               ath6kl_disconnect(ar);
+       } else if (vif->ssid_len == sme->ssid_len &&
+                  !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+               ath6kl_disconnect(vif);
        }
 
-       memset(ar->ssid, 0, sizeof(ar->ssid));
-       ar->ssid_len = sme->ssid_len;
-       memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+       memset(vif->ssid, 0, sizeof(vif->ssid));
+       vif->ssid_len = sme->ssid_len;
+       memcpy(vif->ssid, sme->ssid, sme->ssid_len);
 
        if (sme->channel)
-               ar->ch_hint = sme->channel->center_freq;
+               vif->ch_hint = sme->channel->center_freq;
 
-       memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+       memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
        if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
-               memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+               memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
 
-       ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+       ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
 
-       status = ath6kl_set_auth_type(ar, sme->auth_type);
+       status = ath6kl_set_auth_type(vif, sme->auth_type);
        if (status) {
                up(&ar->sem);
                return status;
        }
 
        if (sme->crypto.n_ciphers_pairwise)
-               ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+               ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
        else
-               ath6kl_set_cipher(ar, 0, true);
+               ath6kl_set_cipher(vif, 0, true);
 
-       ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+       ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
 
        if (sme->crypto.n_akm_suites)
-               ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+               ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
 
        if ((sme->key_len) &&
-           (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+           (vif->auth_mode == NONE_AUTH) &&
+           (vif->prwise_crypto == WEP_CRYPT)) {
                struct ath6kl_key *key = NULL;
 
                if (sme->key_idx < WMI_MIN_KEY_INDEX ||
@@ -346,56 +492,57 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                        return -ENOENT;
                }
 
-               key = &ar->keys[sme->key_idx];
+               key = &vif->keys[sme->key_idx];
                key->key_len = sme->key_len;
                memcpy(key->key, sme->key, key->key_len);
-               key->cipher = ar->prwise_crypto;
-               ar->def_txkey_index = sme->key_idx;
+               key->cipher = vif->prwise_crypto;
+               vif->def_txkey_index = sme->key_idx;
 
-               ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
-                                     ar->prwise_crypto,
+               ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
+                                     vif->prwise_crypto,
                                      GROUP_USAGE | TX_USAGE,
                                      key->key_len,
-                                     NULL,
+                                     NULL, 0,
                                      key->key, KEY_OP_INIT_VAL, NULL,
                                      NO_SYNC_WMIFLAG);
        }
 
        if (!ar->usr_bss_filter) {
-               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-               if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+               if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+                   ALL_BSS_FILTER, 0) != 0) {
                        ath6kl_err("couldn't set bss filtering\n");
                        up(&ar->sem);
                        return -EIO;
                }
        }
 
-       ar->nw_type = ar->next_mode;
+       vif->nw_type = vif->next_mode;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                   "%s: connect called with authmode %d dot11 auth %d"
                   " PW crypto %d PW crypto len %d GRP crypto %d"
                   " GRP crypto len %d channel hint %u\n",
                   __func__,
-                  ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
-                  ar->prwise_crypto_len, ar->grp_crypto,
-                  ar->grp_crypto_len, ar->ch_hint);
-
-       ar->reconnect_flag = 0;
-       status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
-                                       ar->dot11_auth_mode, ar->auth_mode,
-                                       ar->prwise_crypto,
-                                       ar->prwise_crypto_len,
-                                       ar->grp_crypto, ar->grp_crypto_len,
-                                       ar->ssid_len, ar->ssid,
-                                       ar->req_bssid, ar->ch_hint,
+                  vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+                  vif->prwise_crypto_len, vif->grp_crypto,
+                  vif->grp_crypto_len, vif->ch_hint);
+
+       vif->reconnect_flag = 0;
+       status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+                                       vif->dot11_auth_mode, vif->auth_mode,
+                                       vif->prwise_crypto,
+                                       vif->prwise_crypto_len,
+                                       vif->grp_crypto, vif->grp_crypto_len,
+                                       vif->ssid_len, vif->ssid,
+                                       vif->req_bssid, vif->ch_hint,
                                        ar->connect_ctrl_flags);
 
        up(&ar->sem);
 
        if (status == -EINVAL) {
-               memset(ar->ssid, 0, sizeof(ar->ssid));
-               ar->ssid_len = 0;
+               memset(vif->ssid, 0, sizeof(vif->ssid));
+               vif->ssid_len = 0;
                ath6kl_err("invalid request\n");
                return -ENOENT;
        } else if (status) {
@@ -404,27 +551,28 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
        }
 
        if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
-           ((ar->auth_mode == WPA_PSK_AUTH)
-            || (ar->auth_mode == WPA2_PSK_AUTH))) {
-               mod_timer(&ar->disconnect_timer,
+           ((vif->auth_mode == WPA_PSK_AUTH)
+            || (vif->auth_mode == WPA2_PSK_AUTH))) {
+               mod_timer(&vif->disconnect_timer,
                          jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
        }
 
        ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
-       set_bit(CONNECT_PEND, &ar->flag);
+       set_bit(CONNECT_PEND, &vif->flags);
 
        return 0;
 }
 
-static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, const u8 *bssid,
                                    struct ieee80211_channel *chan,
                                    const u8 *beacon_ie, size_t beacon_ie_len)
 {
+       struct ath6kl *ar = vif->ar;
        struct cfg80211_bss *bss;
        u8 *ie;
 
-       bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
-                              ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
+       bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
+                              vif->ssid, vif->ssid_len, WLAN_CAPABILITY_ESS,
                               WLAN_CAPABILITY_ESS);
        if (bss == NULL) {
                /*
@@ -435,16 +583,16 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
                 * Prepend SSID element since it is not included in the Beacon
                 * IEs from the target.
                 */
-               ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+               ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
                if (ie == NULL)
                        return -ENOMEM;
                ie[0] = WLAN_EID_SSID;
-               ie[1] = ar->ssid_len;
-               memcpy(ie + 2, ar->ssid, ar->ssid_len);
-               memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
-               bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
+               ie[1] = vif->ssid_len;
+               memcpy(ie + 2, vif->ssid, vif->ssid_len);
+               memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
+               bss = cfg80211_inform_bss(ar->wiphy, chan,
                                          bssid, 0, WLAN_CAPABILITY_ESS, 100,
-                                         ie, 2 + ar->ssid_len + beacon_ie_len,
+                                         ie, 2 + vif->ssid_len + beacon_ie_len,
                                          0, GFP_KERNEL);
                if (bss)
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
@@ -463,7 +611,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
        return 0;
 }
 
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
                                   u8 *bssid, u16 listen_intvl,
                                   u16 beacon_intvl,
                                   enum network_type nw_type,
@@ -471,6 +619,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
                                   u8 assoc_resp_len, u8 *assoc_info)
 {
        struct ieee80211_channel *chan;
+       struct ath6kl *ar = vif->ar;
 
        /* capinfo + listen interval */
        u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -489,11 +638,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
         * Store Beacon interval here; DTIM period will be available only once
         * a Beacon frame from the AP is seen.
         */
-       ar->assoc_bss_beacon_int = beacon_intvl;
-       clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+       vif->assoc_bss_beacon_int = beacon_intvl;
+       clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
 
        if (nw_type & ADHOC_NETWORK) {
-               if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+               if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "%s: ath6k not in ibss mode\n", __func__);
                        return;
@@ -501,39 +650,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
        }
 
        if (nw_type & INFRA_NETWORK) {
-               if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
-                   ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+               if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+                   vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "%s: ath6k not in station mode\n", __func__);
                        return;
                }
        }
 
-       chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
+       chan = ieee80211_get_channel(ar->wiphy, (int) channel);
 
 
        if (nw_type & ADHOC_NETWORK) {
-               cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+               cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
                return;
        }
 
-       if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
+       if (ath6kl_add_bss_if_needed(vif, bssid, chan, assoc_info,
                                     beacon_ie_len) < 0) {
                ath6kl_err("could not add cfg80211 bss entry for "
                           "connect/roamed notification\n");
                return;
        }
 
-       if (ar->sme_state == SME_CONNECTING) {
+       if (vif->sme_state == SME_CONNECTING) {
                /* inform connect result to cfg80211 */
-               ar->sme_state = SME_CONNECTED;
-               cfg80211_connect_result(ar->net_dev, bssid,
+               vif->sme_state = SME_CONNECTED;
+               cfg80211_connect_result(vif->ndev, bssid,
                                        assoc_req_ie, assoc_req_len,
                                        assoc_resp_ie, assoc_resp_len,
                                        WLAN_STATUS_SUCCESS, GFP_KERNEL);
-       } else if (ar->sme_state == SME_CONNECTED) {
+       } else if (vif->sme_state == SME_CONNECTED) {
                /* inform roam event to cfg80211 */
-               cfg80211_roamed(ar->net_dev, chan, bssid,
+               cfg80211_roamed(vif->ndev, chan, bssid,
                                assoc_req_ie, assoc_req_len,
                                assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
        }
@@ -543,11 +692,12 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
                                      struct net_device *dev, u16 reason_code)
 {
        struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
                   reason_code);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -560,44 +710,46 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
                return -ERESTARTSYS;
        }
 
-       ar->reconnect_flag = 0;
-       ath6kl_disconnect(ar);
-       memset(ar->ssid, 0, sizeof(ar->ssid));
-       ar->ssid_len = 0;
+       vif->reconnect_flag = 0;
+       ath6kl_disconnect(vif);
+       memset(vif->ssid, 0, sizeof(vif->ssid));
+       vif->ssid_len = 0;
 
        if (!test_bit(SKIP_SCAN, &ar->flag))
-               memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+               memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
 
        up(&ar->sem);
 
-       ar->sme_state = SME_DISCONNECTED;
+       vif->sme_state = SME_DISCONNECTED;
 
        return 0;
 }
 
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
                                      u8 *bssid, u8 assoc_resp_len,
                                      u8 *assoc_info, u16 proto_reason)
 {
-       if (ar->scan_req) {
-               cfg80211_scan_done(ar->scan_req, true);
-               ar->scan_req = NULL;
+       struct ath6kl *ar = vif->ar;
+
+       if (vif->scan_req) {
+               cfg80211_scan_done(vif->scan_req, true);
+               vif->scan_req = NULL;
        }
 
-       if (ar->nw_type & ADHOC_NETWORK) {
-               if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+       if (vif->nw_type & ADHOC_NETWORK) {
+               if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "%s: ath6k not in ibss mode\n", __func__);
                        return;
                }
                memset(bssid, 0, ETH_ALEN);
-               cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+               cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
                return;
        }
 
-       if (ar->nw_type & INFRA_NETWORK) {
-               if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
-                   ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+       if (vif->nw_type & INFRA_NETWORK) {
+               if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+                   vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "%s: ath6k not in station mode\n", __func__);
                        return;
@@ -614,42 +766,44 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
         */
 
        if (reason != DISCONNECT_CMD) {
-               ath6kl_wmi_disconnect_cmd(ar->wmi);
+               ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
                return;
        }
 
-       clear_bit(CONNECT_PEND, &ar->flag);
+       clear_bit(CONNECT_PEND, &vif->flags);
 
-       if (ar->sme_state == SME_CONNECTING) {
-               cfg80211_connect_result(ar->net_dev,
+       if (vif->sme_state == SME_CONNECTING) {
+               cfg80211_connect_result(vif->ndev,
                                bssid, NULL, 0,
                                NULL, 0,
                                WLAN_STATUS_UNSPECIFIED_FAILURE,
                                GFP_KERNEL);
-       } else if (ar->sme_state == SME_CONNECTED) {
-               cfg80211_disconnected(ar->net_dev, reason,
+       } else if (vif->sme_state == SME_CONNECTED) {
+               cfg80211_disconnected(vif->ndev, reason,
                                NULL, 0, GFP_KERNEL);
        }
 
-       ar->sme_state = SME_DISCONNECTED;
+       vif->sme_state = SME_DISCONNECTED;
 }
 
 static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                                struct cfg80211_scan_request *request)
 {
        struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+       struct ath6kl_vif *vif = netdev_priv(ndev);
        s8 n_channels = 0;
        u16 *channels = NULL;
        int ret = 0;
+       u32 force_fg_scan = 0;
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (!ar->usr_bss_filter) {
-               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
                ret = ath6kl_wmi_bssfilter_cmd(
-                       ar->wmi,
-                       (test_bit(CONNECTED, &ar->flag) ?
+                       ar->wmi, vif->fw_vif_idx,
+                       (test_bit(CONNECTED, &vif->flags) ?
                         ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
                if (ret) {
                        ath6kl_err("couldn't set bss filtering\n");
@@ -664,14 +818,15 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                        request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
 
                for (i = 0; i < request->n_ssids; i++)
-                       ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
-                                                 SPECIFIC_SSID_FLAG,
+                       ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+                                                 i + 1, SPECIFIC_SSID_FLAG,
                                                  request->ssids[i].ssid_len,
                                                  request->ssids[i].ssid);
        }
 
        if (request->ie) {
-               ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+               ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+                                              WMI_FRAME_PROBE_REQ,
                                               request->ie, request->ie_len);
                if (ret) {
                        ath6kl_err("failed to set Probe Request appie for "
@@ -702,44 +857,47 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
                        channels[i] = request->channels[i]->center_freq;
        }
 
-       ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
-                                      false, 0, 0, n_channels, channels);
+       if (test_bit(CONNECTED, &vif->flags))
+               force_fg_scan = 1;
+
+       ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx, WMI_LONG_SCAN,
+                                      force_fg_scan, false, 0, 0, n_channels,
+                                      channels);
        if (ret)
                ath6kl_err("wmi_startscan_cmd failed\n");
        else
-               ar->scan_req = request;
+               vif->scan_req = request;
 
        kfree(channels);
 
        return ret;
 }
 
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
 {
+       struct ath6kl *ar = vif->ar;
        int i;
 
-       ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+       ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
+                  aborted ? " aborted" : "");
 
-       if (!ar->scan_req)
+       if (!vif->scan_req)
                return;
 
-       if ((status == -ECANCELED) || (status == -EBUSY)) {
-               cfg80211_scan_done(ar->scan_req, true);
+       if (aborted)
                goto out;
-       }
-
-       cfg80211_scan_done(ar->scan_req, false);
 
-       if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
-               for (i = 0; i < ar->scan_req->n_ssids; i++) {
-                       ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
-                                                 DISABLE_SSID_FLAG,
+       if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
+               for (i = 0; i < vif->scan_req->n_ssids; i++) {
+                       ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+                                                 i + 1, DISABLE_SSID_FLAG,
                                                  0, NULL);
                }
        }
 
 out:
-       ar->scan_req = NULL;
+       cfg80211_scan_done(vif->scan_req, aborted);
+       vif->scan_req = NULL;
 }
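
The rework above funnels both the normal and the aborted path through the out: label so that cfg80211_scan_done() is called exactly once per accepted scan request, which is the pairing cfg80211 expects. A minimal sketch of that contract, using a hypothetical vif structure rather than the driver's own:

#include <net/cfg80211.h>

struct example_vif {				/* hypothetical */
	struct cfg80211_scan_request *scan_req;
};

static void example_finish_scan(struct example_vif *vif, bool aborted)
{
	if (!vif->scan_req)
		return;				/* nothing outstanding */

	cfg80211_scan_done(vif->scan_req, aborted);
	vif->scan_req = NULL;			/* never report twice */
}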
 
 static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -748,14 +906,21 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                                   struct key_params *params)
 {
        struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+       struct ath6kl_vif *vif = netdev_priv(ndev);
        struct ath6kl_key *key = NULL;
        u8 key_usage;
        u8 key_type;
-       int status = 0;
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
+       if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
+               if (params->key_len != WMI_KRK_LEN)
+                       return -EINVAL;
+               return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
+                                             params->key);
+       }
+
        if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                           "%s: key index %d out of bounds\n", __func__,
@@ -763,7 +928,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                return -ENOENT;
        }
 
-       key = &ar->keys[key_index];
+       key = &vif->keys[key_index];
        memset(key, 0, sizeof(struct ath6kl_key));
 
        if (pairwise)
@@ -801,26 +966,26 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                return -ENOTSUPP;
        }
 
-       if (((ar->auth_mode == WPA_PSK_AUTH)
-            || (ar->auth_mode == WPA2_PSK_AUTH))
+       if (((vif->auth_mode == WPA_PSK_AUTH)
+            || (vif->auth_mode == WPA2_PSK_AUTH))
            && (key_usage & GROUP_USAGE))
-               del_timer(&ar->disconnect_timer);
+               del_timer(&vif->disconnect_timer);
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                   "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
                   __func__, key_index, key->key_len, key_type,
                   key_usage, key->seq_len);
 
-       ar->def_txkey_index = key_index;
+       vif->def_txkey_index = key_index;
 
-       if (ar->nw_type == AP_NETWORK && !pairwise &&
+       if (vif->nw_type == AP_NETWORK && !pairwise &&
            (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
                ar->ap_mode_bkey.valid = true;
                ar->ap_mode_bkey.key_index = key_index;
                ar->ap_mode_bkey.key_type = key_type;
                ar->ap_mode_bkey.key_len = key->key_len;
                memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
-               if (!test_bit(CONNECTED, &ar->flag)) {
+               if (!test_bit(CONNECTED, &vif->flags)) {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
                                   "key configuration until AP mode has been "
                                   "started\n");
@@ -832,8 +997,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                }
        }
 
-       if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
-           !test_bit(CONNECTED, &ar->flag)) {
+       if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+           !test_bit(CONNECTED, &vif->flags)) {
                /*
                 * Store the key locally so that it can be re-configured after
                 * the AP mode has properly started
@@ -841,20 +1006,18 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                 */
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
                           "until AP mode has been started\n");
-               ar->wep_key_list[key_index].key_len = key->key_len;
-               memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+               vif->wep_key_list[key_index].key_len = key->key_len;
+               memcpy(vif->wep_key_list[key_index].key, key->key,
+                      key->key_len);
                return 0;
        }
 
-       status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
-                                      key_type, key_usage, key->key_len,
-                                      key->seq, key->key, KEY_OP_INIT_VAL,
-                                      (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
-
-       if (status)
-               return -EIO;
-
-       return 0;
+       return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+                                    vif->def_txkey_index,
+                                    key_type, key_usage, key->key_len,
+                                    key->seq, key->seq_len, key->key,
+                                    KEY_OP_INIT_VAL,
+                                    (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
 }
 
 static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -862,10 +1025,11 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                                   const u8 *mac_addr)
 {
        struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+       struct ath6kl_vif *vif = netdev_priv(ndev);
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -875,15 +1039,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                return -ENOENT;
        }
 
-       if (!ar->keys[key_index].key_len) {
+       if (!vif->keys[key_index].key_len) {
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                           "%s: index %d is empty\n", __func__, key_index);
                return 0;
        }
 
-       ar->keys[key_index].key_len = 0;
+       vif->keys[key_index].key_len = 0;
 
-       return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+       return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
 }
 
 static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -892,13 +1056,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
                                   void (*callback) (void *cookie,
                                                     struct key_params *))
 {
-       struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+       struct ath6kl_vif *vif = netdev_priv(ndev);
        struct ath6kl_key *key = NULL;
        struct key_params params;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -908,7 +1072,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
                return -ENOENT;
        }
 
-       key = &ar->keys[key_index];
+       key = &vif->keys[key_index];
        memset(&params, 0, sizeof(params));
        params.cipher = key->cipher;
        params.key_len = key->key_len;
@@ -927,14 +1091,14 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
                                           bool multicast)
 {
        struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+       struct ath6kl_vif *vif = netdev_priv(ndev);
        struct ath6kl_key *key = NULL;
-       int status = 0;
        u8 key_usage;
        enum crypto_type key_type = NONE_CRYPT;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -944,43 +1108,41 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
                return -ENOENT;
        }
 
-       if (!ar->keys[key_index].key_len) {
+       if (!vif->keys[key_index].key_len) {
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
                           __func__, key_index);
                return -EINVAL;
        }
 
-       ar->def_txkey_index = key_index;
-       key = &ar->keys[ar->def_txkey_index];
+       vif->def_txkey_index = key_index;
+       key = &vif->keys[vif->def_txkey_index];
        key_usage = GROUP_USAGE;
-       if (ar->prwise_crypto == WEP_CRYPT)
+       if (vif->prwise_crypto == WEP_CRYPT)
                key_usage |= TX_USAGE;
        if (unicast)
-               key_type = ar->prwise_crypto;
+               key_type = vif->prwise_crypto;
        if (multicast)
-               key_type = ar->grp_crypto;
+               key_type = vif->grp_crypto;
 
-       if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+       if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
                return 0; /* Delay until AP mode has been started */
 
-       status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
-                                      key_type, key_usage,
-                                      key->key_len, key->seq, key->key,
-                                      KEY_OP_INIT_VAL, NULL,
-                                      SYNC_BOTH_WMIFLAG);
-       if (status)
-               return -EIO;
-
-       return 0;
+       return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+                                    vif->def_txkey_index,
+                                    key_type, key_usage,
+                                    key->key_len, key->seq, key->seq_len,
+                                    key->key,
+                                    KEY_OP_INIT_VAL, NULL,
+                                    SYNC_BOTH_WMIFLAG);
 }
 
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
                                       bool ismcast)
 {
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                   "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
 
-       cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+       cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
                                     (ismcast ? NL80211_KEYTYPE_GROUP :
                                      NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
                                     GFP_KERNEL);
@@ -989,12 +1151,17 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
 static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 {
        struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+       struct ath6kl_vif *vif;
        int ret;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
                   changed);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -1017,12 +1184,17 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
                                       int dbm)
 {
        struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+       struct ath6kl_vif *vif;
        u8 ath6kl_dbm;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
                   type, dbm);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        switch (type) {
@@ -1037,7 +1209,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
                return -EOPNOTSUPP;
        }
 
-       ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+       ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
 
        return 0;
 }
@@ -1045,14 +1217,19 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
 static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
 {
        struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+       struct ath6kl_vif *vif;
 
-       if (!ath6kl_cfg80211_ready(ar))
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
                return -EIO;
 
-       if (test_bit(CONNECTED, &ar->flag)) {
+       if (!ath6kl_cfg80211_ready(vif))
+               return -EIO;
+
+       if (test_bit(CONNECTED, &vif->flags)) {
                ar->tx_pwr = 0;
 
-               if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+               if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
                        ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
                        return -EIO;
                }
@@ -1076,11 +1253,12 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
 {
        struct ath6kl *ar = ath6kl_priv(dev);
        struct wmi_power_mode_cmd mode;
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
                   __func__, pmgmt, timeout);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        if (pmgmt) {
@@ -1091,7 +1269,8 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
                mode.pwr_mode = MAX_PERF_POWER;
        }
 
-       if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+       if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
+            mode.pwr_mode) != 0) {
                ath6kl_err("wmi_powermode_cmd failed\n");
                return -EIO;
        }
@@ -1099,41 +1278,86 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
        return 0;
 }
 
+static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+                                                   char *name,
+                                                   enum nl80211_iftype type,
+                                                   u32 *flags,
+                                                   struct vif_params *params)
+{
+       struct ath6kl *ar = wiphy_priv(wiphy);
+       struct net_device *ndev;
+       u8 if_idx, nw_type;
+
+       if (ar->num_vif == MAX_NUM_VIF) {
+               ath6kl_err("Reached maximum number of supported vif\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
+               ath6kl_err("Not a supported interface type\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+       if (!ndev)
+               return ERR_PTR(-ENOMEM);
+
+       ar->num_vif++;
+
+       return ndev;
+}
+
+static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
+                                    struct net_device *ndev)
+{
+       struct ath6kl *ar = wiphy_priv(wiphy);
+       struct ath6kl_vif *vif = netdev_priv(ndev);
+
+       spin_lock_bh(&ar->list_lock);
+       list_del(&vif->list);
+       spin_unlock_bh(&ar->list_lock);
+
+       ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+
+       ath6kl_deinit_if_data(vif);
+
+       return 0;
+}
+
 static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
                                        struct net_device *ndev,
                                        enum nl80211_iftype type, u32 *flags,
                                        struct vif_params *params)
 {
-       struct ath6kl *ar = ath6kl_priv(ndev);
-       struct wireless_dev *wdev = ar->wdev;
+       struct ath6kl_vif *vif = netdev_priv(ndev);
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        switch (type) {
        case NL80211_IFTYPE_STATION:
-               ar->next_mode = INFRA_NETWORK;
+               vif->next_mode = INFRA_NETWORK;
                break;
        case NL80211_IFTYPE_ADHOC:
-               ar->next_mode = ADHOC_NETWORK;
+               vif->next_mode = ADHOC_NETWORK;
                break;
        case NL80211_IFTYPE_AP:
-               ar->next_mode = AP_NETWORK;
+               vif->next_mode = AP_NETWORK;
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
-               ar->next_mode = INFRA_NETWORK;
+               vif->next_mode = INFRA_NETWORK;
                break;
        case NL80211_IFTYPE_P2P_GO:
-               ar->next_mode = AP_NETWORK;
+               vif->next_mode = AP_NETWORK;
                break;
        default:
                ath6kl_err("invalid interface type %u\n", type);
                return -EOPNOTSUPP;
        }
 
-       wdev->iftype = type;
+       vif->wdev.iftype = type;
 
        return 0;
 }
@@ -1143,16 +1367,17 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
                                     struct cfg80211_ibss_params *ibss_param)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
        int status;
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
-       ar->ssid_len = ibss_param->ssid_len;
-       memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+       vif->ssid_len = ibss_param->ssid_len;
+       memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
 
        if (ibss_param->channel)
-               ar->ch_hint = ibss_param->channel->center_freq;
+               vif->ch_hint = ibss_param->channel->center_freq;
 
        if (ibss_param->channel_fixed) {
                /*
@@ -1164,44 +1389,45 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
                return -EOPNOTSUPP;
        }
 
-       memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+       memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
        if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
-               memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+               memcpy(vif->req_bssid, ibss_param->bssid,
+                      sizeof(vif->req_bssid));
 
-       ath6kl_set_wpa_version(ar, 0);
+       ath6kl_set_wpa_version(vif, 0);
 
-       status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+       status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
        if (status)
                return status;
 
        if (ibss_param->privacy) {
-               ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
-               ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+               ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
+               ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
        } else {
-               ath6kl_set_cipher(ar, 0, true);
-               ath6kl_set_cipher(ar, 0, false);
+               ath6kl_set_cipher(vif, 0, true);
+               ath6kl_set_cipher(vif, 0, false);
        }
 
-       ar->nw_type = ar->next_mode;
+       vif->nw_type = vif->next_mode;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                   "%s: connect called with authmode %d dot11 auth %d"
                   " PW crypto %d PW crypto len %d GRP crypto %d"
                   " GRP crypto len %d channel hint %u\n",
                   __func__,
-                  ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
-                  ar->prwise_crypto_len, ar->grp_crypto,
-                  ar->grp_crypto_len, ar->ch_hint);
-
-       status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
-                                       ar->dot11_auth_mode, ar->auth_mode,
-                                       ar->prwise_crypto,
-                                       ar->prwise_crypto_len,
-                                       ar->grp_crypto, ar->grp_crypto_len,
-                                       ar->ssid_len, ar->ssid,
-                                       ar->req_bssid, ar->ch_hint,
+                  vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+                  vif->prwise_crypto_len, vif->grp_crypto,
+                  vif->grp_crypto_len, vif->ch_hint);
+
+       status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+                                       vif->dot11_auth_mode, vif->auth_mode,
+                                       vif->prwise_crypto,
+                                       vif->prwise_crypto_len,
+                                       vif->grp_crypto, vif->grp_crypto_len,
+                                       vif->ssid_len, vif->ssid,
+                                       vif->req_bssid, vif->ch_hint,
                                        ar->connect_ctrl_flags);
-       set_bit(CONNECT_PEND, &ar->flag);
+       set_bit(CONNECT_PEND, &vif->flags);
 
        return 0;
 }
@@ -1209,14 +1435,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
 static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
                                      struct net_device *dev)
 {
-       struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
-       ath6kl_disconnect(ar);
-       memset(ar->ssid, 0, sizeof(ar->ssid));
-       ar->ssid_len = 0;
+       ath6kl_disconnect(vif);
+       memset(vif->ssid, 0, sizeof(vif->ssid));
+       vif->ssid_len = 0;
 
        return 0;
 }
@@ -1226,6 +1452,7 @@ static const u32 cipher_suites[] = {
        WLAN_CIPHER_SUITE_WEP104,
        WLAN_CIPHER_SUITE_TKIP,
        WLAN_CIPHER_SUITE_CCMP,
+       CCKM_KRK_CIPHER_SUITE,
 };
 
 static bool is_rate_legacy(s32 rate)
@@ -1293,21 +1520,22 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
                              u8 *mac, struct station_info *sinfo)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
        long left;
        bool sgi;
        s32 rate;
        int ret;
        u8 mcs;
 
-       if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+       if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
                return -ENOENT;
 
        if (down_interruptible(&ar->sem))
                return -EBUSY;
 
-       set_bit(STATS_UPDATE_PEND, &ar->flag);
+       set_bit(STATS_UPDATE_PEND, &vif->flags);
 
-       ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+       ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
 
        if (ret != 0) {
                up(&ar->sem);
@@ -1316,7 +1544,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
 
        left = wait_event_interruptible_timeout(ar->event_wq,
                                                !test_bit(STATS_UPDATE_PEND,
-                                                         &ar->flag),
+                                                         &vif->flags),
                                                WMI_TIMEOUT);
 
        up(&ar->sem);
@@ -1326,24 +1554,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
        else if (left < 0)
                return left;
 
-       if (ar->target_stats.rx_byte) {
-               sinfo->rx_bytes = ar->target_stats.rx_byte;
+       if (vif->target_stats.rx_byte) {
+               sinfo->rx_bytes = vif->target_stats.rx_byte;
                sinfo->filled |= STATION_INFO_RX_BYTES;
-               sinfo->rx_packets = ar->target_stats.rx_pkt;
+               sinfo->rx_packets = vif->target_stats.rx_pkt;
                sinfo->filled |= STATION_INFO_RX_PACKETS;
        }
 
-       if (ar->target_stats.tx_byte) {
-               sinfo->tx_bytes = ar->target_stats.tx_byte;
+       if (vif->target_stats.tx_byte) {
+               sinfo->tx_bytes = vif->target_stats.tx_byte;
                sinfo->filled |= STATION_INFO_TX_BYTES;
-               sinfo->tx_packets = ar->target_stats.tx_pkt;
+               sinfo->tx_packets = vif->target_stats.tx_pkt;
                sinfo->filled |= STATION_INFO_TX_PACKETS;
        }
 
-       sinfo->signal = ar->target_stats.cs_rssi;
+       sinfo->signal = vif->target_stats.cs_rssi;
        sinfo->filled |= STATION_INFO_SIGNAL;
 
-       rate = ar->target_stats.tx_ucast_rate;
+       rate = vif->target_stats.tx_ucast_rate;
 
        if (is_rate_legacy(rate)) {
                sinfo->txrate.legacy = rate / 100;
@@ -1375,13 +1603,13 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
 
        sinfo->filled |= STATION_INFO_TX_BITRATE;
 
-       if (test_bit(CONNECTED, &ar->flag) &&
-           test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
-           ar->nw_type == INFRA_NETWORK) {
+       if (test_bit(CONNECTED, &vif->flags) &&
+           test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
+           vif->nw_type == INFRA_NETWORK) {
                sinfo->filled |= STATION_INFO_BSS_PARAM;
                sinfo->bss_param.flags = 0;
-               sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
-               sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+               sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
+               sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
        }
 
        return 0;
@@ -1391,7 +1619,9 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
                            struct cfg80211_pmksa *pmksa)
 {
        struct ath6kl *ar = ath6kl_priv(netdev);
-       return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+       struct ath6kl_vif *vif = netdev_priv(netdev);
+
+       return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
                                       pmksa->pmkid, true);
 }
 
@@ -1399,25 +1629,292 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
                            struct cfg80211_pmksa *pmksa)
 {
        struct ath6kl *ar = ath6kl_priv(netdev);
-       return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+       struct ath6kl_vif *vif = netdev_priv(netdev);
+
+       return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
                                       pmksa->pmkid, false);
 }
 
 static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
 {
        struct ath6kl *ar = ath6kl_priv(netdev);
-       if (test_bit(CONNECTED, &ar->flag))
-               return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+       struct ath6kl_vif *vif = netdev_priv(netdev);
+
+       if (test_bit(CONNECTED, &vif->flags))
+               return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
+                                              vif->bssid, NULL, false);
+       return 0;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+       struct ath6kl_vif *vif;
+       int ret, pos, left;
+       u32 filter = 0;
+       u16 i;
+       u8 mask[WOW_MASK_SIZE];
+
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       if (!ath6kl_cfg80211_ready(vif))
+               return -EIO;
+
+       if (!test_bit(CONNECTED, &vif->flags))
+               return -EINVAL;
+
+       /* Clear existing WOW patterns */
+       for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+               ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+                                              WOW_LIST_ID, i);
+       /* Configure new WOW patterns */
+       for (i = 0; i < wow->n_patterns; i++) {
+
+               /*
+                * Convert the given nl80211-specific mask value to the
+                * equivalent driver-specific mask value and send it to the
+                * chip along with the patterns. For example, if the mask
+                * value defined in struct cfg80211_wowlan is 0xA (binary
+                * 1010), then the equivalent driver-specific mask value is
+                * "0xFF 0x00 0xFF 0x00".
+                */
+               memset(&mask, 0, sizeof(mask));
+               for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
+                       if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
+                               mask[pos] = 0xFF;
+               }
+               /*
+                * Note: the pattern's offset is not passed as part of the
+                * wowlan parameter from the CFG layer, so it is always passed
+                * as ZERO to the firmware. This means the given WOW patterns
+                * are always matched from the first byte of the received
+                * packet in the firmware.
+                */
+               ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+                                       vif->fw_vif_idx, WOW_LIST_ID,
+                                       wow->patterns[i].pattern_len,
+                                       0 /* pattern offset */,
+                                       wow->patterns[i].pattern, mask);
+               if (ret)
+                       return ret;
+       }
+
+       if (wow->disconnect)
+               filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+
+       if (wow->magic_pkt)
+               filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+
+       if (wow->gtk_rekey_failure)
+               filter |= WOW_FILTER_OPTION_GTK_ERROR;
+
+       if (wow->eap_identity_req)
+               filter |= WOW_FILTER_OPTION_EAP_REQ;
+
+       if (wow->four_way_handshake)
+               filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+       ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+                                         ATH6KL_WOW_MODE_ENABLE,
+                                         filter,
+                                         WOW_HOST_REQ_DELAY);
+       if (ret)
+               return ret;
+
+       ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+                                                ATH6KL_HOST_MODE_ASLEEP);
+       if (ret)
+               return ret;
+
+       if (ar->tx_pending[ar->ctrl_ep]) {
+               left = wait_event_interruptible_timeout(ar->event_wq,
+                               ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
+               if (left == 0) {
+                       ath6kl_warn("clear wmi ctrl data timeout\n");
+                       ret = -ETIMEDOUT;
+               } else if (left < 0) {
+                       ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+                       ret = left;
+               }
+       }
+
+       return ret;
+}
+
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+       struct ath6kl_vif *vif;
+       int ret;
+
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+                                                ATH6KL_HOST_MODE_AWAKE);
+       return ret;
+}
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+                           enum ath6kl_cfg_suspend_mode mode,
+                           struct cfg80211_wowlan *wow)
+{
+       int ret;
+
+       switch (mode) {
+       case ATH6KL_CFG_SUSPEND_WOW:
+
+               ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
+
+               /* Flush all non control pkts in TX path */
+               ath6kl_tx_data_cleanup(ar);
+
+               ret = ath6kl_wow_suspend(ar, wow);
+               if (ret) {
+                       ath6kl_err("wow suspend failed: %d\n", ret);
+                       return ret;
+               }
+               ar->state = ATH6KL_STATE_WOW;
+               break;
+
+       case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
+
+               ath6kl_cfg80211_stop(ar);
+
+               /* save the current power mode before enabling power save */
+               ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+               ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+               if (ret) {
+                       ath6kl_warn("wmi powermode command failed during suspend: %d\n",
+                                   ret);
+               }
+
+               ar->state = ATH6KL_STATE_DEEPSLEEP;
+
+               break;
+
+       case ATH6KL_CFG_SUSPEND_CUTPOWER:
+
+               ath6kl_cfg80211_stop(ar);
+
+               if (ar->state == ATH6KL_STATE_OFF) {
+                       ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+                                  "suspend hw off, no action for cutpower\n");
+                       break;
+               }
+
+               ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
+
+               ret = ath6kl_init_hw_stop(ar);
+               if (ret) {
+                       ath6kl_warn("failed to stop hw during suspend: %d\n",
+                                   ret);
+               }
+
+               ar->state = ATH6KL_STATE_CUTPOWER;
+
+               break;
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar)
+{
+       int ret;
+
+       switch (ar->state) {
+       case  ATH6KL_STATE_WOW:
+               ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
+
+               ret = ath6kl_wow_resume(ar);
+               if (ret) {
+                       ath6kl_warn("wow mode resume failed: %d\n", ret);
+                       return ret;
+               }
+
+               ar->state = ATH6KL_STATE_ON;
+               break;
+
+       case ATH6KL_STATE_DEEPSLEEP:
+               if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+                       ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+                                                      ar->wmi->saved_pwr_mode);
+                       if (ret) {
+                               ath6kl_warn("wmi powermode command failed during resume: %d\n",
+                                           ret);
+                       }
+               }
+
+               ar->state = ATH6KL_STATE_ON;
+
+               break;
+
+       case ATH6KL_STATE_CUTPOWER:
+               ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
+
+               ret = ath6kl_init_hw_start(ar);
+               if (ret) {
+                       ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
+                       return ret;
+               }
+               break;
+
+       default:
+               break;
+       }
+
        return 0;
 }
 
 #ifdef CONFIG_PM
-static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+
+/* hif layer decides what suspend mode to use */
+static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
                                 struct cfg80211_wowlan *wow)
 {
        struct ath6kl *ar = wiphy_priv(wiphy);
 
-       return ath6kl_hif_suspend(ar);
+       return ath6kl_hif_suspend(ar, wow);
+}
+
+static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
+{
+       struct ath6kl *ar = wiphy_priv(wiphy);
+
+       return ath6kl_hif_resume(ar);
+}
+
+/*
+ * FIXME: WOW suspend mode is selected if the host sdio controller supports
+ * both sdio irq wakeup and keep-power. The target pulls the sdio data line
+ * to wake up the host when a WOW pattern matches, which causes the sdio irq
+ * handler to be called on the host side and internally hits ath6kl's RX path.
+ *
+ * Since the sdio interrupt is not disabled, the RX path executes even before
+ * the host performs the actual resume operation from the PM module.
+ *
+ * In the current scenario, WOW resume must happen before any data from the
+ * target is processed, so WOW resume has to be performed in the RX path.
+ * Ideally WOW resume should only be done in the actual platform resume path.
+ * This area needs some rework to avoid doing WOW resume in the RX path.
+ *
+ * ath6kl_check_wow_status() is called from ath6kl_rx().
+ */
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+       if (ar->state == ATH6KL_STATE_WOW)
+               ath6kl_cfg80211_resume(ar);
+}
+
+#else
+
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
 }
 #endif
 
@@ -1425,14 +1922,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
                              struct ieee80211_channel *chan,
                              enum nl80211_channel_type channel_type)
 {
-       struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
                   __func__, chan->center_freq, chan->hw_value);
-       ar->next_chan = chan->center_freq;
+       vif->next_chan = chan->center_freq;
 
        return 0;
 }
@@ -1444,9 +1941,10 @@ static bool ath6kl_is_p2p_ie(const u8 *pos)
                pos[4] == 0x9a && pos[5] == 0x09;
 }
 
-static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
-                                       size_t ies_len)
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
+                                       const u8 *ies, size_t ies_len)
 {
+       struct ath6kl *ar = vif->ar;
        const u8 *pos;
        u8 *buf = NULL;
        size_t len = 0;
@@ -1473,8 +1971,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
                }
        }
 
-       ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
-                                      buf, len);
+       ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+                                      WMI_FRAME_PROBE_RESP, buf, len);
        kfree(buf);
        return ret;
 }
@@ -1483,6 +1981,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
                            struct beacon_parameters *info, bool add)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
        struct ieee80211_mgmt *mgmt;
        u8 *ies;
        int ies_len;
@@ -1492,27 +1991,29 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
 
-       if (!ath6kl_cfg80211_ready(ar))
+       if (!ath6kl_cfg80211_ready(vif))
                return -EIO;
 
-       if (ar->next_mode != AP_NETWORK)
+       if (vif->next_mode != AP_NETWORK)
                return -EOPNOTSUPP;
 
        if (info->beacon_ies) {
-               res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+               res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+                                              WMI_FRAME_BEACON,
                                               info->beacon_ies,
                                               info->beacon_ies_len);
                if (res)
                        return res;
        }
        if (info->proberesp_ies) {
-               res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+               res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
                                                   info->proberesp_ies_len);
                if (res)
                        return res;
        }
        if (info->assocresp_ies) {
-               res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+               res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+                                              WMI_FRAME_ASSOC_RESP,
                                               info->assocresp_ies,
                                               info->assocresp_ies_len);
                if (res)
@@ -1539,12 +2040,12 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
 
        if (info->ssid == NULL)
                return -EINVAL;
-       memcpy(ar->ssid, info->ssid, info->ssid_len);
-       ar->ssid_len = info->ssid_len;
+       memcpy(vif->ssid, info->ssid, info->ssid_len);
+       vif->ssid_len = info->ssid_len;
        if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
                return -EOPNOTSUPP; /* TODO */
 
-       ar->dot11_auth_mode = OPEN_AUTH;
+       vif->dot11_auth_mode = OPEN_AUTH;
 
        memset(&p, 0, sizeof(p));
 
@@ -1566,7 +2067,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
        }
        if (p.auth_mode == 0)
                p.auth_mode = NONE_AUTH;
-       ar->auth_mode = p.auth_mode;
+       vif->auth_mode = p.auth_mode;
 
        for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
                switch (info->crypto.ciphers_pairwise[i]) {
@@ -1584,9 +2085,9 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
        }
        if (p.prwise_crypto_type == 0) {
                p.prwise_crypto_type = NONE_CRYPT;
-               ath6kl_set_cipher(ar, 0, true);
+               ath6kl_set_cipher(vif, 0, true);
        } else if (info->crypto.n_ciphers_pairwise == 1)
-               ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+               ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
 
        switch (info->crypto.cipher_group) {
        case WLAN_CIPHER_SUITE_WEP40:
@@ -1603,17 +2104,17 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
                p.grp_crypto_type = NONE_CRYPT;
                break;
        }
-       ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+       ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
 
        p.nw_type = AP_NETWORK;
-       ar->nw_type = ar->next_mode;
+       vif->nw_type = vif->next_mode;
 
-       p.ssid_len = ar->ssid_len;
-       memcpy(p.ssid, ar->ssid, ar->ssid_len);
-       p.dot11_auth_mode = ar->dot11_auth_mode;
-       p.ch = cpu_to_le16(ar->next_chan);
+       p.ssid_len = vif->ssid_len;
+       memcpy(p.ssid, vif->ssid, vif->ssid_len);
+       p.dot11_auth_mode = vif->dot11_auth_mode;
+       p.ch = cpu_to_le16(vif->next_chan);
 
-       res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+       res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
        if (res < 0)
                return res;
 
@@ -1635,14 +2136,15 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
 static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       if (ar->nw_type != AP_NETWORK)
+       if (vif->nw_type != AP_NETWORK)
                return -EOPNOTSUPP;
-       if (!test_bit(CONNECTED, &ar->flag))
+       if (!test_bit(CONNECTED, &vif->flags))
                return -ENOTCONN;
 
-       ath6kl_wmi_disconnect_cmd(ar->wmi);
-       clear_bit(CONNECTED, &ar->flag);
+       ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+       clear_bit(CONNECTED, &vif->flags);
 
        return 0;
 }
@@ -1651,8 +2153,9 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
                                 u8 *mac, struct station_parameters *params)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       if (ar->nw_type != AP_NETWORK)
+       if (vif->nw_type != AP_NETWORK)
                return -EOPNOTSUPP;
 
        /* Use this only for authorizing/unauthorizing a station */
@@ -1660,10 +2163,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
                return -EOPNOTSUPP;
 
        if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
-               return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
-                                             mac, 0);
-       return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
-                                     0);
+               return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+                                             WMI_AP_MLME_AUTHORIZE, mac, 0);
+       return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+                                     WMI_AP_MLME_UNAUTHORIZE, mac, 0);
 }
 
 static int ath6kl_remain_on_channel(struct wiphy *wiphy,
@@ -1674,13 +2177,20 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
                                    u64 *cookie)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
+       u32 id;
 
        /* TODO: if already pending or ongoing remain-on-channel,
         * return -EBUSY */
-       *cookie = 1; /* only a single pending request is supported */
+       id = ++vif->last_roc_id;
+       if (id == 0) {
+               /* Do not use 0 as the cookie value */
+               id = ++vif->last_roc_id;
+       }
+       *cookie = id;
 
-       return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
-                                            duration);
+       return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
+                                            chan->center_freq, duration);
 }
 
 static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1688,16 +2198,20 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
                                           u64 cookie)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       if (cookie != 1)
+       if (cookie != vif->last_roc_id)
                return -ENOENT;
+       vif->last_cancel_roc_id = cookie;
 
-       return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+       return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
 }
 
-static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
-                                    size_t len, unsigned int freq)
+static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
+                                    const u8 *buf, size_t len,
+                                    unsigned int freq)
 {
+       struct ath6kl *ar = vif->ar;
        const u8 *pos;
        u8 *p2p;
        int p2p_len;
@@ -1724,8 +2238,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
                pos += 2 + pos[1];
        }
 
-       ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
-                                                p2p, p2p_len);
+       ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
+                                                mgmt->da, p2p, p2p_len);
        kfree(p2p);
        return ret;
 }
@@ -1734,36 +2248,39 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
                          struct ieee80211_channel *chan, bool offchan,
                          enum nl80211_channel_type channel_type,
                          bool channel_type_valid, unsigned int wait,
-                         const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+                         const u8 *buf, size_t len, bool no_cck,
+                         bool dont_wait_for_ack, u64 *cookie)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
        u32 id;
        const struct ieee80211_mgmt *mgmt;
 
        mgmt = (const struct ieee80211_mgmt *) buf;
        if (buf + len >= mgmt->u.probe_resp.variable &&
-           ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+           vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
            ieee80211_is_probe_resp(mgmt->frame_control)) {
                /*
                 * Send Probe Response frame in AP mode using a separate WMI
                 * command to allow the target to fill in the generic IEs.
                 */
                *cookie = 0; /* TX status not supported */
-               return ath6kl_send_go_probe_resp(ar, buf, len,
+               return ath6kl_send_go_probe_resp(vif, buf, len,
                                                 chan->center_freq);
        }
 
-       id = ar->send_action_id++;
+       id = vif->send_action_id++;
        if (id == 0) {
                /*
                 * 0 is a reserved value in the WMI command and shall not be
                 * used for the command.
                 */
-               id = ar->send_action_id++;
+               id = vif->send_action_id++;
        }
 
        *cookie = id;
-       return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
+       return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
+                                         chan->center_freq, wait,
                                          buf, len);
 }
 
@@ -1771,7 +2288,7 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
                                       struct net_device *dev,
                                       u16 frame_type, bool reg)
 {
-       struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
                   __func__, frame_type, reg);
@@ -1781,7 +2298,7 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
                 * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
                 * hardcode target to report Probe Request frames all the time.
                 */
-               ar->probe_req_report = reg;
+               vif->probe_req_report = reg;
        }
 }
 
@@ -1808,6 +2325,8 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 };
 
 static struct cfg80211_ops ath6kl_cfg80211_ops = {
+       .add_virtual_intf = ath6kl_cfg80211_add_iface,
+       .del_virtual_intf = ath6kl_cfg80211_del_iface,
        .change_virtual_intf = ath6kl_cfg80211_change_iface,
        .scan = ath6kl_cfg80211_scan,
        .connect = ath6kl_cfg80211_connect,
@@ -1828,7 +2347,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
        .flush_pmksa = ath6kl_flush_pmksa,
        CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
 #ifdef CONFIG_PM
-       .suspend = ar6k_cfg80211_suspend,
+       .suspend = __ath6kl_cfg80211_suspend,
+       .resume = __ath6kl_cfg80211_resume,
 #endif
        .set_channel = ath6kl_set_channel,
        .add_beacon = ath6kl_add_beacon,
@@ -1841,76 +2361,269 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
        .mgmt_frame_register = ath6kl_mgmt_frame_register,
 };
 
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+void ath6kl_cfg80211_stop(struct ath6kl *ar)
 {
-       int ret = 0;
-       struct wireless_dev *wdev;
-       struct ath6kl *ar;
+       struct ath6kl_vif *vif;
 
-       wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
-       if (!wdev) {
-               ath6kl_err("couldn't allocate wireless device\n");
-               return NULL;
+       /* FIXME: for multi vif */
+       vif = ath6kl_vif_first(ar);
+       if (!vif) {
+               /* save the current power mode before enabling power save */
+               ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+               if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
+                       ath6kl_warn("ath6kl_deep_sleep_enable: "
+                                   "wmi_powermode_cmd failed\n");
+               return;
        }
 
+       switch (vif->sme_state) {
+       case SME_CONNECTING:
+               cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
+                                       NULL, 0,
+                                       WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                       GFP_KERNEL);
+               break;
+       case SME_CONNECTED:
+       default:
+               /*
+                * FIXME: oddly enough, sme_state is DISCONNECTED during
+                * suspend; why? We need to send a disconnected event in
+                * that state.
+                */
+               cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+               break;
+       }
+
+       if (test_bit(CONNECTED, &vif->flags) ||
+           test_bit(CONNECT_PEND, &vif->flags))
+               ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+
+       vif->sme_state = SME_DISCONNECTED;
+       clear_bit(CONNECTED, &vif->flags);
+       clear_bit(CONNECT_PEND, &vif->flags);
+
+       /* disable scanning */
+       if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF, 0, 0,
+                                     0, 0, 0, 0, 0, 0, 0) != 0)
+               printk(KERN_WARNING "ath6kl: failed to disable scan "
+                      "during suspend\n");
+
+       ath6kl_cfg80211_scan_complete_event(vif, true);
+}
+
+struct ath6kl *ath6kl_core_alloc(struct device *dev)
+{
+       struct ath6kl *ar;
+       struct wiphy *wiphy;
+       u8 ctr;
+
        /* create a new wiphy for use with cfg80211 */
-       wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
-       if (!wdev->wiphy) {
+       wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+       if (!wiphy) {
                ath6kl_err("couldn't allocate wiphy device\n");
-               kfree(wdev);
                return NULL;
        }
 
-       ar = wiphy_priv(wdev->wiphy);
-       ar->p2p = !!ath6kl_p2p;
+       ar = wiphy_priv(wiphy);
+       if (!multi_norm_if_support)
+               ar->p2p = !!ath6kl_p2p;
+       ar->wiphy = wiphy;
+       ar->dev = dev;
+
+       if (multi_norm_if_support)
+               ar->max_norm_iface = 2;
+       else
+               ar->max_norm_iface = 1;
+
+       /* FIXME: Remove this once the multivif support is enabled */
+       ar->max_norm_iface = 1;
+
+       spin_lock_init(&ar->lock);
+       spin_lock_init(&ar->mcastpsq_lock);
+       spin_lock_init(&ar->list_lock);
 
-       wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+       init_waitqueue_head(&ar->event_wq);
+       sema_init(&ar->sem, 1);
 
-       wdev->wiphy->max_remain_on_channel_duration = 5000;
+       INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+       INIT_LIST_HEAD(&ar->vif_list);
+
+       clear_bit(WMI_ENABLED, &ar->flag);
+       clear_bit(SKIP_SCAN, &ar->flag);
+       clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+       ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+       ar->listen_intvl_b = 0;
+       ar->tx_pwr = 0;
+
+       ar->intra_bss = 1;
+       memset(&ar->sc_params, 0, sizeof(ar->sc_params));
+       ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
+       ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
+       ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+       ar->state = ATH6KL_STATE_OFF;
+
+       memset((u8 *)ar->sta_list, 0,
+              AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+       /* Init the PS queues */
+       for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+               spin_lock_init(&ar->sta_list[ctr].psq_lock);
+               skb_queue_head_init(&ar->sta_list[ctr].psq);
+       }
+
+       skb_queue_head_init(&ar->mcastpsq);
+
+       memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+       return ar;
+}
+
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
+{
+       struct wiphy *wiphy = ar->wiphy;
+       int ret;
+
+       wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+       wiphy->max_remain_on_channel_duration = 5000;
 
        /* set device pointer for wiphy */
-       set_wiphy_dev(wdev->wiphy, dev);
+       set_wiphy_dev(wiphy, ar->dev);
 
-       wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_ADHOC) |
+                                BIT(NL80211_IFTYPE_AP);
        if (ar->p2p) {
-               wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
-                       BIT(NL80211_IFTYPE_P2P_CLIENT);
+               wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+                                         BIT(NL80211_IFTYPE_P2P_CLIENT);
        }
-       /* max num of ssids that can be probed during scanning */
-       wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
-       wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
-       wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
-       wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
-       wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
-       wdev->wiphy->cipher_suites = cipher_suites;
-       wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
-       ret = wiphy_register(wdev->wiphy);
+       /* max num of ssids that can be probed during scanning */
+       wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+       wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+       wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+       wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+       wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+       wiphy->cipher_suites = cipher_suites;
+       wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+       wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+                             WIPHY_WOWLAN_DISCONNECT |
+                             WIPHY_WOWLAN_GTK_REKEY_FAILURE  |
+                             WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+                             WIPHY_WOWLAN_EAP_IDENTITY_REQ   |
+                             WIPHY_WOWLAN_4WAY_HANDSHAKE;
+       wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
+       wiphy->wowlan.pattern_min_len = 1;
+       wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+
+       ret = wiphy_register(wiphy);
        if (ret < 0) {
                ath6kl_err("couldn't register wiphy device\n");
-               wiphy_free(wdev->wiphy);
-               kfree(wdev);
-               return NULL;
+               return ret;
        }
 
-       return wdev;
+       return 0;
 }
 
-void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+static int ath6kl_init_if_data(struct ath6kl_vif *vif)
 {
-       struct wireless_dev *wdev = ar->wdev;
-
-       if (ar->scan_req) {
-               cfg80211_scan_done(ar->scan_req, true);
-               ar->scan_req = NULL;
+       vif->aggr_cntxt = aggr_init(vif->ndev);
+       if (!vif->aggr_cntxt) {
+               ath6kl_err("failed to initialize aggr\n");
+               return -ENOMEM;
        }
 
-       if (!wdev)
-               return;
+       setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
+                   (unsigned long) vif->ndev);
+       set_bit(WMM_ENABLED, &vif->flags);
+       spin_lock_init(&vif->if_lock);
+
+       return 0;
+}
+
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
+{
+       struct ath6kl *ar = vif->ar;
+
+       aggr_module_destroy(vif->aggr_cntxt);
+
+       ar->avail_idx_map |= BIT(vif->fw_vif_idx);
 
-       wiphy_unregister(wdev->wiphy);
-       wiphy_free(wdev->wiphy);
-       kfree(wdev);
+       if (vif->nw_type == ADHOC_NETWORK)
+               ar->ibss_if_active = false;
+
+       unregister_netdevice(vif->ndev);
+
+       ar->num_vif--;
+}
+
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+                                       enum nl80211_iftype type, u8 fw_vif_idx,
+                                       u8 nw_type)
+{
+       struct net_device *ndev;
+       struct ath6kl_vif *vif;
+
+       ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+       if (!ndev)
+               return NULL;
+
+       vif = netdev_priv(ndev);
+       ndev->ieee80211_ptr = &vif->wdev;
+       vif->wdev.wiphy = ar->wiphy;
+       vif->ar = ar;
+       vif->ndev = ndev;
+       SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
+       vif->wdev.netdev = ndev;
+       vif->wdev.iftype = type;
+       vif->fw_vif_idx = fw_vif_idx;
+       vif->nw_type = vif->next_mode = nw_type;
+
+       memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+       if (fw_vif_idx != 0)
+               ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
+                                    0x2;
+
+       init_netdev(ndev);
+
+       ath6kl_init_control_info(vif);
+
+       /* TODO: Pass interface specific pointer instead of ar */
+       if (ath6kl_init_if_data(vif))
+               goto err;
+
+       if (register_netdevice(ndev))
+               goto err;
+
+       ar->avail_idx_map &= ~BIT(fw_vif_idx);
+       vif->sme_state = SME_DISCONNECTED;
+       set_bit(WLAN_ENABLED, &vif->flags);
+       ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+       set_bit(NETDEV_REGISTERED, &vif->flags);
+
+       if (type == NL80211_IFTYPE_ADHOC)
+               ar->ibss_if_active = true;
+
+       spin_lock_bh(&ar->list_lock);
+       list_add_tail(&vif->list, &ar->vif_list);
+       spin_unlock_bh(&ar->list_lock);
+
+       return ndev;
+
+err:
+       aggr_module_destroy(vif->aggr_cntxt);
+       free_netdev(ndev);
+       return NULL;
+}
+
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
+{
+       wiphy_unregister(ar->wiphy);
+       wiphy_free(ar->wiphy);
 }
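
As a minimal stand-alone sketch of the per-vif MAC derivation done in ath6kl_interface_add() above: every interface other than index 0 gets one address bit flipped according to the firmware vif index and the locally administered bit (0x02) set, so each virtual interface ends up with a distinct MAC derived from ar->mac_addr. The helper name derive_vif_mac and the user-space framing below are illustrative only, not part of the patch.

#include <string.h>

#define ETH_ALEN 6

/* Minimal sketch: mirrors the dev_addr tweak in ath6kl_interface_add().
 * vif 0 keeps the base MAC; vif N flips bit N of the first octet and
 * sets the locally administered bit (0x02). */
static void derive_vif_mac(const unsigned char *base, unsigned char *out,
                           unsigned char fw_vif_idx)
{
        memcpy(out, base, ETH_ALEN);
        if (fw_vif_idx != 0)
                out[0] = (out[0] ^ (1 << fw_vif_idx)) | 0x2;
}
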
index a84adc2..59fa9d8 100644 (file)
 #ifndef ATH6KL_CFG80211_H
 #define ATH6KL_CFG80211_H
 
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
-void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+enum ath6kl_cfg_suspend_mode {
+       ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+       ATH6KL_CFG_SUSPEND_CUTPOWER,
+       ATH6KL_CFG_SUSPEND_WOW
+};
 
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+                                       enum nl80211_iftype type,
+                                       u8 fw_vif_idx, u8 nw_type);
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_alloc(struct device *dev);
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
 
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
                                   u8 *bssid, u16 listen_intvl,
                                   u16 beacon_intvl,
                                   enum network_type nw_type,
                                   u8 beacon_ie_len, u8 assoc_req_len,
                                   u8 assoc_resp_len, u8 *assoc_info);
 
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
                                      u8 *bssid, u8 assoc_resp_len,
                                      u8 *assoc_info, u16 proto_reason);
 
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
                                     bool ismcast);
 
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+                           enum ath6kl_cfg_suspend_mode mode,
+                           struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_stop(struct ath6kl *ar);
+
 #endif /* ATH6KL_CFG80211_H */
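
The suspend entry points declared above are driven by the bus layer; as the comment in cfg80211.c notes, the hif layer decides which suspend mode to use. A rough sketch of such a selection is shown below, assuming the ath6kl core headers for the types involved. The helper example_hif_suspend and its policy (prefer WOW when patterns are configured, then cut-power, then deep sleep) are illustrative assumptions, not the actual ath6kl_hif_suspend() implementation.

/* Illustrative only: one plausible way a hif/bus layer could pick a mode
 * before calling ath6kl_cfg80211_suspend(). Assumes the ath6kl core headers;
 * cutpower_allowed stands in for whatever platform policy (for example a
 * flag such as ATH6KL_CONF_SUSPEND_CUTPOWER) the real code consults. */
static int example_hif_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow,
                               bool cutpower_allowed)
{
        enum ath6kl_cfg_suspend_mode mode;

        if (wow && wow->n_patterns)
                mode = ATH6KL_CFG_SUSPEND_WOW;          /* wake on configured patterns */
        else if (cutpower_allowed)
                mode = ATH6KL_CFG_SUSPEND_CUTPOWER;     /* power off the chip */
        else
                mode = ATH6KL_CFG_SUSPEND_DEEPSLEEP;    /* keep power, deep sleep */

        return ath6kl_cfg80211_suspend(ar, mode, wow);
}
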
index b92f0e5..41e465f 100644 (file)
@@ -23,8 +23,6 @@
 
 extern int ath6kl_printk(const char *level, const char *fmt, ...);
 
-#define A_CACHE_LINE_PAD            128
-
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
  * firmware. Needs to be incremented by 1 for any change in the firmware
@@ -78,20 +76,10 @@ enum crypto_type {
 struct htc_endpoint_credit_dist;
 struct ath6kl;
 enum htc_credit_dist_reason;
-struct htc_credit_state_info;
+struct ath6kl_htc_credit_info;
 
-int ath6k_setup_credit_dist(void *htc_handle,
-                           struct htc_credit_state_info *cred_info);
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
-                            struct list_head *epdist_list,
-                            enum htc_credit_dist_reason reason);
-void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
-                      struct list_head *ep_list,
-                      int tot_credits);
-void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
-                       struct htc_endpoint_credit_dist *ep_dist);
 struct ath6kl *ath6kl_core_alloc(struct device *sdev);
 int ath6kl_core_init(struct ath6kl *ar);
-int ath6kl_unavail_ev(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
 struct sk_buff *ath6kl_buf_alloc(int size);
 #endif /* COMMON_H */
index 6d8a484..e7e095e 100644 (file)
@@ -166,6 +166,7 @@ struct ath6kl_fw_ie {
 #define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN  BIT(1)
 #define ATH6KL_CONF_ENABLE_11N                 BIT(2)
 #define ATH6KL_CONF_ENABLE_TX_BURST            BIT(3)
+#define ATH6KL_CONF_SUSPEND_CUTPOWER           BIT(4)
 
 enum wlan_low_pwr_state {
        WLAN_POWER_STATE_ON,
@@ -380,40 +381,33 @@ struct ath6kl_req_key {
        u8 key_len;
 };
 
-/* Flag info */
-#define WMI_ENABLED    0
-#define WMI_READY      1
-#define CONNECTED      2
-#define STATS_UPDATE_PEND 3
-#define CONNECT_PEND     4
-#define WMM_ENABLED      5
-#define NETQ_STOPPED     6
-#define WMI_CTRL_EP_FULL  7
-#define DTIM_EXPIRED     8
-#define DESTROY_IN_PROGRESS  9
-#define NETDEV_REGISTERED    10
-#define SKIP_SCAN           11
-#define WLAN_ENABLED        12
-#define TESTMODE            13
-#define CLEAR_BSSFILTER_ON_BEACON 14
-#define DTIM_PERIOD_AVAIL    15
+#define MAX_NUM_VIF    1
+
+/* vif flags info */
+enum ath6kl_vif_state {
+       CONNECTED,
+       CONNECT_PEND,
+       WMM_ENABLED,
+       NETQ_STOPPED,
+       DTIM_EXPIRED,
+       NETDEV_REGISTERED,
+       CLEAR_BSSFILTER_ON_BEACON,
+       DTIM_PERIOD_AVAIL,
+       WLAN_ENABLED,
+       STATS_UPDATE_PEND,
+};
 
-struct ath6kl {
-       struct device *dev;
-       struct net_device *net_dev;
-       struct ath6kl_bmi bmi;
-       const struct ath6kl_hif_ops *hif_ops;
-       struct wmi *wmi;
-       int tx_pending[ENDPOINT_MAX];
-       int total_tx_data_pend;
-       struct htc_target *htc_target;
-       void *hif_priv;
-       spinlock_t lock;
-       struct semaphore sem;
+struct ath6kl_vif {
+       struct list_head list;
+       struct wireless_dev wdev;
+       struct net_device *ndev;
+       struct ath6kl *ar;
+       /* Lock to protect vif specific net_stats and flags */
+       spinlock_t if_lock;
+       u8 fw_vif_idx;
+       unsigned long flags;
        int ssid_len;
        u8 ssid[IEEE80211_MAX_SSID_LEN];
-       u8 next_mode;
-       u8 nw_type;
        u8 dot11_auth_mode;
        u8 auth_mode;
        u8 prwise_crypto;
@@ -421,21 +415,83 @@ struct ath6kl {
        u8 grp_crypto;
        u8 grp_crypto_len;
        u8 def_txkey_index;
-       struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+       u8 next_mode;
+       u8 nw_type;
        u8 bssid[ETH_ALEN];
        u8 req_bssid[ETH_ALEN];
        u16 ch_hint;
        u16 bss_ch;
+       struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+       struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+       struct aggr_info *aggr_cntxt;
+       struct timer_list disconnect_timer;
+       struct cfg80211_scan_request *scan_req;
+       enum sme_state sme_state;
+       int reconnect_flag;
+       u32 last_roc_id;
+       u32 last_cancel_roc_id;
+       u32 send_action_id;
+       bool probe_req_report;
+       u16 next_chan;
+       u16 assoc_bss_beacon_int;
+       u8 assoc_bss_dtim_period;
+       struct net_device_stats net_stats;
+       struct target_stats target_stats;
+};
+
+#define WOW_LIST_ID            0
+#define WOW_HOST_REQ_DELAY     500 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+       WMI_ENABLED,
+       WMI_READY,
+       WMI_CTRL_EP_FULL,
+       TESTMODE,
+       DESTROY_IN_PROGRESS,
+       SKIP_SCAN,
+       ROAM_TBL_PEND,
+       FIRST_BOOT,
+};
+
+enum ath6kl_state {
+       ATH6KL_STATE_OFF,
+       ATH6KL_STATE_ON,
+       ATH6KL_STATE_DEEPSLEEP,
+       ATH6KL_STATE_CUTPOWER,
+       ATH6KL_STATE_WOW,
+};
+
+struct ath6kl {
+       struct device *dev;
+       struct wiphy *wiphy;
+
+       enum ath6kl_state state;
+
+       struct ath6kl_bmi bmi;
+       const struct ath6kl_hif_ops *hif_ops;
+       struct wmi *wmi;
+       int tx_pending[ENDPOINT_MAX];
+       int total_tx_data_pend;
+       struct htc_target *htc_target;
+       void *hif_priv;
+       struct list_head vif_list;
+       /* Lock to avoid race in vif_list entries among add/del/traverse */
+       spinlock_t list_lock;
+       u8 num_vif;
+       u8 max_norm_iface;
+       u8 avail_idx_map;
+       spinlock_t lock;
+       struct semaphore sem;
        u16 listen_intvl_b;
        u16 listen_intvl_t;
        u8 lrssi_roam_threshold;
        struct ath6kl_version version;
        u32 target_type;
        u8 tx_pwr;
-       struct net_device_stats net_stats;
-       struct target_stats target_stats;
        struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
        u8 ibss_ps_enable;
+       bool ibss_if_active;
        u8 node_num;
        u8 next_ep_id;
        struct ath6kl_cookie *cookie_list;
@@ -446,7 +502,7 @@ struct ath6kl {
        u8 hiac_stream_active_pri;
        u8 ep2ac_map[ENDPOINT_MAX];
        enum htc_endpoint_id ctrl_ep;
-       struct htc_credit_state_info credit_state_info;
+       struct ath6kl_htc_credit_info credit_state_info;
        u32 connect_ctrl_flags;
        u32 user_key_ctrl;
        u8 usr_bss_filter;
@@ -456,18 +512,13 @@ struct ath6kl {
        struct sk_buff_head mcastpsq;
        spinlock_t mcastpsq_lock;
        u8 intra_bss;
-       struct aggr_info *aggr_cntxt;
        struct wmi_ap_mode_stat ap_stats;
        u8 ap_country_code[3];
        struct list_head amsdu_rx_buffer_queue;
-       struct timer_list disconnect_timer;
        u8 rx_meta_ver;
-       struct wireless_dev *wdev;
-       struct cfg80211_scan_request *scan_req;
-       struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
-       enum sme_state sme_state;
        enum wlan_low_pwr_state wlan_pwr_state;
        struct wmi_scan_params_cmd sc_params;
+       u8 mac_addr[ETH_ALEN];
 #define AR_MCAST_FILTER_MAC_ADDR_SIZE  4
        struct {
                void *rx_report;
@@ -487,7 +538,6 @@ struct ath6kl {
        struct ath6kl_mbox_info mbox_info;
 
        struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
-       int reconnect_flag;
        unsigned long flag;
 
        u8 *fw_board;
@@ -508,13 +558,7 @@ struct ath6kl {
 
        struct dentry *debugfs_phy;
 
-       u32 send_action_id;
-       bool probe_req_report;
-       u16 next_chan;
-
        bool p2p;
-       u16 assoc_bss_beacon_int;
-       u8 assoc_bss_dtim_period;
 
 #ifdef CONFIG_ATH6KL_DEBUG
        struct {
@@ -529,23 +573,19 @@ struct ath6kl {
                struct {
                        unsigned int invalid_rate;
                } war_stats;
+
+               u8 *roam_tbl;
+               unsigned int roam_tbl_len;
+
+               u8 keepalive;
+               u8 disc_timeout;
        } debug;
 #endif /* CONFIG_ATH6KL_DEBUG */
 };
 
 static inline void *ath6kl_priv(struct net_device *dev)
 {
-       return wdev_priv(dev->ieee80211_ptr);
-}
-
-static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
-                                              *cred_info,
-                                              struct htc_endpoint_credit_dist
-                                              *ep_dist, int credits)
-{
-       ep_dist->credits += credits;
-       ep_dist->cred_assngd += credits;
-       cred_info->cur_free_credits -= credits;
+       return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
 }
 
 static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +601,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
        return addr;
 }
 
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
 int ath6kl_configure_target(struct ath6kl *ar);
 void ath6kl_detect_error(unsigned long ptr);
 void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +618,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
 int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
 int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
 int ath6kl_read_fwlogs(struct ath6kl *ar);
-void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
 void ath6kl_tx_data_cleanup(struct ath6kl *ar);
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
-                         bool get_dbglogs);
 
 struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
 void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +635,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
 void aggr_module_destroy(struct aggr_info *aggr_info);
 void aggr_reset_state(struct aggr_info *aggr_info);
 
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
 struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
 
 void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
 int ath6kl_control_tx(void *devt, struct sk_buff *skb,
                      enum htc_endpoint_id eid);
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
                          u8 *bssid, u16 listen_int,
                          u16 beacon_int, enum network_type net_type,
                          u8 beacon_ie_len, u8 assoc_req_len,
                          u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
                                u8 keymgmt, u8 ucipher, u8 auth,
                                u8 assoc_req_len, u8 *assoc_info);
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
                             u8 *bssid, u8 assoc_resp_len,
                             u8 *assoc_info, u16 prot_reason_status);
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
 void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
 void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
 enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
 
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
 
-void ath6kl_dtimexpiry_event(struct ath6kl *ar);
-void ath6kl_disconnect(struct ath6kl *ar);
-void ath6kl_deep_sleep_enable(struct ath6kl *ar);
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
                             u8 win_sz);
 void ath6kl_wakeup_event(void *dev);
-void ath6kl_target_failure(struct ath6kl *ar);
+
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+                        bool wait_fot_compltn, bool cold_reset);
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
+void ath6kl_core_free(struct ath6kl *ar);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+void ath6kl_check_wow_status(struct ath6kl *ar);
 
 #endif /* CORE_H */
index 7879b53..9eff0d0 100644
@@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
 
 static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
 {
-       ath6kl_dbg(ATH6KL_DBG_ANY,
+       ath6kl_dbg(ATH6KL_DBG_CREDIT,
                   "--- endpoint: %d  svc_id: 0x%X ---\n",
                   ep_dist->endpoint, ep_dist->svc_id);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags     : 0x%X\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags     : 0x%X\n",
                   ep_dist->dist_flags);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm      : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm      : %d\n",
                   ep_dist->cred_norm);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min       : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min       : %d\n",
                   ep_dist->cred_min);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " credits        : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits        : %d\n",
                   ep_dist->credits);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd    : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd    : %d\n",
                   ep_dist->cred_assngd);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred      : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred      : %d\n",
                   ep_dist->seek_cred);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz        : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz        : %d\n",
                   ep_dist->cred_sz);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg   : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg   : %d\n",
                   ep_dist->cred_per_msg);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist   : %d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist   : %d\n",
                   ep_dist->cred_to_dist);
-       ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth      : %d\n",
-                  get_queue_depth(&((struct htc_endpoint *)
-                                    ep_dist->htc_rsvd)->txq));
-       ath6kl_dbg(ATH6KL_DBG_ANY,
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth      : %d\n",
+                  get_queue_depth(&ep_dist->htc_ep->txq));
+       ath6kl_dbg(ATH6KL_DBG_CREDIT,
                   "----------------------------------\n");
 }
 
+/* FIXME: move to htc.c */
 void dump_cred_dist_stats(struct htc_target *target)
 {
        struct htc_endpoint_credit_dist *ep_list;
 
-       if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+       if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
                return;
 
        list_for_each_entry(ep_list, &target->cred_dist_list, list)
                dump_cred_dist(ep_list);
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
-                  target->cred_dist_cntxt, NULL);
-       ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
-                  target->cred_dist_cntxt->total_avail_credits,
-                  target->cred_dist_cntxt->cur_free_credits);
+       ath6kl_dbg(ATH6KL_DBG_CREDIT,
+                  "credit distribution total %d free %d\n",
+                  target->credit_info->total_avail_credits,
+                  target->credit_info->cur_free_credits);
 }
 
 static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
 {
        struct ath6kl *ar = file->private_data;
-       struct target_stats *tgt_stats = &ar->target_stats;
+       struct ath6kl_vif *vif;
+       struct target_stats *tgt_stats;
        char *buf;
        unsigned int len = 0, buf_len = 1500;
        int i;
        long left;
        ssize_t ret_cnt;
 
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       tgt_stats = &vif->target_stats;
+
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
@@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
                return -EBUSY;
        }
 
-       set_bit(STATS_UPDATE_PEND, &ar->flag);
+       set_bit(STATS_UPDATE_PEND, &vif->flags);
 
-       if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+       if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
                up(&ar->sem);
                kfree(buf);
                return -EIO;
@@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
 
        left = wait_event_interruptible_timeout(ar->event_wq,
                                                !test_bit(STATS_UPDATE_PEND,
-                                               &ar->flag), WMI_TIMEOUT);
+                                               &vif->flags), WMI_TIMEOUT);
 
        up(&ar->sem);
 
@@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
 
        len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
                         "Total Avail Credits: ",
-                        target->cred_dist_cntxt->total_avail_credits);
+                        target->credit_info->total_avail_credits);
        len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
                         "Free credits :",
-                        target->cred_dist_cntxt->cur_free_credits);
+                        target->credit_info->cur_free_credits);
 
        len += scnprintf(buf + len, buf_len - len,
                         " Epid  Flags    Cred_norm  Cred_min  Credits  Cred_assngd"
@@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
                print_credit_info("%9d", cred_per_msg);
                print_credit_info("%14d", cred_to_dist);
                len += scnprintf(buf + len, buf_len - len, "%12d\n",
-                                get_queue_depth(&((struct htc_endpoint *)
-                                                ep_list->htc_rsvd)->txq));
+                                get_queue_depth(&ep_list->htc_ep->txq));
        }
 
        if (len > buf_len)
@@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = {
        .llseek = default_llseek,
 };
 
+static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
+                                       unsigned int buf_len, unsigned int len,
+                                       int offset, const char *name)
+{
+       int i;
+       struct htc_endpoint_stats *ep_st;
+       u32 *counter;
+
+       len += scnprintf(buf + len, buf_len - len, "%s:", name);
+       for (i = 0; i < ENDPOINT_MAX; i++) {
+               ep_st = &target->endpoint[i].ep_st;
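+               /* the stats struct is all u32s; index it by byte offset */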
+               counter = ((u32 *) ep_st) + (offset / 4);
+               len += scnprintf(buf + len, buf_len - len, " %u", *counter);
+       }
+       len += scnprintf(buf + len, buf_len - len, "\n");
+
+       return len;
+}
+
+static ssize_t ath6kl_endpoint_stats_read(struct file *file,
+                                         char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       struct htc_target *target = ar->htc_target;
+       char *buf;
+       unsigned int buf_len, len = 0;
+       ssize_t ret_cnt;
+
+       buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
+               (25 + ENDPOINT_MAX * 11);
+       buf = kmalloc(buf_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+#define EPSTAT(name)                                                   \
+       len = print_endpoint_stat(target, buf, buf_len, len,            \
+                                 offsetof(struct htc_endpoint_stats, name), \
+                                 #name)
+       EPSTAT(cred_low_indicate);
+       EPSTAT(tx_issued);
+       EPSTAT(tx_pkt_bundled);
+       EPSTAT(tx_bundles);
+       EPSTAT(tx_dropped);
+       EPSTAT(tx_cred_rpt);
+       EPSTAT(cred_rpt_from_rx);
+       EPSTAT(cred_rpt_from_other);
+       EPSTAT(cred_rpt_ep0);
+       EPSTAT(cred_from_rx);
+       EPSTAT(cred_from_other);
+       EPSTAT(cred_from_ep0);
+       EPSTAT(cred_cosumd);
+       EPSTAT(cred_retnd);
+       EPSTAT(rx_pkts);
+       EPSTAT(rx_lkahds);
+       EPSTAT(rx_bundl);
+       EPSTAT(rx_bundle_lkahd);
+       EPSTAT(rx_bundle_from_hdr);
+       EPSTAT(rx_alloc_thresh_hit);
+       EPSTAT(rxalloc_thresh_byte);
+#undef EPSTAT
+
+       if (len > buf_len)
+               len = buf_len;
+
+       ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+       return ret_cnt;
+}
+
+static ssize_t ath6kl_endpoint_stats_write(struct file *file,
+                                          const char __user *user_buf,
+                                          size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       struct htc_target *target = ar->htc_target;
+       int ret, i;
+       u32 val;
+       struct htc_endpoint_stats *ep_st;
+
+       ret = kstrtou32_from_user(user_buf, count, 0, &val);
+       if (ret)
+               return ret;
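+       /* a write of 0 clears the accumulated stats on every endpoint */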
+       if (val == 0) {
+               for (i = 0; i < ENDPOINT_MAX; i++) {
+                       ep_st = &target->endpoint[i].ep_st;
+                       memset(ep_st, 0, sizeof(*ep_st));
+               }
+       }
+
+       return count;
+}
+
+static const struct file_operations fops_endpoint_stats = {
+       .open = ath6kl_debugfs_open,
+       .read = ath6kl_endpoint_stats_read,
+       .write = ath6kl_endpoint_stats_write,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static unsigned long ath6kl_get_num_reg(void)
 {
        int i;
@@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = {
        .llseek = default_llseek,
 };
 
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+                               size_t len)
+{
+       const struct wmi_target_roam_tbl *tbl;
+       u16 num_entries;
+
+       if (len < sizeof(*tbl))
+               return -EINVAL;
+
+       tbl = (const struct wmi_target_roam_tbl *) buf;
+       num_entries = le16_to_cpu(tbl->num_entries);
+       if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
+           len)
+               return -EINVAL;
+
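+       /* cache the table for the roam_table debugfs read */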
+       if (ar->debug.roam_tbl == NULL ||
+           ar->debug.roam_tbl_len < (unsigned int) len) {
+               kfree(ar->debug.roam_tbl);
+               ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
+               if (ar->debug.roam_tbl == NULL)
+                       return -ENOMEM;
+       }
+
+       memcpy(ar->debug.roam_tbl, buf, len);
+       ar->debug.roam_tbl_len = len;
+
+       if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
+               clear_bit(ROAM_TBL_PEND, &ar->flag);
+               wake_up(&ar->event_wq);
+       }
+
+       return 0;
+}
+
+static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       int ret;
+       long left;
+       struct wmi_target_roam_tbl *tbl;
+       u16 num_entries, i;
+       char *buf;
+       unsigned int len, buf_len;
+       ssize_t ret_cnt;
+
+       if (down_interruptible(&ar->sem))
+               return -EBUSY;
+
+       set_bit(ROAM_TBL_PEND, &ar->flag);
+
+       ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
+       if (ret) {
+               up(&ar->sem);
+               return ret;
+       }
+
+       left = wait_event_interruptible_timeout(
+               ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
+       up(&ar->sem);
+
+       if (left <= 0)
+               return -ETIMEDOUT;
+
+       if (ar->debug.roam_tbl == NULL)
+               return -ENOMEM;
+
+       tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
+       num_entries = le16_to_cpu(tbl->num_entries);
+
+       buf_len = 100 + num_entries * 100;
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+       len = 0;
+       len += scnprintf(buf + len, buf_len - len,
+                        "roam_mode=%u\n\n"
+                        "# roam_util bssid rssi rssidt last_rssi util bias\n",
+                        le16_to_cpu(tbl->roam_mode));
+
+       for (i = 0; i < num_entries; i++) {
+               struct wmi_bss_roam_info *info = &tbl->info[i];
+               len += scnprintf(buf + len, buf_len - len,
+                                "%d %pM %d %d %d %d %d\n",
+                                a_sle32_to_cpu(info->roam_util), info->bssid,
+                                info->rssi, info->rssidt, info->last_rssi,
+                                info->util, info->bias);
+       }
+
+       if (len > buf_len)
+               len = buf_len;
+
+       ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+       kfree(buf);
+       return ret_cnt;
+}
+
+static const struct file_operations fops_roam_table = {
+       .read = ath6kl_roam_table_read,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_force_roam_write(struct file *file,
+                                      const char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       int ret;
+       char buf[20];
+       size_t len;
+       u8 bssid[ETH_ALEN];
+       int i;
+       int addr[ETH_ALEN];
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+       buf[len] = '\0';
+
+       if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+                  &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+           != ETH_ALEN)
+               return -EINVAL;
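+       /* sscanf needs int storage; copy the parsed octets into bssid */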
+       for (i = 0; i < ETH_ALEN; i++)
+               bssid[i] = addr[i];
+
+       ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static const struct file_operations fops_force_roam = {
+       .write = ath6kl_force_roam_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_roam_mode_write(struct file *file,
+                                     const char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       int ret;
+       char buf[20];
+       size_t len;
+       enum wmi_roam_mode mode;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+       buf[len] = '\0';
+       if (len > 0 && buf[len - 1] == '\n')
+               buf[len - 1] = '\0';
+
+       if (strcasecmp(buf, "default") == 0)
+               mode = WMI_DEFAULT_ROAM_MODE;
+       else if (strcasecmp(buf, "bssbias") == 0)
+               mode = WMI_HOST_BIAS_ROAM_MODE;
+       else if (strcasecmp(buf, "lock") == 0)
+               mode = WMI_LOCK_BSS_MODE;
+       else
+               return -EINVAL;
+
+       ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static const struct file_operations fops_roam_mode = {
+       .write = ath6kl_roam_mode_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+       ar->debug.keepalive = keepalive;
+}
+
+static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       char buf[16];
+       int len;
+
+       len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_keepalive_write(struct file *file,
+                                     const char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       int ret;
+       u8 val;
+
+       ret = kstrtou8_from_user(user_buf, count, 0, &val);
+       if (ret)
+               return ret;
+
+       ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static const struct file_operations fops_keepalive = {
+       .open = ath6kl_debugfs_open,
+       .read = ath6kl_keepalive_read,
+       .write = ath6kl_keepalive_write,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
+{
+       ar->debug.disc_timeout = timeout;
+}
+
+static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       char buf[16];
+       int len;
+
+       len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
+                                              const char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       int ret;
+       u8 val;
+
+       ret = kstrtou8_from_user(user_buf, count, 0, &val);
+       if (ret)
+               return ret;
+
+       ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+static const struct file_operations fops_disconnect_timeout = {
+       .open = ath6kl_debugfs_open,
+       .read = ath6kl_disconnect_timeout_read,
+       .write = ath6kl_disconnect_timeout_write,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_create_qos_write(struct file *file,
+                                               const char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+
+       struct ath6kl *ar = file->private_data;
+       struct ath6kl_vif *vif;
+       char buf[200];
+       ssize_t len;
+       char *sptr, *token;
+       struct wmi_create_pstream_cmd pstream;
+       u32 val32;
+       u16 val16;
+
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+       buf[len] = '\0';
+       sptr = buf;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &pstream.user_pri))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &pstream.traffic_direc))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &pstream.traffic_class))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &pstream.traffic_type))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &pstream.voice_psc_cap))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.min_service_int = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.max_service_int = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.inactivity_int = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.suspension_int = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.service_start_time = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &pstream.tsid))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &val16))
+               return -EINVAL;
+       pstream.nominal_msdu = cpu_to_le16(val16);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &val16))
+               return -EINVAL;
+       pstream.max_msdu = cpu_to_le16(val16);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.min_data_rate = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.mean_data_rate = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.peak_data_rate = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.max_burst_size = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.delay_bound = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.min_phy_rate = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.sba = cpu_to_le32(val32);
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou32(token, 0, &val32))
+               return -EINVAL;
+       pstream.medium_time = cpu_to_le32(val32);
+
+       ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
+
+       return count;
+}
+
+static const struct file_operations fops_create_qos = {
+       .write = ath6kl_create_qos_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_delete_qos_write(struct file *file,
+                               const char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+
+       struct ath6kl *ar = file->private_data;
+       struct ath6kl_vif *vif;
+       char buf[100];
+       ssize_t len;
+       char *sptr, *token;
+       u8 traffic_class;
+       u8 tsid;
+
+       vif = ath6kl_vif_first(ar);
+       if (!vif)
+               return -EIO;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+       buf[len] = '\0';
+       sptr = buf;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &traffic_class))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou8(token, 0, &tsid))
+               return -EINVAL;
+
+       ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
+                                     traffic_class, tsid);
+
+       return count;
+}
+
+static const struct file_operations fops_delete_qos = {
+       .write = ath6kl_delete_qos_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_bgscan_int_write(struct file *file,
+                               const char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       u16 bgscan_int;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtou16(buf, 0, &bgscan_int))
+               return -EINVAL;
+
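+       /* treat 0 as the largest possible interval */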
+       if (bgscan_int == 0)
+               bgscan_int = 0xffff;
+
+       ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
+                                 0, 0, 0);
+
+       return count;
+}
+
+static const struct file_operations fops_bgscan_int = {
+       .write = ath6kl_bgscan_int_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_listen_int_write(struct file *file,
+                                               const char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       u16 listen_int_t, listen_int_b;
+       char buf[32];
+       char *sptr, *token;
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       sptr = buf;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+
+       if (kstrtou16(token, 0, &listen_int_t))
+               return -EINVAL;
+
+       if (!sptr || kstrtou16(sptr, 0, &listen_int_b))
+               return -EINVAL;
+
+       if ((listen_int_t < 15) || (listen_int_t > 5000))
+               return -EINVAL;
+
+       if ((listen_int_b < 1) || (listen_int_b > 50))
+               return -EINVAL;
+
+       ar->listen_intvl_t = listen_int_t;
+       ar->listen_intvl_b = listen_int_b;
+
+       ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
+                                     ar->listen_intvl_b);
+
+       return count;
+}
+
+static ssize_t ath6kl_listen_int_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       char buf[16];
+       int len;
+
+       len = snprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
+                                       ar->listen_intvl_b);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_listen_int = {
+       .read = ath6kl_listen_int_read,
+       .write = ath6kl_listen_int_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_power_params_write(struct file *file,
+                                               const char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ath6kl *ar = file->private_data;
+       u8 buf[100];
+       unsigned int len = 0;
+       char *sptr, *token;
+       u16 idle_period, ps_poll_num, dtim,
+               tx_wakeup, num_tx;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+       buf[len] = '\0';
+       sptr = buf;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &idle_period))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &ps_poll_num))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &dtim))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &tx_wakeup))
+               return -EINVAL;
+
+       token = strsep(&sptr, " ");
+       if (!token)
+               return -EINVAL;
+       if (kstrtou16(token, 0, &num_tx))
+               return -EINVAL;
+
+       ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
+                               dtim, tx_wakeup, num_tx, 0);
+
+       return count;
+}
+
+static const struct file_operations fops_power_params = {
+       .write = ath6kl_power_params_write,
+       .open = ath6kl_debugfs_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 int ath6kl_debug_init(struct ath6kl *ar)
 {
        ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar)
        ar->debug.fwlog_mask = 0;
 
        ar->debugfs_phy = debugfs_create_dir("ath6kl",
-                                            ar->wdev->wiphy->debugfsdir);
+                                            ar->wiphy->debugfsdir);
        if (!ar->debugfs_phy) {
                vfree(ar->debug.fwlog_buf.buf);
                kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
        debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
                            &fops_credit_dist_stats);
 
+       debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+                           ar->debugfs_phy, ar, &fops_endpoint_stats);
+
        debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
                            &fops_fwlog);
 
@@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar)
        debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
                            &fops_war_stats);
 
+       debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+                           &fops_roam_table);
+
+       debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+                           &fops_force_roam);
+
+       debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+                           &fops_roam_mode);
+
+       debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+                           &fops_keepalive);
+
+       debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+                           ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+       debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+                               &fops_create_qos);
+
+       debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+                               &fops_delete_qos);
+
+       debugfs_create_file("bgscan_interval", S_IWUSR,
+                               ar->debugfs_phy, ar, &fops_bgscan_int);
+
+       debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+                                               &fops_power_params);
+
        return 0;
 }
 
@@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar)
 {
        vfree(ar->debug.fwlog_buf.buf);
        kfree(ar->debug.fwlog_tmp);
+       kfree(ar->debug.roam_tbl);
 }
 
 #endif
index 7b7675f..9853c9c 100644
 #ifndef DEBUG_H
 #define DEBUG_H
 
-#include "htc_hif.h"
+#include "hif.h"
 
 enum ATH6K_DEBUG_MASK {
-       ATH6KL_DBG_WLAN_CONNECT = BIT(0),     /* wlan connect */
-       ATH6KL_DBG_WLAN_SCAN    = BIT(1),     /* wlan scan */
+       ATH6KL_DBG_CREDIT       = BIT(0),
+       /* hole */
        ATH6KL_DBG_WLAN_TX      = BIT(2),     /* wlan tx */
        ATH6KL_DBG_WLAN_RX      = BIT(3),     /* wlan rx */
        ATH6KL_DBG_BMI          = BIT(4),     /* bmi tracing */
-       ATH6KL_DBG_HTC_SEND     = BIT(5),     /* htc send */
-       ATH6KL_DBG_HTC_RECV     = BIT(6),     /* htc recv */
+       ATH6KL_DBG_HTC          = BIT(5),
+       ATH6KL_DBG_HIF          = BIT(6),
        ATH6KL_DBG_IRQ          = BIT(7),     /* interrupt processing */
-       ATH6KL_DBG_PM           = BIT(8),     /* power management */
-       ATH6KL_DBG_WLAN_NODE    = BIT(9),     /* general wlan node tracing */
+       /* hole */
+                        bool wait_for_compltn, bool cold_reset);
        ATH6KL_DBG_WMI          = BIT(10),    /* wmi tracing */
        ATH6KL_DBG_TRC          = BIT(11),    /* generic func tracing */
        ATH6KL_DBG_SCATTER      = BIT(12),    /* hif scatter tracing */
@@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK {
        ATH6KL_DBG_SDIO_DUMP    = BIT(17),
        ATH6KL_DBG_BOOT         = BIT(18),    /* driver init and fw boot */
        ATH6KL_DBG_WMI_DUMP     = BIT(19),
+       ATH6KL_DBG_SUSPEND      = BIT(20),
        ATH6KL_DBG_ANY          = 0xffffffff  /* enable all logs */
 };
 
@@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
 void dump_cred_dist_stats(struct htc_target *target);
 void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
 void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+                               size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
 int ath6kl_debug_init(struct ath6kl *ar);
 void ath6kl_debug_cleanup(struct ath6kl *ar);
 
@@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
 {
 }
 
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+                                             const void *buf, size_t len)
+{
+       return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+                                                      u8 timeout)
+{
+}
+
 static inline int ath6kl_debug_init(struct ath6kl *ar)
 {
        return 0;
index d6c898f..eed2287 100644
 #define HIF_OPS_H
 
 #include "hif.h"
+#include "debug.h"
 
 static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
                                      u32 len, u32 request)
 {
+       ath6kl_dbg(ATH6KL_DBG_HIF,
+                  "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+                  (request & HIF_WRITE) ? "write" : "read",
+                  addr, buf, len, request);
+
        return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
 }
 
@@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
                                  u32 length, u32 request,
                                  struct htc_packet *packet)
 {
+       ath6kl_dbg(ATH6KL_DBG_HIF,
+                  "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+                  address, buffer, length, request);
+
        return ar->hif_ops->write_async(ar, address, buffer, length,
                                        request, packet);
 }
 static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
 {
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
        return ar->hif_ops->irq_enable(ar);
 }
 
 static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
 {
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
        return ar->hif_ops->irq_disable(ar);
 }
 
@@ -69,9 +83,40 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
        return ar->hif_ops->cleanup_scatter(ar);
 }
 
-static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+                                    struct cfg80211_wowlan *wow)
 {
-       return ar->hif_ops->suspend(ar);
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+       return ar->hif_ops->suspend(ar, wow);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+       return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+       return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+       return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+       ar->hif_ops->stop(ar);
 }
 
 #endif
similarity index 80%
rename from drivers/net/wireless/ath/ath6kl/htc_hif.c
rename to drivers/net/wireless/ath/ath6kl/hif.c
index 86b1cc7..e57da35 100644
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+#include "hif.h"
 
 #include "core.h"
 #include "target.h"
 #include "hif-ops.h"
-#include "htc_hif.h"
 #include "debug.h"
 
 #define MAILBOX_FOR_BLOCK_SIZE          1
 
 #define ATH6KL_TIME_QUANTUM    10  /* in ms */
 
-static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
+                                     bool from_dma)
 {
        u8 *buf;
        int i;
@@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
        return 0;
 }
 
-int ath6kldev_rw_comp_handler(void *context, int status)
+int ath6kl_hif_rw_comp_handler(void *context, int status)
 {
        struct htc_packet *packet = context;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                  "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
                   packet, status);
 
        packet->status = status;
@@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status)
 
        return 0;
 }
+#define REG_DUMP_COUNT_AR6003   60
+#define REGISTER_DUMP_LEN_MAX   60
 
-static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
 {
-       u32 dummy;
-       int status;
+       __le32 regdump_val[REGISTER_DUMP_LEN_MAX];
+       u32 i, address, regdump_addr = 0;
+       int ret;
+
+       if (ar->target_type != TARGET_TYPE_AR6003)
+               return;
+
+       /* the reg dump pointer is copied to the host interest area */
+       address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+       address = TARG_VTOP(ar->target_type, address);
+
+       /* read RAM location through diagnostic window */
+       ret = ath6kl_diag_read32(ar, address, &regdump_addr);
+
+       if (ret || !regdump_addr) {
+               ath6kl_warn("failed to get ptr to register dump area: %d\n",
+                           ret);
+               return;
+       }
+
+       ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
+               regdump_addr);
+       regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
+
+       /* fetch register dump data */
+       ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
+                                 REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+       if (ret) {
+               ath6kl_warn("failed to get register dump: %d\n", ret);
+               return;
+       }
+
+       ath6kl_info("crash dump:\n");
+       ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
+                   ar->wiphy->fw_version);
+
+       BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
 
-       ath6kl_err("target debug interrupt\n");
+       for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
+               ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
+                           i,
+                           le32_to_cpu(regdump_val[i]),
+                           le32_to_cpu(regdump_val[i + 1]),
+                           le32_to_cpu(regdump_val[i + 2]),
+                           le32_to_cpu(regdump_val[i + 3]));
+       }
+
+}
+
+static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
+{
+       u32 dummy;
+       int ret;
 
-       ath6kl_target_failure(dev->ar);
+       ath6kl_warn("firmware crashed\n");
 
        /*
         * read counter to clear the interrupt, the debug error interrupt is
         * counter 0.
         */
-       status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+       ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
                                     (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
-       if (status)
-               WARN_ON(1);
+       if (ret)
+               ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
 
-       return status;
+       ath6kl_hif_dump_fw_crash(dev->ar);
+
+       return ret;
 }
 
 /* mailbox recv message polling */
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
                              int timeout)
 {
        struct ath6kl_irq_proc_registers *rg;
@@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
 
                /* delay a little  */
                mdelay(ATH6KL_TIME_QUANTUM);
-               ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+               ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
        }
 
        if (i == 0) {
@@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
                         * Target failure handler will be called in case of
                         * an assert.
                         */
-                       ath6kldev_proc_dbg_intr(dev);
+                       ath6kl_hif_proc_dbg_intr(dev);
        }
 
        return status;
@@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
  * Disable packet reception (used in case the host runs out of buffers)
  * using the interrupt enable registers through the host I/F
  */
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
 {
        struct ath6kl_irq_enable_reg regs;
        int status = 0;
 
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+                  enable_rx ? "enable" : "disable");
+
        /* take the lock to protect interrupt enable shadows */
        spin_lock_bh(&dev->lock);
 
@@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
        return status;
 }
 
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
                              struct hif_scatter_req *scat_req, bool read)
 {
        int status = 0;
@@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
                        dev->ar->mbox_info.htc_addr;
        }
 
-       ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
-                  "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+       ath6kl_dbg(ATH6KL_DBG_HIF,
+                  "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
                   scat_req->scat_entries, scat_req->len,
                   scat_req->addr, !read ? "async" : "sync",
                   (read) ? "rd" : "wr");
 
        if (!read && scat_req->virt_scat) {
-               status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+               status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
                if (status) {
                        scat_req->status = status;
                        scat_req->complete(dev->ar->htc_target, scat_req);
@@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
                scat_req->status = status;
                if (!status && scat_req->virt_scat)
                        scat_req->status =
-                               ath6kldev_cp_scat_dma_buf(scat_req, true);
+                               ath6kl_hif_cp_scat_dma_buf(scat_req, true);
        }
 
        return status;
 }
 
-static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
 {
        u8 counter_int_status;
 
@@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
         * the debug assertion counter interrupt.
         */
        if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
-               return ath6kldev_proc_dbg_intr(dev);
+               return ath6kl_hif_proc_dbg_intr(dev);
 
        return 0;
 }
 
-static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
 {
        int status;
        u8 error_int_status;
@@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
        return status;
 }
 
-static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
 {
        int status;
        u8 cpu_int_status;
@@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
                 * we rapidly pull packets.
                 */
                status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
-                                                         &lk_ahd, &fetched);
+                                                         lk_ahd, &fetched);
                if (status)
                        goto out;
 
@@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
 
        if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
                /* CPU Interrupt */
-               status = ath6kldev_proc_cpu_intr(dev);
+               status = ath6kl_hif_proc_cpu_intr(dev);
                if (status)
                        goto out;
        }
 
        if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
                /* Error Interrupt */
-               status = ath6kldev_proc_err_intr(dev);
+               status = ath6kl_hif_proc_err_intr(dev);
                if (status)
                        goto out;
        }
 
        if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
                /* Counter Interrupt */
-               status = ath6kldev_proc_counter_intr(dev);
+               status = ath6kl_hif_proc_counter_intr(dev);
 
 out:
        /*
@@ -479,9 +535,10 @@ out:
 }
 
 /* interrupt handler, kicks off all interrupt processing */
-int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
 {
        struct ath6kl_device *dev = ar->htc_target->dev;
+       unsigned long timeout;
        int status = 0;
        bool done = false;
 
@@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
         * IRQ processing is synchronous, interrupt status registers can be
         * re-read.
         */
-       while (!done) {
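+       /* bound irq processing so a stuck target cannot stall us forever */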
+       timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+       while (time_before(jiffies, timeout) && !done) {
                status = proc_pending_irqs(dev, &done);
                if (status)
                        break;
@@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
        return status;
 }
 
-static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
 {
        struct ath6kl_irq_enable_reg regs;
        int status;
@@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
        return status;
 }
 
-int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
 {
        struct ath6kl_irq_enable_reg regs;
 
@@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev)
 }
 
 /* enable device interrupts */
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
 {
        int status = 0;
 
@@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
         * target "soft" resets. The ATH6KL interrupt enables reset back to an
         * "enabled" state when this happens.
         */
-       ath6kldev_disable_intrs(dev);
+       ath6kl_hif_disable_intrs(dev);
 
        /* unmask the host controller interrupts */
        ath6kl_hif_irq_enable(dev->ar);
-       status = ath6kldev_enable_intrs(dev);
+       status = ath6kl_hif_enable_intrs(dev);
 
        return status;
 }
 
 /* disable all device interrupts */
-int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
 {
        /*
         * Mask the interrupt at the HIF layer to avoid any stray interrupt
         * taken while we zero out our shadow registers in
-        * ath6kldev_disable_intrs().
+        * ath6kl_hif_disable_intrs().
         */
        ath6kl_hif_irq_disable(dev->ar);
 
-       return ath6kldev_disable_intrs(dev);
+       return ath6kl_hif_disable_intrs(dev);
 }
 
-int ath6kldev_setup(struct ath6kl_device *dev)
+int ath6kl_hif_setup(struct ath6kl_device *dev)
 {
        int status = 0;
 
@@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev)
        /* must be a power of 2 */
        if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
                WARN_ON(1);
+               status = -EINVAL;
                goto fail_setup;
        }
 
        /* assemble mask, used for padding to a block */
        dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
 
-       ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+       ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
                   dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
 
-       ath6kl_dbg(ATH6KL_DBG_TRC,
-                  "hif interrupt processing is sync only\n");
-
-       status = ath6kldev_disable_intrs(dev);
+       status = ath6kl_hif_disable_intrs(dev);
 
 fail_setup:
        return status;
index 797e2d1..f2dc3bc 100644
 /* mode to enable special 4-bit interrupt assertion without clock */
 #define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ   (1 << 0)
 
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX    0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK     0x01
+
+/* FIXME: are these duplicates of the MAX_SCATTER_* values in hif.h? */
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ            16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER      (16 * 1024)
+#define ATH6KL_SCATTER_REQS                       4
+
+#define ATH6KL_HIF_COMMUNICATION_TIMEOUT       1000
+
 struct bus_request {
        struct list_head list;
 
@@ -186,6 +198,34 @@ struct hif_scatter_req {
        struct hif_scatter_item scat_list[1];
 };
 
+struct ath6kl_irq_proc_registers {
+       u8 host_int_status;
+       u8 cpu_int_status;
+       u8 error_int_status;
+       u8 counter_int_status;
+       u8 mbox_frame;
+       u8 rx_lkahd_valid;
+       u8 host_int_status2;
+       u8 gmbox_rx_avail;
+       __le32 rx_lkahd[2];
+       __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+       u8 int_status_en;
+       u8 cpu_int_status_en;
+       u8 err_int_status_en;
+       u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+       spinlock_t lock;
+       struct ath6kl_irq_proc_registers irq_proc_reg;
+       struct ath6kl_irq_enable_reg irq_en_reg;
+       struct htc_target *htc_cnxt;
+       struct ath6kl *ar;
+};
+
 struct ath6kl_hif_ops {
        int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
                               u32 len, u32 request);
@@ -202,7 +242,26 @@ struct ath6kl_hif_ops {
        int (*scat_req_rw) (struct ath6kl *ar,
                            struct hif_scatter_req *scat_req);
        void (*cleanup_scatter)(struct ath6kl *ar);
-       int (*suspend)(struct ath6kl *ar);
+       int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+       int (*resume)(struct ath6kl *ar);
+       int (*power_on)(struct ath6kl *ar);
+       int (*power_off)(struct ath6kl *ar);
+       void (*stop)(struct ath6kl *ar);
 };
 
+int ath6kl_hif_setup(struct ath6kl_device *dev);
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
+                              u32 *lk_ahd, int timeout);
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kl_hif_rw_comp_handler(void *context, int status);
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+                              struct hif_scatter_req *scat_req, bool read);
+
 #endif
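The padding math that the HTC code below relies on follows directly from the power-of-2 check in ath6kl_hif_setup(); a small worked sketch (block size value assumed) of how block_mask rounds a length up to the next block boundary:

/*
 * Worked sketch of the CALC_TXRX_PADDED_LEN() arithmetic: the mask trick
 * only works because block_sz is a power of 2, which is why
 * ath6kl_hif_setup() rejects anything else with -EINVAL.
 */
#define EXAMPLE_ALIGN_MASK(x, mask)     (((x) + (mask)) & ~(mask))

static u32 example_padded_len(u32 len)
{
        u32 block_sz = 128;                     /* assumed example value */
        u32 block_mask = block_sz - 1;          /* 0x7f */

        return EXAMPLE_ALIGN_MASK(len, block_mask);     /* 130 -> 256 */
}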
index f88a7c9..f3b63ca 100644 (file)
  */
 
 #include "core.h"
-#include "htc_hif.h"
+#include "hif.h"
 #include "debug.h"
 #include "hif-ops.h"
 #include <asm/unaligned.h>
 
 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
 
+/* Functions for Tx credit handling */
+static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
+                                 struct htc_endpoint_credit_dist *ep_dist,
+                                 int credits)
+{
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
+                  ep_dist->endpoint, credits);
+
+       ep_dist->credits += credits;
+       ep_dist->cred_assngd += credits;
+       cred_info->cur_free_credits -= credits;
+}
+
+static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
+                              struct list_head *ep_list,
+                              int tot_credits)
+{
+       struct htc_endpoint_credit_dist *cur_ep_dist;
+       int count;
+
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
+
+       cred_info->cur_free_credits = tot_credits;
+       cred_info->total_avail_credits = tot_credits;
+
+       list_for_each_entry(cur_ep_dist, ep_list, list) {
+               if (cur_ep_dist->endpoint == ENDPOINT_0)
+                       continue;
+
+               cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+               if (tot_credits > 4) {
+                       if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+                           (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+                               ath6kl_credit_deposit(cred_info,
+                                                     cur_ep_dist,
+                                                     cur_ep_dist->cred_min);
+                               cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+                       }
+               }
+
+               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+                       ath6kl_credit_deposit(cred_info, cur_ep_dist,
+                                             cur_ep_dist->cred_min);
+                       /*
+                        * Control service is always marked active, it
+                        * never goes inactive EVER.
+                        */
+                       cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+               } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+                       /* this is the lowest priority data endpoint */
+                       /* FIXME: this looks fishy, check */
+                       cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+               /*
+                * Streams have to be created (explicitly or implicitly) for
+                * all kinds of traffic. BE endpoints are also inactive in the
+                * beginning. When BE traffic starts it creates implicit
+                * streams that redistribute credits.
+                *
+                * Note: all other endpoints have minimums set but are
+                * initially given NO credits. Credits will be distributed
+                * as traffic activity demands.
+                */
+       }
+
+       WARN_ON(cred_info->cur_free_credits <= 0);
+
+       list_for_each_entry(cur_ep_dist, ep_list, list) {
+               if (cur_ep_dist->endpoint == ENDPOINT_0)
+                       continue;
+
+               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+                       cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+               else {
+                       /*
+                        * For the remaining data endpoints, we assume that
+                        * cred_per_msg is the same for each. We use a simple
+                        * calculation here: take the remaining credits,
+                        * determine the maximum number of messages they can
+                        * cover, and then set each endpoint's normal value
+                        * to 3/4 of that amount.
+                        */
+                       count = (cred_info->cur_free_credits /
+                                cur_ep_dist->cred_per_msg)
+                               * cur_ep_dist->cred_per_msg;
+                       count = (count * 3) >> 2;
+                       count = max(count, cur_ep_dist->cred_per_msg);
+                       cur_ep_dist->cred_norm = count;
+
+               }
+
+               ath6kl_dbg(ATH6KL_DBG_CREDIT,
+                          "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
+                          cur_ep_dist->endpoint,
+                          cur_ep_dist->svc_id,
+                          cur_ep_dist->credits,
+                          cur_ep_dist->cred_per_msg,
+                          cur_ep_dist->cred_norm,
+                          cur_ep_dist->cred_min);
+       }
+}
+
+/* initialize and setup credit distribution */
+int ath6kl_credit_setup(void *htc_handle,
+                       struct ath6kl_htc_credit_info *cred_info)
+{
+       u16 servicepriority[5];
+
+       memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
+
+       servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
+       servicepriority[1] = WMI_DATA_VO_SVC;
+       servicepriority[2] = WMI_DATA_VI_SVC;
+       servicepriority[3] = WMI_DATA_BE_SVC;
+       servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+       /* set priority list */
+       ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+       return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
+                                struct htc_endpoint_credit_dist *ep_dist,
+                                int limit)
+{
+       int credits;
+
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
+                  ep_dist->endpoint, limit);
+
+       ep_dist->cred_assngd = limit;
+
+       if (ep_dist->credits <= limit)
+               return;
+
+       credits = ep_dist->credits - limit;
+       ep_dist->credits -= credits;
+       cred_info->cur_free_credits += credits;
+}
+
+static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
+                                struct list_head *epdist_list)
+{
+       struct htc_endpoint_credit_dist *cur_dist_list;
+
+       list_for_each_entry(cur_dist_list, epdist_list, list) {
+               if (cur_dist_list->endpoint == ENDPOINT_0)
+                       continue;
+
+               if (cur_dist_list->cred_to_dist > 0) {
+                       cur_dist_list->credits +=
+                                       cur_dist_list->cred_to_dist;
+                       cur_dist_list->cred_to_dist = 0;
+                       if (cur_dist_list->credits >
+                           cur_dist_list->cred_assngd)
+                               ath6kl_credit_reduce(cred_info,
+                                               cur_dist_list,
+                                               cur_dist_list->cred_assngd);
+
+                       if (cur_dist_list->credits >
+                           cur_dist_list->cred_norm)
+                               ath6kl_credit_reduce(cred_info, cur_dist_list,
+                                                    cur_dist_list->cred_norm);
+
+                       if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+                               if (cur_dist_list->txq_depth == 0)
+                                       ath6kl_credit_reduce(cred_info,
+                                                            cur_dist_list, 0);
+                       }
+               }
+       }
+}
+
+/*
+ * HTC has an endpoint that needs credits, ep_dist is the endpoint in
+ * question.
+ */
+static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
+                               struct htc_endpoint_credit_dist *ep_dist)
+{
+       struct htc_endpoint_credit_dist *curdist_list;
+       int credits = 0;
+       int need;
+
+       if (ep_dist->svc_id == WMI_CONTROL_SVC)
+               goto out;
+
+       if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+           (ep_dist->svc_id == WMI_DATA_VO_SVC))
+               if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+                       goto out;
+
+       /*
+        * For all other services, we follow a simple algorithm of:
+        *
+        * 1. checking the free pool for credits
+        * 2. checking lower priority endpoints for credits to take
+        */
+
+       credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+       if (credits >= ep_dist->seek_cred)
+               goto out;
+
+       /*
+        * We don't have enough in the free pool, so try taking away from
+        * lower priority services. The rules for taking away credits:
+        *
+        *   1. Only take from lower priority endpoints
+        *   2. Only take what is allocated above the minimum (never
+        *      starve an endpoint completely)
+        *   3. Only take what you need.
+        */
+
+       list_for_each_entry_reverse(curdist_list,
+                                   &cred_info->lowestpri_ep_dist,
+                                   list) {
+               if (curdist_list == ep_dist)
+                       break;
+
+               need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+               if ((curdist_list->cred_assngd - need) >=
+                    curdist_list->cred_min) {
+                       /*
+                        * The current one has been allocated more than
+                        * its minimum and has enough credits assigned
+                        * above its minimum to fulfill our need, so try to
+                        * take away just enough to fulfill our need.
+                        */
+                       ath6kl_credit_reduce(cred_info, curdist_list,
+                                            curdist_list->cred_assngd - need);
+
+                       if (cred_info->cur_free_credits >=
+                           ep_dist->seek_cred)
+                               break;
+               }
+
+               if (curdist_list->endpoint == ENDPOINT_0)
+                       break;
+       }
+
+       credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+       /* did we find some credits? */
+       if (credits)
+               ath6kl_credit_deposit(cred_info, ep_dist, credits);
+
+       ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
+                                      struct list_head *ep_dist_list)
+{
+       struct htc_endpoint_credit_dist *curdist_list;
+
+       list_for_each_entry(curdist_list, ep_dist_list, list) {
+               if (curdist_list->endpoint == ENDPOINT_0)
+                       continue;
+
+               if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
+                   (curdist_list->svc_id == WMI_DATA_BE_SVC))
+                       curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+               if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+                   !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+                       if (curdist_list->txq_depth == 0)
+                               ath6kl_credit_reduce(info, curdist_list, 0);
+                       else
+                               ath6kl_credit_reduce(info,
+                                                    curdist_list,
+                                                    curdist_list->cred_min);
+               }
+       }
+}
+
+/*
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked, so it
+ * shall NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
+                                    struct list_head *ep_dist_list,
+                                    enum htc_credit_dist_reason reason)
+{
+       switch (reason) {
+       case HTC_CREDIT_DIST_SEND_COMPLETE:
+               ath6kl_credit_update(cred_info, ep_dist_list);
+               break;
+       case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+               ath6kl_credit_redistribute(cred_info, ep_dist_list);
+               break;
+       default:
+               break;
+       }
+
+       WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+       WARN_ON(cred_info->cur_free_credits < 0);
+}
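The 3/4 rule used for cred_norm above is easier to see with concrete numbers; a short sketch (input values assumed) of just that computation:

/*
 * Sketch of the cred_norm calculation in ath6kl_credit_init(): with
 * 30 free credits and cred_per_msg = 4, count = (30 / 4) * 4 = 28,
 * then 28 * 3 / 4 = 21, clamped to at least cred_per_msg, so the
 * endpoint's normal value becomes 21.
 */
static int example_cred_norm(int cur_free_credits, int cred_per_msg)
{
        int count;

        count = (cur_free_credits / cred_per_msg) * cred_per_msg;
        count = (count * 3) >> 2;
        count = max(count, cred_per_msg);

        return count;
}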
+
 static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
 {
        u8 *align_addr;
@@ -102,12 +410,12 @@ static void htc_tx_comp_update(struct htc_target *target,
                                packet->info.tx.cred_used;
        endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-                  target->cred_dist_cntxt, &target->cred_dist_list);
+       ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
+                  target->credit_info, &target->cred_dist_list);
 
-       ath6k_credit_distribute(target->cred_dist_cntxt,
-                               &target->cred_dist_list,
-                               HTC_CREDIT_DIST_SEND_COMPLETE);
+       ath6kl_credit_distribute(target->credit_info,
+                                &target->cred_dist_list,
+                                HTC_CREDIT_DIST_SEND_COMPLETE);
 
        spin_unlock_bh(&target->tx_lock);
 }
@@ -118,8 +426,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
        if (list_empty(txq))
                return;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                  "send complete ep %d, (%d pkts)\n",
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+                  "htc tx complete ep %d pkts %d\n",
                   endpoint->eid, get_queue_depth(txq));
 
        ath6kl_tx_complete(endpoint->target->dev->ar, txq);
@@ -131,6 +439,9 @@ static void htc_tx_comp_handler(struct htc_target *target,
        struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
        struct list_head container;
 
+       ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
+                  packet->info.tx.seqno);
+
        htc_tx_comp_update(target, endpoint, packet);
        INIT_LIST_HEAD(&container);
        list_add_tail(&packet->list, &container);
@@ -148,8 +459,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
 
        INIT_LIST_HEAD(&tx_compq);
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-               "htc_async_tx_scat_complete  total len: %d  entries: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+               "htc tx scat complete len %d entries %d\n",
                scat_req->len, scat_req->scat_entries);
 
        if (scat_req->status)
@@ -190,16 +501,13 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
 
        send_len = packet->act_len + HTC_HDR_LENGTH;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
-                  __func__, send_len, sync ? "sync" : "async");
-
        padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-               "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
-               padded_len,
-               target->dev->ar->mbox_info.htc_addr,
-               sync ? "sync" : "async");
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+                  "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
+                  send_len, packet->info.tx.seqno, padded_len,
+                  target->dev->ar->mbox_info.htc_addr,
+                  sync ? "sync" : "async");
 
        if (sync) {
                status = hif_read_write_sync(target->dev->ar,
@@ -227,7 +535,7 @@ static int htc_check_credits(struct htc_target *target,
        *req_cred = (len > target->tgt_cred_sz) ?
                     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+       ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
                   *req_cred, ep->cred_dist.credits);
 
        if (ep->cred_dist.credits < *req_cred) {
@@ -237,16 +545,13 @@ static int htc_check_credits(struct htc_target *target,
                /* Seek more credits */
                ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-                          target->cred_dist_cntxt, &ep->cred_dist);
-
-               ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+               ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
 
                ep->cred_dist.seek_cred = 0;
 
                if (ep->cred_dist.credits < *req_cred) {
-                       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                                  "not enough credits for ep %d - leaving packet in queue\n",
+                       ath6kl_dbg(ATH6KL_DBG_CREDIT,
+                                  "credit not found for ep %d\n",
                                   eid);
                        return -EINVAL;
                }
@@ -260,17 +565,15 @@ static int htc_check_credits(struct htc_target *target,
                ep->cred_dist.seek_cred =
                ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-                          target->cred_dist_cntxt, &ep->cred_dist);
-
-               ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+               ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
 
                /* see if we were successful in getting more */
                if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
                        /* tell the target we need credits ASAP! */
                        *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
                        ep->ep_st.cred_low_indicate += 1;
-                       ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+                       ath6kl_dbg(ATH6KL_DBG_CREDIT,
+                                  "credit we need credits asap\n");
                }
        }
 
@@ -295,8 +598,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
                packet = list_first_entry(&endpoint->txq, struct htc_packet,
                                          list);
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                       "got head pkt:0x%p , queue depth: %d\n",
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                       "htc tx got packet 0x%p queue depth %d\n",
                        packet, get_queue_depth(&endpoint->txq));
 
                len = CALC_TXRX_PADDED_LEN(target,
@@ -404,9 +707,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
 
                scat_req->len += len;
                scat_req->scat_entries++;
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                          "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
-                          i, packet, len, rem_scat);
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                          "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+                          i, packet, packet->info.tx.seqno, len, rem_scat);
        }
 
        /* Roll back scatter setup in case of any failure */
@@ -455,12 +758,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
 
                if (!scat_req) {
                        /* no scatter resources  */
-                       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                               "no more scatter resources\n");
+                       ath6kl_dbg(ATH6KL_DBG_HTC,
+                               "htc tx no more scatter resources\n");
                        break;
                }
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+               ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
                           n_scat);
 
                scat_req->len = 0;
@@ -479,10 +782,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
                n_sent_bundle++;
                tot_pkts_bundle += scat_req->scat_entries;
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                          "send scatter total bytes: %d , entries: %d\n",
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                          "htc tx scatter bytes %d entries %d\n",
                           scat_req->len, scat_req->scat_entries);
-               ath6kldev_submit_scat_req(target->dev, scat_req, false);
+               ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
 
                if (status)
                        break;
@@ -490,8 +793,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
 
        *sent_bundle = n_sent_bundle;
        *n_bundle_pkts = tot_pkts_bundle;
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
-                  __func__, n_sent_bundle);
+       ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
+                  n_sent_bundle);
 
        return;
 }
@@ -510,7 +813,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
        if (endpoint->tx_proc_cnt > 1) {
                endpoint->tx_proc_cnt--;
                spin_unlock_bh(&target->tx_lock);
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+               ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
                return;
        }
 
@@ -588,15 +891,12 @@ static bool ath6kl_htc_tx_try(struct htc_target *target,
                overflow = true;
 
        if (overflow)
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                          "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
-                          endpoint->eid, overflow, txq_depth,
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                          "htc tx overflow ep %d depth %d max %d\n",
+                          endpoint->eid, txq_depth,
                           endpoint->max_txq_depth);
 
        if (overflow && ep_cb.tx_full) {
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                          "indicating overflowed tx packet: 0x%p\n", tx_pkt);
-
                if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
                    HTC_SEND_FULL_DROP) {
                        endpoint->ep_st.tx_dropped += 1;
@@ -625,12 +925,12 @@ static void htc_chk_ep_txq(struct htc_target *target)
         * are not modifying any state.
         */
        list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
-               endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+               endpoint = cred_dist->htc_ep;
 
                spin_lock_bh(&target->tx_lock);
                if (!list_empty(&endpoint->txq)) {
-                       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                                  "ep %d has %d credits and %d packets in tx queue\n",
+                       ath6kl_dbg(ATH6KL_DBG_HTC,
+                                  "htc creds ep %d credits %d pkts %d\n",
                                   cred_dist->endpoint,
                                   endpoint->cred_dist.credits,
                                   get_queue_depth(&endpoint->txq));
@@ -704,13 +1004,13 @@ static int htc_setup_tx_complete(struct htc_target *target)
 }
 
 void ath6kl_htc_set_credit_dist(struct htc_target *target,
-                               struct htc_credit_state_info *cred_dist_cntxt,
+                               struct ath6kl_htc_credit_info *credit_info,
                                u16 srvc_pri_order[], int list_len)
 {
        struct htc_endpoint *endpoint;
        int i, ep;
 
-       target->cred_dist_cntxt = cred_dist_cntxt;
+       target->credit_info = credit_info;
 
        list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
                      &target->cred_dist_list);
@@ -736,8 +1036,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
        struct htc_endpoint *endpoint;
        struct list_head queue;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                  "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+                  "htc tx ep id %d buf 0x%p len %d\n",
                   packet->endpoint, packet->buf, packet->act_len);
 
        if (packet->endpoint >= ENDPOINT_MAX) {
@@ -787,8 +1087,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
        list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
                packet->status = -ECANCELED;
                list_del(&packet->list);
-               ath6kl_dbg(ATH6KL_DBG_TRC,
-                       "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                       "htc tx flushing pkt 0x%p len %d  ep %d tag 0x%x\n",
                        packet, packet->act_len,
                        packet->endpoint, packet->info.tx.tag);
 
@@ -844,12 +1144,13 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target,
                endpoint->cred_dist.txq_depth =
                        get_queue_depth(&endpoint->txq);
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-                          target->cred_dist_cntxt, &target->cred_dist_list);
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                          "htc tx activity ctxt 0x%p dist 0x%p\n",
+                          target->credit_info, &target->cred_dist_list);
 
-               ath6k_credit_distribute(target->cred_dist_cntxt,
-                                       &target->cred_dist_list,
-                                       HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+               ath6kl_credit_distribute(target->credit_info,
+                                        &target->cred_dist_list,
+                                        HTC_CREDIT_DIST_ACTIVITY_CHANGE);
        }
 
        spin_unlock_bh(&target->tx_lock);
@@ -919,15 +1220,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
        padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
 
        if (padded_len > packet->buf_len) {
-               ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+               ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
                           padded_len, rx_len, packet->buf_len);
                return -ENOMEM;
        }
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                  "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+                  "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
                   packet, packet->info.rx.exp_hdr,
-                  padded_len, dev->ar->mbox_info.htc_addr, "sync");
+                  padded_len, dev->ar->mbox_info.htc_addr);
 
        status = hif_read_write_sync(dev->ar,
                                     dev->ar->mbox_info.htc_addr,
@@ -1137,8 +1438,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
                        }
 
                        endpoint->ep_st.rx_bundle_from_hdr += 1;
-                       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                                  "htc hdr indicates :%d msg can be fetched as a bundle\n",
+                       ath6kl_dbg(ATH6KL_DBG_HTC,
+                                  "htc rx bundle pkts %d\n",
                                   n_msg);
                } else
                        /* HTC header only indicates 1 message to fetch */
@@ -1191,8 +1492,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
                ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
                        packets->act_len + HTC_HDR_LENGTH);
 
-               ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
-                               "Unexpected ENDPOINT 0 Message", "",
+               ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+                               "htc rx unexpected endpoint 0 message", "",
                                packets->buf - HTC_HDR_LENGTH,
                                packets->act_len + HTC_HDR_LENGTH);
        }
@@ -1209,9 +1510,6 @@ static void htc_proc_cred_rpt(struct htc_target *target,
        int tot_credits = 0, i;
        bool dist = false;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                  "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
-
        spin_lock_bh(&target->tx_lock);
 
        for (i = 0; i < n_entries; i++, rpt++) {
@@ -1223,8 +1521,9 @@ static void htc_proc_cred_rpt(struct htc_target *target,
 
                endpoint = &target->endpoint[rpt->eid];
 
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
-                       rpt->eid, rpt->credits);
+               ath6kl_dbg(ATH6KL_DBG_CREDIT,
+                          "credit report ep %d credits %d\n",
+                          rpt->eid, rpt->credits);
 
                endpoint->ep_st.tx_cred_rpt += 1;
                endpoint->ep_st.cred_retnd += rpt->credits;
@@ -1264,21 +1563,14 @@ static void htc_proc_cred_rpt(struct htc_target *target,
                tot_credits += rpt->credits;
        }
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
-                  "report indicated %d credits to distribute\n",
-                  tot_credits);
-
        if (dist) {
                /*
                 * This was a credit return based on a completed send
                 * operations note, this is done with the lock held
                 */
-               ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
-                          target->cred_dist_cntxt, &target->cred_dist_list);
-
-               ath6k_credit_distribute(target->cred_dist_cntxt,
-                                       &target->cred_dist_list,
-                                       HTC_CREDIT_DIST_SEND_COMPLETE);
+               ath6kl_credit_distribute(target->credit_info,
+                                        &target->cred_dist_list,
+                                        HTC_CREDIT_DIST_SEND_COMPLETE);
        }
 
        spin_unlock_bh(&target->tx_lock);
@@ -1320,14 +1612,15 @@ static int htc_parse_trailer(struct htc_target *target,
                if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
                    && next_lk_ahds) {
 
-                       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                                  "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+                       ath6kl_dbg(ATH6KL_DBG_HTC,
+                                  "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
                                   lk_ahd->pre_valid, lk_ahd->post_valid);
 
                        /* look ahead bytes are valid, copy them over */
                        memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
 
-                       ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+                       ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+                                       "htc rx next look ahead",
                                        "", next_lk_ahds, 4);
 
                        *n_lk_ahds = 1;
@@ -1346,7 +1639,7 @@ static int htc_parse_trailer(struct htc_target *target,
                        bundle_lkahd_rpt =
                                (struct htc_bundle_lkahd_rpt *) record_buf;
 
-                       ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+                       ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
                                        "", record_buf, record->len);
 
                        for (i = 0; i < len; i++) {
@@ -1378,10 +1671,8 @@ static int htc_proc_trailer(struct htc_target *target,
        u8 *record_buf;
        u8 *orig_buf;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
-
-       ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
-                       buf, len);
+       ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
+       ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
 
        orig_buf = buf;
        orig_len = len;
@@ -1418,7 +1709,7 @@ static int htc_proc_trailer(struct htc_target *target,
        }
 
        if (status)
-               ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+               ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
                                "", orig_buf, orig_len);
 
        return status;
@@ -1436,9 +1727,6 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
        if (n_lkahds != NULL)
                *n_lkahds = 0;
 
-       ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
-                       packet->buf, packet->act_len);
-
        /*
         * NOTE: we cannot assume the alignment of buf, so we use the safe
         * macros to retrieve 16 bit fields.
@@ -1480,9 +1768,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
        if (lk_ahd != packet->info.rx.exp_hdr) {
                ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
                           __func__, packet, packet->info.rx.rx_flags);
-               ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+               ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
                                "", &packet->info.rx.exp_hdr, 4);
-               ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+               ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
                                "", (u8 *)&lk_ahd, sizeof(lk_ahd));
                status = -ENOMEM;
                goto fail_rx;
@@ -1518,15 +1806,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
 
 fail_rx:
        if (status)
-               ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
-                               "", packet->buf,
-                               packet->act_len < 256 ? packet->act_len : 256);
-       else {
-               if (packet->act_len > 0)
-                       ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
-                                       "HTC - Application Msg", "",
-                                       packet->buf, packet->act_len);
-       }
+               ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
+                               "", packet->buf, packet->act_len);
 
        return status;
 }
@@ -1534,8 +1815,8 @@ fail_rx:
 static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
                                   struct htc_packet *packet)
 {
-               ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                          "htc calling ep %d recv callback on packet 0x%p\n",
+               ath6kl_dbg(ATH6KL_DBG_HTC,
+                          "htc rx complete ep %d packet 0x%p\n",
                           endpoint->eid, packet);
                endpoint->ep_cb.rx(endpoint->target, packet);
 }
@@ -1571,9 +1852,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
 
        len = 0;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                  "%s(): (numpackets: %d , actual : %d)\n",
-                  __func__, get_queue_depth(rxq), n_scat_pkt);
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+                  "htc rx bundle depth %d pkts %d\n",
+                  get_queue_depth(rxq), n_scat_pkt);
 
        scat_req = hif_scatter_req_get(target->dev->ar);
 
@@ -1620,7 +1901,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
        scat_req->len = len;
        scat_req->scat_entries = i;
 
-       status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+       status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
 
        if (!status)
                *n_pkt_fetched = i;
@@ -1643,7 +1924,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
        int status = 0;
 
        list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
-               list_del(&packet->list);
                ep = &target->endpoint[packet->endpoint];
 
                /* process header for each of the recv packet */
@@ -1652,6 +1932,8 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
                if (status)
                        return status;
 
+               list_del(&packet->list);
+
                if (list_empty(comp_pktq)) {
                        /*
                         * Last packet's more packet flag is set
@@ -1686,11 +1968,15 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
        int fetched_pkts;
        bool part_bundle = false;
        int status = 0;
+       struct list_head tmp_rxq;
+       struct htc_packet *packet, *tmp_pkt;
 
        /* now go fetch the list of HTC packets */
        while (!list_empty(rx_pktq)) {
                fetched_pkts = 0;
 
+               INIT_LIST_HEAD(&tmp_rxq);
+
                if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
                        /*
                         * There are enough packets to attempt a
@@ -1698,28 +1984,27 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
                         * allowed.
                         */
                        status = ath6kl_htc_rx_bundle(target, rx_pktq,
-                                                     comp_pktq,
+                                                     &tmp_rxq,
                                                      &fetched_pkts,
                                                      part_bundle);
                        if (status)
-                               return status;
+                               goto fail_rx;
 
                        if (!list_empty(rx_pktq))
                                part_bundle = true;
+
+                       list_splice_tail_init(&tmp_rxq, comp_pktq);
                }
 
                if (!fetched_pkts) {
-                       struct htc_packet *packet;
 
                        packet = list_first_entry(rx_pktq, struct htc_packet,
                                                   list);
 
-                       list_del(&packet->list);
-
                        /* fully synchronous */
                        packet->completion = NULL;
 
-                       if (!list_empty(rx_pktq))
+                       if (!list_is_singular(rx_pktq))
                                /*
                                 * look_aheads in all packet
                                 * except the last one in the
@@ -1731,18 +2016,42 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
                        /* go fetch the packet */
                        status = ath6kl_htc_rx_packet(target, packet,
                                                      packet->act_len);
+
+                       list_move_tail(&packet->list, &tmp_rxq);
+
                        if (status)
-                               return status;
+                               goto fail_rx;
 
-                       list_add_tail(&packet->list, comp_pktq);
+                       list_splice_tail_init(&tmp_rxq, comp_pktq);
                }
        }
 
+       return 0;
+
+fail_rx:
+
+       /*
+        * Cleanup any packets we allocated but didn't use to
+        * actually fetch any packets.
+        */
+
+       list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
+               list_del(&packet->list);
+               htc_reclaim_rxbuf(target, packet,
+                               &target->endpoint[packet->endpoint]);
+       }
+
+       list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
+               list_del(&packet->list);
+               htc_reclaim_rxbuf(target, packet,
+                               &target->endpoint[packet->endpoint]);
+       }
+
        return status;
 }
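The fetch path above now stages fetched packets on a local tmp_rxq and splices them onto comp_pktq only once the transfer succeeded, so fail_rx can reclaim exactly what is still in flight; a minimal sketch of that staging pattern (the transfer helper is hypothetical):

/*
 * Sketch of the staging pattern used by ath6kl_htc_rx_fetch(): move a
 * packet onto a temporary list first, splice the temporary list to the
 * completion list on success, and walk it for cleanup on failure.
 */
static int example_fetch_one(struct list_head *pending, struct list_head *done)
{
        struct htc_packet *packet, *tmp;
        LIST_HEAD(staging);
        int status;

        packet = list_first_entry(pending, struct htc_packet, list);

        status = example_transfer(packet);      /* hypothetical helper */
        list_move_tail(&packet->list, &staging);

        if (status) {
                /* drop staged packets; the real code hands them back
                 * to the rx buffer pool */
                list_for_each_entry_safe(packet, tmp, &staging, list)
                        list_del(&packet->list);
                return status;
        }

        list_splice_tail_init(&staging, done);
        return 0;
}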
 
 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
-                                    u32 msg_look_ahead[], int *num_pkts)
+                                    u32 msg_look_ahead, int *num_pkts)
 {
        struct htc_packet *packets, *tmp_pkt;
        struct htc_endpoint *endpoint;
@@ -1759,7 +2068,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
         * On first entry copy the look_aheads into our temp array for
         * processing
         */
-       memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+       look_aheads[0] = msg_look_ahead;
 
        while (true) {
 
@@ -1827,15 +2136,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
        if (status) {
                ath6kl_err("failed to get pending recv messages: %d\n",
                           status);
-               /*
-                * Cleanup any packets we allocated but didn't use to
-                * actually fetch any packets.
-                */
-               list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
-                       list_del(&packets->list);
-                       htc_reclaim_rxbuf(target, packets,
-                                       &target->endpoint[packets->endpoint]);
-               }
 
                /* cleanup any packets in sync completion queue */
                list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
@@ -1846,7 +2146,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
 
                if (target->htc_flags & HTC_OP_STATE_STOPPING) {
                        ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
-                       ath6kldev_rx_control(target->dev, false);
+                       ath6kl_hif_rx_control(target->dev, false);
                }
        }
 
@@ -1856,7 +2156,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
         */
        if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
                ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
-               ath6kldev_rx_control(target->dev, false);
+               ath6kl_hif_rx_control(target->dev, false);
        }
        *num_pkts = n_fetched;
 
@@ -1874,12 +2174,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
        struct htc_frame_hdr *htc_hdr;
        u32 look_ahead;
 
-       if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+       if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
                               HTC_TARGET_RESPONSE_TIMEOUT))
                return NULL;
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-               "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+               "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
 
        htc_hdr = (struct htc_frame_hdr *)&look_ahead;
 
@@ -1943,8 +2243,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
 
        depth = get_queue_depth(pkt_queue);
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-               "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+               "htc rx add multiple ep id %d cnt %d len %d\n",
                first_pkt->endpoint, depth, first_pkt->buf_len);
 
        endpoint = &target->endpoint[first_pkt->endpoint];
@@ -1969,8 +2269,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
        /* check if we are blocked waiting for a new buffer */
        if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
                if (target->ep_waiting == first_pkt->endpoint) {
-                       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                               "receiver was blocked on ep:%d, unblocking.\n",
+                       ath6kl_dbg(ATH6KL_DBG_HTC,
+                               "htc rx blocked on ep %d, unblocking\n",
                                target->ep_waiting);
                        target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
                        target->ep_waiting = ENDPOINT_MAX;
@@ -1982,7 +2282,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
 
        if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
                /* TODO : implement a buffer threshold count? */
-               ath6kldev_rx_control(target->dev, true);
+               ath6kl_hif_rx_control(target->dev, true);
 
        return status;
 }
@@ -2004,8 +2304,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
                                         &endpoint->rx_bufq, list) {
                        list_del(&packet->list);
                        spin_unlock_bh(&target->rx_lock);
-                       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                                  "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+                       ath6kl_dbg(ATH6KL_DBG_HTC,
+                                  "htc rx flush pkt 0x%p  len %d  ep %d\n",
                                   packet, packet->buf_len,
                                   packet->endpoint);
                        dev_kfree_skb(packet->pkt_cntxt);
@@ -2028,8 +2328,8 @@ int ath6kl_htc_conn_service(struct htc_target *target,
        unsigned int max_msg_sz = 0;
        int status = 0;
 
-       ath6kl_dbg(ATH6KL_DBG_TRC,
-                  "htc_conn_service, target:0x%p service id:0x%X\n",
+       ath6kl_dbg(ATH6KL_DBG_HTC,
+                  "htc connect service target 0x%p service id 0x%x\n",
                   target, conn_req->svc_id);
 
        if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
@@ -2115,7 +2415,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
        endpoint->len_max = max_msg_sz;
        endpoint->ep_cb = conn_req->ep_cb;
        endpoint->cred_dist.svc_id = conn_req->svc_id;
-       endpoint->cred_dist.htc_rsvd = endpoint;
+       endpoint->cred_dist.htc_ep = endpoint;
        endpoint->cred_dist.endpoint = assigned_ep;
        endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
 
@@ -2172,6 +2472,7 @@ static void reset_ep_state(struct htc_target *target)
        }
 
        /* reset distribution list */
+       /* FIXME: free existing entries */
        INIT_LIST_HEAD(&target->cred_dist_list);
 }
 
@@ -2201,8 +2502,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
        target->msg_per_bndl_max = min(target->max_scat_entries,
                                       target->msg_per_bndl_max);
 
-       ath6kl_dbg(ATH6KL_DBG_TRC,
-                  "htc bundling allowed. max msg per htc bundle: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT,
+                  "htc bundling allowed msg_per_bndl_max %d\n",
                   target->msg_per_bndl_max);
 
        /* Max rx bundle size is limited by the max tx bundle size */
@@ -2211,7 +2512,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
        target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
                                     target->max_xfer_szper_scatreq);
 
-       ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
                   target->max_rx_bndl_sz, target->max_tx_bndl_sz);
 
        if (target->max_tx_bndl_sz)
@@ -2265,8 +2566,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
        target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
        target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
 
-       ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
-                  "target ready: credits: %d credit size: %d\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT,
+                  "htc target ready credits %d size %d\n",
                   target->tgt_creds, target->tgt_cred_sz);
 
        /* check if this is an extended ready message */
@@ -2280,7 +2581,7 @@ int ath6kl_htc_wait_target(struct htc_target *target)
                target->msg_per_bndl_max = 0;
        }
 
-       ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
                  (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
                  target->htc_tgt_ver);
 
@@ -2300,6 +2601,10 @@ int ath6kl_htc_wait_target(struct htc_target *target)
        status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
 
        if (status)
+               /*
+                * FIXME: this call doesn't make sense, the caller should
+                * call ath6kl_htc_cleanup() when it wants to remove htc
+                */
                ath6kl_hif_cleanup_scatter(target->dev->ar);
 
 fail_wait_target:
@@ -2320,8 +2625,11 @@ int ath6kl_htc_start(struct htc_target *target)
        struct htc_packet *packet;
        int status;
 
+       memset(&target->dev->irq_proc_reg, 0,
+              sizeof(target->dev->irq_proc_reg));
+
        /* Disable interrupts at the chip level */
-       ath6kldev_disable_intrs(target->dev);
+       ath6kl_hif_disable_intrs(target->dev);
 
        target->htc_flags = 0;
        target->rx_st_flags = 0;
@@ -2334,8 +2642,8 @@ int ath6kl_htc_start(struct htc_target *target)
        }
 
        /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
-       ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
-                         target->tgt_creds);
+       ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
+                          target->tgt_creds);
 
        dump_cred_dist_stats(target);
 
@@ -2346,7 +2654,7 @@ int ath6kl_htc_start(struct htc_target *target)
                return status;
 
        /* unmask interrupts */
-       status = ath6kldev_unmask_intrs(target->dev);
+       status = ath6kl_hif_unmask_intrs(target->dev);
 
        if (status)
                ath6kl_htc_stop(target);
@@ -2354,6 +2662,44 @@ int ath6kl_htc_start(struct htc_target *target)
        return status;
 }
 
+static int ath6kl_htc_reset(struct htc_target *target)
+{
+       u32 block_size, ctrl_bufsz;
+       struct htc_packet *packet;
+       int i;
+
+       reset_ep_state(target);
+
+       block_size = target->dev->ar->mbox_info.block_size;
+
+       ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+                     (block_size + HTC_HDR_LENGTH) :
+                     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+       for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+               packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+               if (!packet)
+                       return -ENOMEM;
+
+               packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+               if (!packet->buf_start) {
+                       kfree(packet);
+                       return -ENOMEM;
+               }
+
+               packet->buf_len = ctrl_bufsz;
+               if (i < NUM_CONTROL_RX_BUFFERS) {
+                       packet->act_len = 0;
+                       packet->buf = packet->buf_start;
+                       packet->endpoint = ENDPOINT_0;
+                       list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+               } else
+                       list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+       }
+
+       return 0;
+}
+
 /* htc_stop: stop interrupt reception, and flush all queued buffers */
 void ath6kl_htc_stop(struct htc_target *target)
 {
@@ -2366,21 +2712,19 @@ void ath6kl_htc_stop(struct htc_target *target)
         * function returns all pending HIF I/O has completed, we can
         * safely flush the queues.
         */
-       ath6kldev_mask_intrs(target->dev);
+       ath6kl_hif_mask_intrs(target->dev);
 
        ath6kl_htc_flush_txep_all(target);
 
        ath6kl_htc_flush_rx_buf(target);
 
-       reset_ep_state(target);
+       ath6kl_htc_reset(target);
 }
 
 void *ath6kl_htc_create(struct ath6kl *ar)
 {
        struct htc_target *target = NULL;
-       struct htc_packet *packet;
-       int status = 0, i = 0;
-       u32 block_size, ctrl_bufsz;
+       int status = 0;
 
        target = kzalloc(sizeof(*target), GFP_KERNEL);
        if (!target) {
@@ -2392,7 +2736,7 @@ void *ath6kl_htc_create(struct ath6kl *ar)
        if (!target->dev) {
                ath6kl_err("unable to allocate memory\n");
                status = -ENOMEM;
-               goto fail_create_htc;
+               goto err_htc_cleanup;
        }
 
        spin_lock_init(&target->htc_lock);
@@ -2407,49 +2751,20 @@ void *ath6kl_htc_create(struct ath6kl *ar)
        target->dev->htc_cnxt = target;
        target->ep_waiting = ENDPOINT_MAX;
 
-       reset_ep_state(target);
-
-       status = ath6kldev_setup(target->dev);
-
+       status = ath6kl_hif_setup(target->dev);
        if (status)
-               goto fail_create_htc;
-
-       block_size = ar->mbox_info.block_size;
-
-       ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
-                     (block_size + HTC_HDR_LENGTH) :
-                     (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
-
-       for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
-               packet = kzalloc(sizeof(*packet), GFP_KERNEL);
-               if (!packet)
-                       break;
+               goto err_htc_cleanup;
 
-               packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
-               if (!packet->buf_start) {
-                       kfree(packet);
-                       break;
-               }
+       status = ath6kl_htc_reset(target);
+       if (status)
+               goto err_htc_cleanup;
 
-               packet->buf_len = ctrl_bufsz;
-               if (i < NUM_CONTROL_RX_BUFFERS) {
-                       packet->act_len = 0;
-                       packet->buf = packet->buf_start;
-                       packet->endpoint = ENDPOINT_0;
-                       list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
-               } else
-                       list_add_tail(&packet->list, &target->free_ctrl_txbuf);
-       }
+       return target;
 
-fail_create_htc:
-       if (i != NUM_CONTROL_BUFFERS || status) {
-               if (target) {
-                       ath6kl_htc_cleanup(target);
-                       target = NULL;
-               }
-       }
+err_htc_cleanup:
+       ath6kl_htc_cleanup(target);
 
-       return target;
+       return NULL;
 }
 
 /* cleanup the HTC instance */
index 8ce0c2c..57672e1 100644 (file)
@@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist {
        int cred_per_msg;
 
        /* reserved for HTC use */
-       void *htc_rsvd;
+       struct htc_endpoint *htc_ep;
 
        /*
         * current depth of TX queue, i.e. messages waiting for credits
@@ -414,9 +414,11 @@ enum htc_credit_dist_reason {
        HTC_CREDIT_DIST_SEEK_CREDITS,
 };
 
-struct htc_credit_state_info {
+struct ath6kl_htc_credit_info {
        int total_avail_credits;
        int cur_free_credits;
+
+       /* list of lowest priority endpoints */
        struct list_head lowestpri_ep_dist;
 };
 
@@ -508,10 +510,13 @@ struct ath6kl_device;
 /* our HTC target state */
 struct htc_target {
        struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+       /* contains struct htc_endpoint_credit_dist */
        struct list_head cred_dist_list;
+
        struct list_head free_ctrl_txbuf;
        struct list_head free_ctrl_rxbuf;
-       struct htc_credit_state_info *cred_dist_cntxt;
+       struct ath6kl_htc_credit_info *credit_info;
        int tgt_creds;
        unsigned int tgt_cred_sz;
        spinlock_t htc_lock;
@@ -542,7 +547,7 @@ struct htc_target {
 
 void *ath6kl_htc_create(struct ath6kl *ar);
 void ath6kl_htc_set_credit_dist(struct htc_target *target,
-                               struct htc_credit_state_info *cred_info,
+                               struct ath6kl_htc_credit_info *cred_info,
                                u16 svc_pri_order[], int len);
 int ath6kl_htc_wait_target(struct htc_target *target);
 int ath6kl_htc_start(struct htc_target *target);
@@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
 int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
                                  struct list_head *pktq);
 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
-                                    u32 msg_look_ahead[], int *n_pkts);
+                                    u32 msg_look_ahead, int *n_pkts);
+
+int ath6kl_credit_setup(void *htc_handle,
+                       struct ath6kl_htc_credit_info *cred_info);
 
 static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
                                    u8 *buf, unsigned int len,
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
deleted file mode 100644 (file)
index 171ad63..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2007-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HTC_HIF_H
-#define HTC_HIF_H
-
-#include "htc.h"
-#include "hif.h"
-
-#define ATH6KL_MAILBOXES       4
-
-/* HTC runs over mailbox 0 */
-#define HTC_MAILBOX    0
-
-#define ATH6KL_TARGET_DEBUG_INTR_MASK     0x01
-
-#define OTHER_INTS_ENABLED             (INT_STATUS_ENABLE_ERROR_MASK | \
-                                       INT_STATUS_ENABLE_CPU_MASK   |  \
-                                       INT_STATUS_ENABLE_COUNTER_MASK)
-
-#define ATH6KL_REG_IO_BUFFER_SIZE                      32
-#define ATH6KL_MAX_REG_IO_BUFFERS                      8
-#define ATH6KL_SCATTER_ENTRIES_PER_REQ            16
-#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER      (16 * 1024)
-#define ATH6KL_SCATTER_REQS                       4
-
-#ifndef A_CACHE_LINE_PAD
-#define A_CACHE_LINE_PAD                        128
-#endif
-#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ        2
-#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER      (4 * 1024)
-
-struct ath6kl_irq_proc_registers {
-       u8 host_int_status;
-       u8 cpu_int_status;
-       u8 error_int_status;
-       u8 counter_int_status;
-       u8 mbox_frame;
-       u8 rx_lkahd_valid;
-       u8 host_int_status2;
-       u8 gmbox_rx_avail;
-       __le32 rx_lkahd[2];
-       __le32 rx_gmbox_lkahd_alias[2];
-} __packed;
-
-struct ath6kl_irq_enable_reg {
-       u8 int_status_en;
-       u8 cpu_int_status_en;
-       u8 err_int_status_en;
-       u8 cntr_int_status_en;
-} __packed;
-
-struct ath6kl_device {
-       spinlock_t lock;
-       u8 pad1[A_CACHE_LINE_PAD];
-       struct ath6kl_irq_proc_registers irq_proc_reg;
-       u8 pad2[A_CACHE_LINE_PAD];
-       struct ath6kl_irq_enable_reg irq_en_reg;
-       u8 pad3[A_CACHE_LINE_PAD];
-       struct htc_target *htc_cnxt;
-       struct ath6kl *ar;
-};
-
-int ath6kldev_setup(struct ath6kl_device *dev);
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
-int ath6kldev_mask_intrs(struct ath6kl_device *dev);
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
-                             u32 *lk_ahd, int timeout);
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
-int ath6kldev_disable_intrs(struct ath6kl_device *dev);
-
-int ath6kldev_rw_comp_handler(void *context, int status);
-int ath6kldev_intr_bh_handler(struct ath6kl *ar);
-
-/* Scatter Function and Definitions */
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
-                           struct hif_scatter_req *scat_req, bool read);
-
-#endif /*ATH6KL_H_ */
index c1d2366..30050af 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/moduleparam.h>
+#include <linux/errno.h>
 #include <linux/of.h>
 #include <linux/mmc/sdio_func.h>
 #include "core.h"
 
 unsigned int debug_mask;
 static unsigned int testmode;
+static bool suspend_cutpower;
 
 module_param(debug_mask, uint, 0644);
 module_param(testmode, uint, 0644);
+module_param(suspend_cutpower, bool, 0444);
 
 /*
  * Include definitions here that can be used to tune the WLAN module
@@ -73,37 +76,21 @@ struct sk_buff *ath6kl_buf_alloc(int size)
        return skb;
 }
 
-void ath6kl_init_profile_info(struct ath6kl *ar)
+void ath6kl_init_profile_info(struct ath6kl_vif *vif)
 {
-       ar->ssid_len = 0;
-       memset(ar->ssid, 0, sizeof(ar->ssid));
-
-       ar->dot11_auth_mode = OPEN_AUTH;
-       ar->auth_mode = NONE_AUTH;
-       ar->prwise_crypto = NONE_CRYPT;
-       ar->prwise_crypto_len = 0;
-       ar->grp_crypto = NONE_CRYPT;
-       ar->grp_crypto_len = 0;
-       memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
-       memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
-       memset(ar->bssid, 0, sizeof(ar->bssid));
-       ar->bss_ch = 0;
-       ar->nw_type = ar->next_mode = INFRA_NETWORK;
-}
-
-static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
-{
-       switch (ar->nw_type) {
-       case INFRA_NETWORK:
-               return HI_OPTION_FW_MODE_BSS_STA;
-       case ADHOC_NETWORK:
-               return HI_OPTION_FW_MODE_IBSS;
-       case AP_NETWORK:
-               return HI_OPTION_FW_MODE_AP;
-       default:
-               ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
-               return 0xff;
-       }
+       vif->ssid_len = 0;
+       memset(vif->ssid, 0, sizeof(vif->ssid));
+
+       vif->dot11_auth_mode = OPEN_AUTH;
+       vif->auth_mode = NONE_AUTH;
+       vif->prwise_crypto = NONE_CRYPT;
+       vif->prwise_crypto_len = 0;
+       vif->grp_crypto = NONE_CRYPT;
+       vif->grp_crypto_len = 0;
+       memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+       memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+       memset(vif->bssid, 0, sizeof(vif->bssid));
+       vif->bss_ch = 0;
 }
 
 static int ath6kl_set_host_app_area(struct ath6kl *ar)
@@ -120,7 +107,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar)
                return -EIO;
 
        address = TARG_VTOP(ar->target_type, data);
-       host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+       host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
        if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
                              sizeof(struct host_app_area)))
                return -EIO;
@@ -258,40 +245,12 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
        return 0;
 }
 
-static void ath6kl_init_control_info(struct ath6kl *ar)
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
 {
-       u8 ctr;
-
-       clear_bit(WMI_ENABLED, &ar->flag);
-       ath6kl_init_profile_info(ar);
-       ar->def_txkey_index = 0;
-       memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
-       ar->ch_hint = 0;
-       ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
-       ar->listen_intvl_b = 0;
-       ar->tx_pwr = 0;
-       clear_bit(SKIP_SCAN, &ar->flag);
-       set_bit(WMM_ENABLED, &ar->flag);
-       ar->intra_bss = 1;
-       memset(&ar->sc_params, 0, sizeof(ar->sc_params));
-       ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
-       ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
-       ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
-
-       memset((u8 *)ar->sta_list, 0,
-              AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
-
-       spin_lock_init(&ar->mcastpsq_lock);
-
-       /* Init the PS queues */
-       for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
-               spin_lock_init(&ar->sta_list[ctr].psq_lock);
-               skb_queue_head_init(&ar->sta_list[ctr].psq);
-       }
-
-       skb_queue_head_init(&ar->mcastpsq);
-
-       memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+       ath6kl_init_profile_info(vif);
+       vif->def_txkey_index = 0;
+       memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+       vif->ch_hint = 0;
 }
 
 /*
@@ -341,62 +300,7 @@ out:
        return status;
 }
 
-#define REG_DUMP_COUNT_AR6003   60
-#define REGISTER_DUMP_LEN_MAX   60
-
-static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
-{
-       u32 address;
-       u32 regdump_loc = 0;
-       int status;
-       u32 regdump_val[REGISTER_DUMP_LEN_MAX];
-       u32 i;
-
-       if (ar->target_type != TARGET_TYPE_AR6003)
-               return;
-
-       /* the reg dump pointer is copied to the host interest area */
-       address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
-       address = TARG_VTOP(ar->target_type, address);
-
-       /* read RAM location through diagnostic window */
-       status = ath6kl_diag_read32(ar, address, &regdump_loc);
-
-       if (status || !regdump_loc) {
-               ath6kl_err("failed to get ptr to register dump area\n");
-               return;
-       }
-
-       ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
-               regdump_loc);
-       regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
-
-       /* fetch register dump data */
-       status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
-                                 REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
-
-       if (status) {
-               ath6kl_err("failed to get register dump\n");
-               return;
-       }
-       ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
-
-       for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
-               ath6kl_dbg(ATH6KL_DBG_TRC, " %d :  0x%8.8X\n",
-                          i, regdump_val[i]);
-
-}
-
-void ath6kl_target_failure(struct ath6kl *ar)
-{
-       ath6kl_err("target asserted\n");
-
-       /* try dumping target assertion information (if any) */
-       ath6kl_dump_target_assert_info(ar);
-
-}
-
-static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
 {
        int status = 0;
        int ret;
@@ -406,46 +310,50 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
         * default values. Required if checksum offload is needed. Set
         * RxMetaVersion to 2.
         */
-       if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+       if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
                                               ar->rx_meta_ver, 0, 0)) {
                ath6kl_err("unable to set the rx frame format\n");
                status = -EIO;
        }
 
        if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
-               if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+               if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
                     IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
                        ath6kl_err("unable to set power save fail event policy\n");
                        status = -EIO;
                }
 
        if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
-               if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+               if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
                     WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
                        ath6kl_err("unable to set barker preamble policy\n");
                        status = -EIO;
                }
 
-       if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+       if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
                        WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
                ath6kl_err("unable to set keep alive interval\n");
                status = -EIO;
        }
 
-       if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+       if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
                        WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
                ath6kl_err("unable to set disconnect timeout\n");
                status = -EIO;
        }
 
        if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
-               if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+               if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
                        ath6kl_err("unable to set txop bursting\n");
                        status = -EIO;
                }
 
+       /*
+        * FIXME: Make sure p2p configurations are not applied to
+        * non-p2p capable interfaces when multivif support is enabled.
+        */
        if (ar->p2p) {
-               ret = ath6kl_wmi_info_req_cmd(ar->wmi,
+               ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
                                              P2P_FLAG_CAPABILITIES_REQ |
                                              P2P_FLAG_MACADDR_REQ |
                                              P2P_FLAG_HMODEL_REQ);
@@ -457,9 +365,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
                }
        }
 
+       /*
+        * FIXME: Make sure p2p configurations are not applied to
+        * non-p2p capable interfaces when multivif support is enabled.
+        */
        if (ar->p2p) {
                /* Enable Probe Request reporting for P2P */
-               ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true);
+               ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
                if (ret) {
                        ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
                                   "Request reporting (%d)\n", ret);
@@ -472,13 +384,44 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
 int ath6kl_configure_target(struct ath6kl *ar)
 {
        u32 param, ram_reserved_size;
-       u8 fw_iftype;
+       u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+       int i;
 
-       fw_iftype = ath6kl_get_fw_iftype(ar);
-       if (fw_iftype == 0xff)
-               return -EINVAL;
+       /*
+        * Note: Even though the firmware interface type is
+        * chosen as BSS_STA for all three interfaces, it can
+        * be configured as IBSS/AP as long as the fw submode
+        * remains normal mode (0 - AP, STA and IBSS). But
+        * due to a target assert in firmware, only one interface is
+        * configured for now.
+        */
+       fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
+
+       for (i = 0; i < MAX_NUM_VIF; i++)
+               fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+       /*
+        * By default, submodes:
+        *              vif[0] - AP/STA/IBSS
+        *              vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+        *              vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+        */
+
+       for (i = 0; i < ar->max_norm_iface; i++)
+               fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+                             (i * HI_OPTION_FW_SUBMODE_BITS);
+
+       for (i = ar->max_norm_iface; i < MAX_NUM_VIF; i++)
+               fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+                             (i * HI_OPTION_FW_SUBMODE_BITS);
+
+       /*
+        * FIXME: This needs to be removed once the multivif
+        * support is enabled.
+        */
+       if (ar->p2p)
+               fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
 
-       /* Tell target which HTC version it is used*/
        param = HTC_PROTOCOL_VERSION;
        if (ath6kl_bmi_write(ar,
                             ath6kl_get_hi_item_addr(ar,
@@ -499,12 +442,10 @@ int ath6kl_configure_target(struct ath6kl *ar)
                return -EIO;
        }
 
-       param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
-       param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
-       if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) {
-               param |= HI_OPTION_FW_SUBMODE_P2PDEV <<
-                       HI_OPTION_FW_SUBMODE_SHIFT;
-       }
+       param |= (MAX_NUM_VIF << HI_OPTION_NUM_DEV_SHIFT);
+       param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
+       param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
+
        param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
        param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
 
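For reference, the loops above pack one firmware mode field and one submode field per virtual interface into single words, each field HI_OPTION_FW_MODE_BITS or HI_OPTION_FW_SUBMODE_BITS wide, before shifting them into the host-interest option word. A standalone sketch of that packing is below; every constant value in it is a placeholder chosen for the illustration, not the real header definition.

/*
 * Standalone illustration of the fw_mode/fw_submode packing done in
 * ath6kl_configure_target() above.  All values are placeholders; the real
 * constants come from the ath6kl/firmware interface headers.
 */
#include <stdio.h>

#define MAX_NUM_VIF                 3
#define HI_OPTION_FW_MODE_BITS      2
#define HI_OPTION_FW_SUBMODE_BITS   2
#define HI_OPTION_FW_MODE_BSS_STA   1   /* placeholder */
#define HI_OPTION_FW_SUBMODE_NONE   0   /* placeholder */
#define HI_OPTION_FW_SUBMODE_P2PDEV 1   /* placeholder */

int main(void)
{
	unsigned int fw_mode = 0, fw_submode = 0;
	int i, max_norm_iface = 1;   /* one "normal" interface, the rest P2P */

	for (i = 0; i < MAX_NUM_VIF; i++)
		fw_mode |= HI_OPTION_FW_MODE_BSS_STA << (i * HI_OPTION_FW_MODE_BITS);

	for (i = 0; i < max_norm_iface; i++)
		fw_submode |= HI_OPTION_FW_SUBMODE_NONE << (i * HI_OPTION_FW_SUBMODE_BITS);

	for (i = max_norm_iface; i < MAX_NUM_VIF; i++)
		fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << (i * HI_OPTION_FW_SUBMODE_BITS);

	printf("fw_mode 0x%x, fw_submode 0x%x\n", fw_mode, fw_submode);
	return 0;
}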
@@ -553,68 +494,34 @@ int ath6kl_configure_target(struct ath6kl *ar)
        return 0;
 }
 
-struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+void ath6kl_core_free(struct ath6kl *ar)
 {
-       struct net_device *dev;
-       struct ath6kl *ar;
-       struct wireless_dev *wdev;
-
-       wdev = ath6kl_cfg80211_init(sdev);
-       if (!wdev) {
-               ath6kl_err("ath6kl_cfg80211_init failed\n");
-               return NULL;
-       }
-
-       ar = wdev_priv(wdev);
-       ar->dev = sdev;
-       ar->wdev = wdev;
-       wdev->iftype = NL80211_IFTYPE_STATION;
-
-       if (ath6kl_debug_init(ar)) {
-               ath6kl_err("Failed to initialize debugfs\n");
-               ath6kl_cfg80211_deinit(ar);
-               return NULL;
-       }
-
-       dev = alloc_netdev(0, "wlan%d", ether_setup);
-       if (!dev) {
-               ath6kl_err("no memory for network device instance\n");
-               ath6kl_cfg80211_deinit(ar);
-               return NULL;
-       }
-
-       dev->ieee80211_ptr = wdev;
-       SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
-       wdev->netdev = dev;
-       ar->sme_state = SME_DISCONNECTED;
-
-       init_netdev(dev);
+       wiphy_free(ar->wiphy);
+}
 
-       ar->net_dev = dev;
-       set_bit(WLAN_ENABLED, &ar->flag);
+void ath6kl_core_cleanup(struct ath6kl *ar)
+{
+       ath6kl_hif_power_off(ar);
 
-       ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+       destroy_workqueue(ar->ath6kl_wq);
 
-       spin_lock_init(&ar->lock);
+       if (ar->htc_target)
+               ath6kl_htc_cleanup(ar->htc_target);
 
-       ath6kl_init_control_info(ar);
-       init_waitqueue_head(&ar->event_wq);
-       sema_init(&ar->sem, 1);
-       clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+       ath6kl_cookie_cleanup(ar);
 
-       INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+       ath6kl_cleanup_amsdu_rxbufs(ar);
 
-       setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
-                   (unsigned long) dev);
+       ath6kl_bmi_cleanup(ar);
 
-       return ar;
-}
+       ath6kl_debug_cleanup(ar);
 
-int ath6kl_unavail_ev(struct ath6kl *ar)
-{
-       ath6kl_destroy(ar->net_dev, 1);
+       kfree(ar->fw_board);
+       kfree(ar->fw_otp);
+       kfree(ar->fw);
+       kfree(ar->fw_patch);
 
-       return 0;
+       ath6kl_deinit_ieee80211_hw(ar);
 }
 
 /* firmware upload */
@@ -1182,6 +1089,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
 static int ath6kl_upload_otp(struct ath6kl *ar)
 {
        u32 address, param;
+       bool from_hw = false;
        int ret;
 
        if (WARN_ON(ar->fw_otp == NULL))
@@ -1210,15 +1118,20 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
                return ret;
        }
 
-       ar->hw.app_start_override_addr = address;
+       if (ar->hw.app_start_override_addr == 0) {
+               ar->hw.app_start_override_addr = address;
+               from_hw = true;
+       }
 
-       ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
+                  from_hw ? " (from hw)" : "",
                   ar->hw.app_start_override_addr);
 
        /* execute the OTP code */
-       ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address);
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
+                  ar->hw.app_start_override_addr);
        param = 0;
-       ath6kl_bmi_execute(ar, address, &param);
+       ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
 
        return ret;
 }
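The change above makes ath6kl_upload_otp() keep a hardcoded per-hardware app_start_override_addr when one is set (hw2.0 gets 0x944C00 in the next hunk) and otherwise fall back to the address read back from the target, flagging the latter case "(from hw)" in the debug output. A small standalone sketch of that selection follows; the addresses used are made up for the example.

/*
 * Standalone sketch of the override-address selection in the hunk above:
 * keep the per-hardware table value when one is provided, otherwise fall
 * back to the address read from the target and note that it came "from hw".
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int pick_start_addr(unsigned int table_addr,
				    unsigned int target_addr, bool *from_hw)
{
	if (table_addr == 0) {
		*from_hw = true;        /* address came from the target itself */
		return target_addr;
	}

	*from_hw = false;               /* hardcoded per-hardware override wins */
	return table_addr;
}

int main(void)
{
	bool from_hw;
	unsigned int addr;

	addr = pick_start_addr(0x944C00, 0x1234, &from_hw);   /* table wins */
	printf("0x%x%s\n", addr, from_hw ? " (from hw)" : "");

	addr = pick_start_addr(0, 0x1234, &from_hw);          /* read from target */
	printf("0x%x%s\n", addr, from_hw ? " (from hw)" : "");
	return 0;
}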
@@ -1420,6 +1333,10 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
                ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
                ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
                ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
+
+               /* hw2.0 needs override address hardcoded */
+               ar->hw.app_start_override_addr = 0x944C00;
+
                break;
        case AR6003_REV3_VERSION:
                ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
@@ -1451,71 +1368,56 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
        return 0;
 }
 
-static int ath6kl_init(struct net_device *dev)
+int ath6kl_init_hw_start(struct ath6kl *ar)
 {
-       struct ath6kl *ar = ath6kl_priv(dev);
-       int status = 0;
-       s32 timeleft;
+       long timeleft;
+       int ret, i;
 
-       if (!ar)
-               return -EIO;
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
+
+       ret = ath6kl_hif_power_on(ar);
+       if (ret)
+               return ret;
+
+       ret = ath6kl_configure_target(ar);
+       if (ret)
+               goto err_power_off;
+
+       ret = ath6kl_init_upload(ar);
+       if (ret)
+               goto err_power_off;
 
        /* Do we need to finish the BMI phase */
+       /* FIXME: return error from ath6kl_bmi_done() */
        if (ath6kl_bmi_done(ar)) {
-               status = -EIO;
-               goto ath6kl_init_done;
+               ret = -EIO;
+               goto err_power_off;
        }
 
-       /* Indicate that WMI is enabled (although not ready yet) */
-       set_bit(WMI_ENABLED, &ar->flag);
-       ar->wmi = ath6kl_wmi_init(ar);
-       if (!ar->wmi) {
-               ath6kl_err("failed to initialize wmi\n");
-               status = -EIO;
-               goto ath6kl_init_done;
-       }
-
-       ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
-
        /*
         * The reason we have to wait for the target here is that the
         * driver layer has to init BMI in order to set the host block
         * size.
         */
        if (ath6kl_htc_wait_target(ar->htc_target)) {
-               status = -EIO;
-               goto err_node_cleanup;
+               ret = -EIO;
+               goto err_power_off;
        }
 
        if (ath6kl_init_service_ep(ar)) {
-               status = -EIO;
+               ret = -EIO;
                goto err_cleanup_scatter;
        }
 
-       /* setup access class priority mappings */
-       ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest  */
-       ar->ac_stream_pri_map[WMM_AC_BE] = 1;
-       ar->ac_stream_pri_map[WMM_AC_VI] = 2;
-       ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
-
-       /* give our connected endpoints some buffers */
-       ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
-       ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
-
-       /* allocate some buffers that handle larger AMSDU frames */
-       ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
-
        /* setup credit distribution */
-       ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
-
-       ath6kl_cookie_init(ar);
+       ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
 
        /* start HTC */
-       status = ath6kl_htc_start(ar->htc_target);
-
-       if (status) {
+       ret = ath6kl_htc_start(ar->htc_target);
+       if (ret) {
+               /* FIXME: call this */
                ath6kl_cookie_cleanup(ar);
-               goto err_rxbuf_cleanup;
+               goto err_cleanup_scatter;
        }
 
        /* Wait for Wmi event to be ready */
@@ -1529,51 +1431,69 @@ static int ath6kl_init(struct net_device *dev)
        if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
                ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
                           ATH6KL_ABI_VERSION, ar->version.abi_ver);
-               status = -EIO;
+               ret = -EIO;
                goto err_htc_stop;
        }
 
        if (!timeleft || signal_pending(current)) {
                ath6kl_err("wmi is not ready or wait was interrupted\n");
-               status = -EIO;
+               ret = -EIO;
                goto err_htc_stop;
        }
 
        ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
 
        /* communicate the wmi protocol version to the target */
+       /* FIXME: return error */
        if ((ath6kl_set_host_app_area(ar)) != 0)
                ath6kl_err("unable to set the host app area\n");
 
-       ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
-                        ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+       for (i = 0; i < MAX_NUM_VIF; i++) {
+               ret = ath6kl_target_config_wlan_params(ar, i);
+               if (ret)
+                       goto err_htc_stop;
+       }
 
-       ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+       ar->state = ATH6KL_STATE_ON;
 
-       status = ath6kl_target_config_wlan_params(ar);
-       if (!status)
-               goto ath6kl_init_done;
+       return 0;
 
 err_htc_stop:
        ath6kl_htc_stop(ar->htc_target);
-err_rxbuf_cleanup:
-       ath6kl_htc_flush_rx_buf(ar->htc_target);
-       ath6kl_cleanup_amsdu_rxbufs(ar);
 err_cleanup_scatter:
        ath6kl_hif_cleanup_scatter(ar);
-err_node_cleanup:
-       ath6kl_wmi_shutdown(ar->wmi);
-       clear_bit(WMI_ENABLED, &ar->flag);
-       ar->wmi = NULL;
+err_power_off:
+       ath6kl_hif_power_off(ar);
 
-ath6kl_init_done:
-       return status;
+       return ret;
+}
+
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+       int ret;
+
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
+
+       ath6kl_htc_stop(ar->htc_target);
+
+       ath6kl_hif_stop(ar);
+
+       ath6kl_bmi_reset(ar);
+
+       ret = ath6kl_hif_power_off(ar);
+       if (ret)
+               ath6kl_warn("failed to power off hif: %d\n", ret);
+
+       ar->state = ATH6KL_STATE_OFF;
+
+       return 0;
 }
 
 int ath6kl_core_init(struct ath6kl *ar)
 {
-       int ret = 0;
        struct ath6kl_bmi_target_info targ_info;
+       struct net_device *ndev;
+       int ret = 0, i;
 
        ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
        if (!ar->ath6kl_wq)
@@ -1583,145 +1503,226 @@ int ath6kl_core_init(struct ath6kl *ar)
        if (ret)
                goto err_wq;
 
-       ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+       /*
+        * Turn on power to get hardware (target) version and leave power
+        * on deliberately as we will boot the hardware anyway within a few
+        * seconds.
+        */
+       ret = ath6kl_hif_power_on(ar);
        if (ret)
                goto err_bmi_cleanup;
 
+       ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+       if (ret)
+               goto err_power_off;
+
        ar->version.target_ver = le32_to_cpu(targ_info.version);
        ar->target_type = le32_to_cpu(targ_info.type);
-       ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
+       ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
 
        ret = ath6kl_init_hw_params(ar);
        if (ret)
-               goto err_bmi_cleanup;
-
-       ret = ath6kl_configure_target(ar);
-       if (ret)
-               goto err_bmi_cleanup;
+               goto err_power_off;
 
        ar->htc_target = ath6kl_htc_create(ar);
 
        if (!ar->htc_target) {
                ret = -ENOMEM;
-               goto err_bmi_cleanup;
-       }
-
-       ar->aggr_cntxt = aggr_init(ar->net_dev);
-       if (!ar->aggr_cntxt) {
-               ath6kl_err("failed to initialize aggr\n");
-               ret = -ENOMEM;
-               goto err_htc_cleanup;
+               goto err_power_off;
        }
 
        ret = ath6kl_fetch_firmwares(ar);
        if (ret)
                goto err_htc_cleanup;
 
-       ret = ath6kl_init_upload(ar);
-       if (ret)
+       /* FIXME: we should free all firmwares in the error cases below */
+
+       /* Indicate that WMI is enabled (although not ready yet) */
+       set_bit(WMI_ENABLED, &ar->flag);
+       ar->wmi = ath6kl_wmi_init(ar);
+       if (!ar->wmi) {
+               ath6kl_err("failed to initialize wmi\n");
+               ret = -EIO;
                goto err_htc_cleanup;
+       }
 
-       ret = ath6kl_init(ar->net_dev);
+       ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
+
+       ret = ath6kl_register_ieee80211_hw(ar);
        if (ret)
-               goto err_htc_cleanup;
+               goto err_node_cleanup;
 
-       /* This runs the init function if registered */
-       ret = register_netdev(ar->net_dev);
+       ret = ath6kl_debug_init(ar);
        if (ret) {
-               ath6kl_err("register_netdev failed\n");
-               ath6kl_destroy(ar->net_dev, 0);
-               return ret;
+               wiphy_unregister(ar->wiphy);
+               goto err_node_cleanup;
+       }
+
+       for (i = 0; i < MAX_NUM_VIF; i++)
+               ar->avail_idx_map |= BIT(i);
+
+       rtnl_lock();
+
+       /* Add an initial station interface */
+       ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+                                   INFRA_NETWORK);
+
+       rtnl_unlock();
+
+       if (!ndev) {
+               ath6kl_err("Failed to instantiate a network device\n");
+               ret = -ENOMEM;
+               wiphy_unregister(ar->wiphy);
+               goto err_debug_init;
        }
 
-       set_bit(NETDEV_REGISTERED, &ar->flag);
 
        ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
-                       __func__, ar->net_dev->name, ar->net_dev, ar);
+                       __func__, ndev->name, ndev, ar);
+
+       /* setup access class priority mappings */
+       ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest  */
+       ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+       ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+       ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+       /* give our connected endpoints some buffers */
+       ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+       ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+       /* allocate some buffers that handle larger AMSDU frames */
+       ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+       ath6kl_cookie_init(ar);
+
+       ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+                        ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+       if (suspend_cutpower)
+               ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
+
+       ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+                           WIPHY_FLAG_HAVE_AP_SME |
+                           WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+       set_bit(FIRST_BOOT, &ar->flag);
+
+       ret = ath6kl_init_hw_start(ar);
+       if (ret) {
+               ath6kl_err("Failed to start hardware: %d\n", ret);
+               goto err_rxbuf_cleanup;
+       }
+
+       /*
+        * Set mac address which is received in ready event
+        * FIXME: Move to ath6kl_interface_add()
+        */
+       memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
 
        return ret;
 
+err_rxbuf_cleanup:
+       ath6kl_htc_flush_rx_buf(ar->htc_target);
+       ath6kl_cleanup_amsdu_rxbufs(ar);
+       rtnl_lock();
+       ath6kl_deinit_if_data(netdev_priv(ndev));
+       rtnl_unlock();
+       wiphy_unregister(ar->wiphy);
+err_debug_init:
+       ath6kl_debug_cleanup(ar);
+err_node_cleanup:
+       ath6kl_wmi_shutdown(ar->wmi);
+       clear_bit(WMI_ENABLED, &ar->flag);
+       ar->wmi = NULL;
 err_htc_cleanup:
        ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+       ath6kl_hif_power_off(ar);
 err_bmi_cleanup:
        ath6kl_bmi_cleanup(ar);
 err_wq:
        destroy_workqueue(ar->ath6kl_wq);
+
        return ret;
 }
 
-void ath6kl_stop_txrx(struct ath6kl *ar)
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
 {
-       struct net_device *ndev = ar->net_dev;
+       static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       bool discon_issued;
 
-       if (!ndev)
-               return;
+       netif_stop_queue(vif->ndev);
 
-       set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+       clear_bit(WLAN_ENABLED, &vif->flags);
 
-       if (down_interruptible(&ar->sem)) {
-               ath6kl_err("down_interruptible failed\n");
-               return;
-       }
+       if (wmi_ready) {
+               discon_issued = test_bit(CONNECTED, &vif->flags) ||
+                               test_bit(CONNECT_PEND, &vif->flags);
+               ath6kl_disconnect(vif);
+               del_timer(&vif->disconnect_timer);
 
-       if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
-               ath6kl_stop_endpoint(ndev, false, true);
+               if (discon_issued)
+                       ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+                                               (vif->nw_type & AP_NETWORK) ?
+                                               bcast_mac : vif->bssid,
+                                               0, NULL, 0);
+       }
 
-       clear_bit(WLAN_ENABLED, &ar->flag);
+       if (vif->scan_req) {
+               cfg80211_scan_done(vif->scan_req, true);
+               vif->scan_req = NULL;
+       }
 }
 
-/*
- * We need to differentiate between the surprise and planned removal of the
- * device because of the following consideration:
- *
- * - In case of surprise removal, the hcd already frees up the pending
- *   for the device and hence there is no need to unregister the function
- *   driver inorder to get these requests. For planned removal, the function
- *   driver has to explicitly unregister itself to have the hcd return all the
- *   pending requests before the data structures for the devices are freed up.
- *   Note that as per the current implementation, the function driver will
- *   end up releasing all the devices since there is no API to selectively
- *   release a particular device.
- *
- * - Certain commands issued to the target can be skipped for surprise
- *   removal since they will anyway not go through.
- */
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
+void ath6kl_stop_txrx(struct ath6kl *ar)
 {
-       struct ath6kl *ar;
+       struct ath6kl_vif *vif, *tmp_vif;
 
-       if (!dev || !ath6kl_priv(dev)) {
-               ath6kl_err("failed to get device structure\n");
+       set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+       if (down_interruptible(&ar->sem)) {
+               ath6kl_err("down_interruptible failed\n");
                return;
        }
 
-       ar = ath6kl_priv(dev);
-
-       destroy_workqueue(ar->ath6kl_wq);
-
-       if (ar->htc_target)
-               ath6kl_htc_cleanup(ar->htc_target);
-
-       aggr_module_destroy(ar->aggr_cntxt);
-
-       ath6kl_cookie_cleanup(ar);
-
-       ath6kl_cleanup_amsdu_rxbufs(ar);
+       spin_lock_bh(&ar->list_lock);
+       list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
+               list_del(&vif->list);
+               spin_unlock_bh(&ar->list_lock);
+               ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+               rtnl_lock();
+               ath6kl_deinit_if_data(vif);
+               rtnl_unlock();
+               spin_lock_bh(&ar->list_lock);
+       }
+       spin_unlock_bh(&ar->list_lock);
 
-       ath6kl_bmi_cleanup(ar);
+       clear_bit(WMI_READY, &ar->flag);
 
-       ath6kl_debug_cleanup(ar);
+       /*
+        * After wmi_shutdown all WMI events will be dropped. We
+        * need to cleanup the buffers allocated in AP mode and
+        * give disconnect notification to stack, which usually
+        * happens in the disconnect_event. Simulate the disconnect
+        * event by calling the function directly. Sometimes
+        * disconnect_event will be received when the debug logs
+        * are collected.
+        */
+       ath6kl_wmi_shutdown(ar->wmi);
 
-       if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
-               unregister_netdev(dev);
-               clear_bit(NETDEV_REGISTERED, &ar->flag);
+       clear_bit(WMI_ENABLED, &ar->flag);
+       if (ar->htc_target) {
+               ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+               ath6kl_htc_stop(ar->htc_target);
        }
 
-       free_netdev(dev);
-
-       kfree(ar->fw_board);
-       kfree(ar->fw_otp);
-       kfree(ar->fw);
-       kfree(ar->fw_patch);
+       /*
+        * Try to reset the device if we can. The driver may have been
+        * configured NOT to reset the target during a debug session.
+        */
+       ath6kl_dbg(ATH6KL_DBG_TRC,
+                       "attempting to reset target on instance destroy\n");
+       ath6kl_reset_device(ar, ar->target_type, true, true);
 
-       ath6kl_cfg80211_deinit(ar);
+       clear_bit(WLAN_ENABLED, &ar->flag);
 }
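The new ath6kl_stop_txrx() above unlinks each vif from ar->vif_list under the list spinlock with list_for_each_entry_safe(), then drops the lock around the per-vif cleanup calls (which take rtnl_lock() and may sleep) before retaking it for the next entry. Below is a standalone userspace analogue of that drop-the-lock-around-teardown pattern; every name in it is invented for the sketch and none of it is driver code.

/*
 * Standalone analogue (not driver code) of the teardown loop above: unlink
 * an entry while holding the lock, release the lock around the cleanup that
 * may sleep, then retake the lock before looking at the next entry.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_vif {
	int id;
	struct fake_vif *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_vif *vif_list;

static void cleanup_vif(struct fake_vif *vif)
{
	/* stands in for ath6kl_cleanup_vif() + ath6kl_deinit_if_data() */
	printf("cleaning up vif %d\n", vif->id);
	free(vif);
}

static void stop_all_vifs(void)
{
	struct fake_vif *vif;

	pthread_mutex_lock(&list_lock);
	while ((vif = vif_list) != NULL) {
		vif_list = vif->next;             /* unlink while holding the lock */
		pthread_mutex_unlock(&list_lock);
		cleanup_vif(vif);                 /* may "sleep", so the lock is dropped */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct fake_vif *vif = malloc(sizeof(*vif));

		if (!vif)
			return 1;
		vif->id = i;
		vif->next = vif_list;
		vif_list = vif;
	}

	stop_all_vifs();
	return 0;
}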
index 30b5a53..5e5f4ca 100644 (file)
 #include "target.h"
 #include "debug.h"
 
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
 {
+       struct ath6kl *ar = vif->ar;
        struct ath6kl_sta *conn = NULL;
        u8 i, max_conn;
 
-       max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+       max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
 
        for (i = 0; i < max_conn; i++) {
                if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@@ -393,8 +394,8 @@ out:
 #define AR6003_RESET_CONTROL_ADDRESS 0x00004000
 #define AR6004_RESET_CONTROL_ADDRESS 0x00004000
 
-static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
-                               bool wait_fot_compltn, bool cold_reset)
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+                        bool wait_fot_compltn, bool cold_reset)
 {
        int status = 0;
        u32 address;
@@ -425,102 +426,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
                ath6kl_err("failed to reset target\n");
 }
 
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
-                         bool get_dbglogs)
-{
-       struct ath6kl *ar = ath6kl_priv(dev);
-       static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-       bool discon_issued;
-
-       netif_stop_queue(dev);
-
-       /* disable the target and the interrupts associated with it */
-       if (test_bit(WMI_READY, &ar->flag)) {
-               discon_issued = (test_bit(CONNECTED, &ar->flag) ||
-                                test_bit(CONNECT_PEND, &ar->flag));
-               ath6kl_disconnect(ar);
-               if (!keep_profile)
-                       ath6kl_init_profile_info(ar);
-
-               del_timer(&ar->disconnect_timer);
-
-               clear_bit(WMI_READY, &ar->flag);
-               ath6kl_wmi_shutdown(ar->wmi);
-               clear_bit(WMI_ENABLED, &ar->flag);
-               ar->wmi = NULL;
-
-               /*
-                * After wmi_shudown all WMI events will be dropped. We
-                * need to cleanup the buffers allocated in AP mode and
-                * give disconnect notification to stack, which usually
-                * happens in the disconnect_event. Simulate the disconnect
-                * event by calling the function directly. Sometimes
-                * disconnect_event will be received when the debug logs
-                * are collected.
-                */
-               if (discon_issued)
-                       ath6kl_disconnect_event(ar, DISCONNECT_CMD,
-                                               (ar->nw_type & AP_NETWORK) ?
-                                               bcast_mac : ar->bssid,
-                                               0, NULL, 0);
-
-               ar->user_key_ctrl = 0;
-
-       } else {
-               ath6kl_dbg(ATH6KL_DBG_TRC,
-                          "%s: wmi is not ready 0x%p 0x%p\n",
-                          __func__, ar, ar->wmi);
-
-               /* Shut down WMI if we have started it */
-               if (test_bit(WMI_ENABLED, &ar->flag)) {
-                       ath6kl_dbg(ATH6KL_DBG_TRC,
-                                  "%s: shut down wmi\n", __func__);
-                       ath6kl_wmi_shutdown(ar->wmi);
-                       clear_bit(WMI_ENABLED, &ar->flag);
-                       ar->wmi = NULL;
-               }
-       }
-
-       if (ar->htc_target) {
-               ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
-               ath6kl_htc_stop(ar->htc_target);
-       }
-
-       /*
-        * Try to reset the device if we can. The driver may have been
-        * configure NOT to reset the target during a debug session.
-        */
-       ath6kl_dbg(ATH6KL_DBG_TRC,
-                  "attempting to reset target on instance destroy\n");
-       ath6kl_reset_device(ar, ar->target_type, true, true);
-}
-
-static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
 {
        u8 index;
        u8 keyusage;
 
        for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
-               if (ar->wep_key_list[index].key_len) {
+               if (vif->wep_key_list[index].key_len) {
                        keyusage = GROUP_USAGE;
-                       if (index == ar->def_txkey_index)
+                       if (index == vif->def_txkey_index)
                                keyusage |= TX_USAGE;
 
-                       ath6kl_wmi_addkey_cmd(ar->wmi,
+                       ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
                                              index,
                                              WEP_CRYPT,
                                              keyusage,
-                                             ar->wep_key_list[index].key_len,
-                                             NULL,
-                                             ar->wep_key_list[index].key,
+                                             vif->wep_key_list[index].key_len,
+                                             NULL, 0,
+                                             vif->wep_key_list[index].key,
                                              KEY_OP_INIT_VAL, NULL,
                                              NO_SYNC_WMIFLAG);
                }
        }
 }
 
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
 {
+       struct ath6kl *ar = vif->ar;
        struct ath6kl_req_key *ik;
        int res;
        u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@@ -529,10 +461,10 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
 
-       switch (ar->auth_mode) {
+       switch (vif->auth_mode) {
        case NONE_AUTH:
-               if (ar->prwise_crypto == WEP_CRYPT)
-                       ath6kl_install_static_wep_keys(ar);
+               if (vif->prwise_crypto == WEP_CRYPT)
+                       ath6kl_install_static_wep_keys(vif);
                break;
        case WPA_PSK_AUTH:
        case WPA2_PSK_AUTH:
@@ -544,8 +476,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
                           "the initial group key for AP mode\n");
                memset(key_rsc, 0, sizeof(key_rsc));
                res = ath6kl_wmi_addkey_cmd(
-                       ar->wmi, ik->key_index, ik->key_type,
-                       GROUP_USAGE, ik->key_len, key_rsc, ik->key,
+                       ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
+                       GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
+                       ik->key,
                        KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
                if (res) {
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@@ -554,15 +487,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
                break;
        }
 
-       ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
-       set_bit(CONNECTED, &ar->flag);
-       netif_carrier_on(ar->net_dev);
+       ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
+       set_bit(CONNECTED, &vif->flags);
+       netif_carrier_on(vif->ndev);
 }
 
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
                                u8 keymgmt, u8 ucipher, u8 auth,
                                u8 assoc_req_len, u8 *assoc_info)
 {
+       struct ath6kl *ar = vif->ar;
        u8 *ies = NULL, *wpa_ie = NULL, *pos;
        size_t ies_len = 0;
        struct station_info sinfo;
@@ -617,348 +551,32 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
        sinfo.assoc_req_ies_len = ies_len;
        sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
 
-       cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL);
+       cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
 
-       netif_wake_queue(ar->net_dev);
-}
-
-/* Functions for Tx credit handling */
-void ath6k_credit_init(struct htc_credit_state_info *cred_info,
-                      struct list_head *ep_list,
-                      int tot_credits)
-{
-       struct htc_endpoint_credit_dist *cur_ep_dist;
-       int count;
-
-       cred_info->cur_free_credits = tot_credits;
-       cred_info->total_avail_credits = tot_credits;
-
-       list_for_each_entry(cur_ep_dist, ep_list, list) {
-               if (cur_ep_dist->endpoint == ENDPOINT_0)
-                       continue;
-
-               cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
-
-               if (tot_credits > 4)
-                       if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
-                           (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
-                               ath6kl_deposit_credit_to_ep(cred_info,
-                                               cur_ep_dist,
-                                               cur_ep_dist->cred_min);
-                               cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
-                       }
-
-               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
-                       ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
-                                                   cur_ep_dist->cred_min);
-                       /*
-                        * Control service is always marked active, it
-                        * never goes inactive EVER.
-                        */
-                       cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
-               } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
-                       /* this is the lowest priority data endpoint */
-                       cred_info->lowestpri_ep_dist = cur_ep_dist->list;
-
-               /*
-                * Streams have to be created (explicit | implicit) for all
-                * kinds of traffic. BE endpoints are also inactive in the
-                * beginning. When BE traffic starts it creates implicit
-                * streams that redistributes credits.
-                *
-                * Note: all other endpoints have minimums set but are
-                * initially given NO credits. credits will be distributed
-                * as traffic activity demands
-                */
-       }
-
-       WARN_ON(cred_info->cur_free_credits <= 0);
-
-       list_for_each_entry(cur_ep_dist, ep_list, list) {
-               if (cur_ep_dist->endpoint == ENDPOINT_0)
-                       continue;
-
-               if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
-                       cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
-               else {
-                       /*
-                        * For the remaining data endpoints, we assume that
-                        * each cred_per_msg are the same. We use a simple
-                        * calculation here, we take the remaining credits
-                        * and determine how many max messages this can
-                        * cover and then set each endpoint's normal value
-                        * equal to 3/4 this amount.
-                        */
-                       count = (cred_info->cur_free_credits /
-                                cur_ep_dist->cred_per_msg)
-                               * cur_ep_dist->cred_per_msg;
-                       count = (count * 3) >> 2;
-                       count = max(count, cur_ep_dist->cred_per_msg);
-                       cur_ep_dist->cred_norm = count;
-
-               }
-       }
-}
-
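An aside on the arithmetic in the ath6k_credit_init() block removed just above (this patch moves the credit logic out of this file): each data endpoint's normal credit value is the free credit count rounded down to whole messages, scaled to three quarters, but never less than one message's worth of credits. A standalone demonstration with arbitrary example numbers:

/*
 * Standalone demo of the cred_norm calculation in the removed code above.
 * The inputs are arbitrary example values.
 */
#include <stdio.h>

static int cred_norm(int cur_free_credits, int cred_per_msg)
{
	int count;

	count = (cur_free_credits / cred_per_msg) * cred_per_msg;
	count = (count * 3) >> 2;
	if (count < cred_per_msg)
		count = cred_per_msg;

	return count;
}

int main(void)
{
	printf("free 30, per msg 4 -> norm %d\n", cred_norm(30, 4));  /* 21 */
	printf("free 5,  per msg 4 -> norm %d\n", cred_norm(5, 4));   /* 4  */
	return 0;
}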
-/* initialize and setup credit distribution */
-int ath6k_setup_credit_dist(void *htc_handle,
-                           struct htc_credit_state_info *cred_info)
-{
-       u16 servicepriority[5];
-
-       memset(cred_info, 0, sizeof(struct htc_credit_state_info));
-
-       servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
-       servicepriority[1] = WMI_DATA_VO_SVC;
-       servicepriority[2] = WMI_DATA_VI_SVC;
-       servicepriority[3] = WMI_DATA_BE_SVC;
-       servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
-
-       /* set priority list */
-       ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
-
-       return 0;
-}
-
-/* reduce an ep's credits back to a set limit */
-static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
-                                struct htc_endpoint_credit_dist  *ep_dist,
-                                int limit)
-{
-       int credits;
-
-       ep_dist->cred_assngd = limit;
-
-       if (ep_dist->credits <= limit)
-               return;
-
-       credits = ep_dist->credits - limit;
-       ep_dist->credits -= credits;
-       cred_info->cur_free_credits += credits;
-}
-
-static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
-                               struct list_head *epdist_list)
-{
-       struct htc_endpoint_credit_dist *cur_dist_list;
-
-       list_for_each_entry(cur_dist_list, epdist_list, list) {
-               if (cur_dist_list->endpoint == ENDPOINT_0)
-                       continue;
-
-               if (cur_dist_list->cred_to_dist > 0) {
-                       cur_dist_list->credits +=
-                                       cur_dist_list->cred_to_dist;
-                       cur_dist_list->cred_to_dist = 0;
-                       if (cur_dist_list->credits >
-                           cur_dist_list->cred_assngd)
-                               ath6k_reduce_credits(cred_info,
-                                               cur_dist_list,
-                                               cur_dist_list->cred_assngd);
-
-                       if (cur_dist_list->credits >
-                           cur_dist_list->cred_norm)
-                               ath6k_reduce_credits(cred_info, cur_dist_list,
-                                                    cur_dist_list->cred_norm);
-
-                       if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
-                               if (cur_dist_list->txq_depth == 0)
-                                       ath6k_reduce_credits(cred_info,
-                                                            cur_dist_list, 0);
-                       }
-               }
-       }
-}
-
-/*
- * HTC has an endpoint that needs credits, ep_dist is the endpoint in
- * question.
- */
-void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
-                       struct htc_endpoint_credit_dist *ep_dist)
-{
-       struct htc_endpoint_credit_dist *curdist_list;
-       int credits = 0;
-       int need;
-
-       if (ep_dist->svc_id == WMI_CONTROL_SVC)
-               goto out;
-
-       if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
-           (ep_dist->svc_id == WMI_DATA_VO_SVC))
-               if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
-                       goto out;
-
-       /*
-        * For all other services, we follow a simple algorithm of:
-        *
-        * 1. checking the free pool for credits
-        * 2. checking lower priority endpoints for credits to take
-        */
-
-       credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-       if (credits >= ep_dist->seek_cred)
-               goto out;
-
-       /*
-        * We don't have enough in the free pool, try taking away from
-        * lower priority services The rule for taking away credits:
-        *
-        *   1. Only take from lower priority endpoints
-        *   2. Only take what is allocated above the minimum (never
-        *      starve an endpoint completely)
-        *   3. Only take what you need.
-        */
-
-       list_for_each_entry_reverse(curdist_list,
-                                   &cred_info->lowestpri_ep_dist,
-                                   list) {
-               if (curdist_list == ep_dist)
-                       break;
-
-               need = ep_dist->seek_cred - cred_info->cur_free_credits;
-
-               if ((curdist_list->cred_assngd - need) >=
-                    curdist_list->cred_min) {
-                       /*
-                        * The current one has been allocated more than
-                        * it's minimum and it has enough credits assigned
-                        * above it's minimum to fulfill our need try to
-                        * take away just enough to fulfill our need.
-                        */
-                       ath6k_reduce_credits(cred_info, curdist_list,
-                                       curdist_list->cred_assngd - need);
-
-                       if (cred_info->cur_free_credits >=
-                           ep_dist->seek_cred)
-                               break;
-               }
-
-               if (curdist_list->endpoint == ENDPOINT_0)
-                       break;
-       }
-
-       credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-out:
-       /* did we find some credits? */
-       if (credits)
-               ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
-
-       ep_dist->seek_cred = 0;
-}
-
-/* redistribute credits based on activity change */
-static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
-                                      struct list_head *ep_dist_list)
-{
-       struct htc_endpoint_credit_dist *curdist_list;
-
-       list_for_each_entry(curdist_list, ep_dist_list, list) {
-               if (curdist_list->endpoint == ENDPOINT_0)
-                       continue;
-
-               if ((curdist_list->svc_id == WMI_DATA_BK_SVC)  ||
-                   (curdist_list->svc_id == WMI_DATA_BE_SVC))
-                       curdist_list->dist_flags |= HTC_EP_ACTIVE;
-
-               if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
-                   !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
-                       if (curdist_list->txq_depth == 0)
-                               ath6k_reduce_credits(info,
-                                               curdist_list, 0);
-                       else
-                               ath6k_reduce_credits(info,
-                                               curdist_list,
-                                               curdist_list->cred_min);
-               }
-       }
-}
-
-/*
- *
- * This function is invoked whenever endpoints require credit
- * distributions. A lock is held while this function is invoked, this
- * function shall NOT block. The ep_dist_list is a list of distribution
- * structures in prioritized order as defined by the call to the
- * htc_set_credit_dist() api.
- */
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
-                            struct list_head *ep_dist_list,
-                            enum htc_credit_dist_reason reason)
-{
-       switch (reason) {
-       case HTC_CREDIT_DIST_SEND_COMPLETE:
-               ath6k_credit_update(cred_info, ep_dist_list);
-               break;
-       case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
-               ath6k_redistribute_credits(cred_info, ep_dist_list);
-               break;
-       default:
-               break;
-       }
-
-       WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
-       WARN_ON(cred_info->cur_free_credits < 0);
+       netif_wake_queue(vif->ndev);
 }
 
 void disconnect_timer_handler(unsigned long ptr)
 {
        struct net_device *dev = (struct net_device *)ptr;
-       struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       ath6kl_init_profile_info(ar);
-       ath6kl_disconnect(ar);
+       ath6kl_init_profile_info(vif);
+       ath6kl_disconnect(vif);
 }
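
The handler above recovers the vif with netdev_priv() because the timer is armed with the net_device pointer as its data. A minimal sketch of how such a timer could be armed, assuming the vif keeps the disconnect_timer that the del_timer() call later in this diff suggests; the helper name and timeout are illustrative only.

        #include <linux/timer.h>
        #include <linux/jiffies.h>
        #include <linux/netdevice.h>

        static void example_arm_disconnect_timer(struct net_device *ndev,
                                                 unsigned int timeout_ms)
        {
                struct ath6kl_vif *vif = netdev_priv(ndev);

                /* timer API of this era passes an unsigned long cookie */
                setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
                            (unsigned long) ndev);
                mod_timer(&vif->disconnect_timer,
                          jiffies + msecs_to_jiffies(timeout_ms));
        }
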
 
-void ath6kl_disconnect(struct ath6kl *ar)
+void ath6kl_disconnect(struct ath6kl_vif *vif)
 {
-       if (test_bit(CONNECTED, &ar->flag) ||
-           test_bit(CONNECT_PEND, &ar->flag)) {
-               ath6kl_wmi_disconnect_cmd(ar->wmi);
+       if (test_bit(CONNECTED, &vif->flags) ||
+           test_bit(CONNECT_PEND, &vif->flags)) {
+               ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
                /*
                 * Disconnect command is issued, clear the connect pending
                 * flag. The connected flag will be cleared in
                 * disconnect event notification.
                 */
-               clear_bit(CONNECT_PEND, &ar->flag);
-       }
-}
-
-void ath6kl_deep_sleep_enable(struct ath6kl *ar)
-{
-       switch (ar->sme_state) {
-       case SME_CONNECTING:
-               cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
-                                       NULL, 0,
-                                       WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                       GFP_KERNEL);
-               break;
-       case SME_CONNECTED:
-       default:
-               /*
-                * FIXME: oddly enough smeState is in DISCONNECTED during
-                * suspend, why? Need to send disconnected event in that
-                * state.
-                */
-               cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
-               break;
+               clear_bit(CONNECT_PEND, &vif->flags);
        }
-
-       if (test_bit(CONNECTED, &ar->flag) ||
-           test_bit(CONNECT_PEND, &ar->flag))
-               ath6kl_wmi_disconnect_cmd(ar->wmi);
-
-       ar->sme_state = SME_DISCONNECTED;
-
-       /* disable scanning */
-       if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
-                                     0, 0) != 0)
-               printk(KERN_WARNING "ath6kl: failed to disable scan "
-                      "during suspend\n");
-
-       ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
 }
 
 /* WMI Event handlers */
@@ -980,17 +598,16 @@ static const char *get_hw_id_string(u32 id)
 void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
 {
        struct ath6kl *ar = devt;
-       struct net_device *dev = ar->net_dev;
 
-       memcpy(dev->dev_addr, datap, ETH_ALEN);
+       memcpy(ar->mac_addr, datap, ETH_ALEN);
        ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
-                  __func__, dev->dev_addr);
+                  __func__, ar->mac_addr);
 
        ar->version.wlan_ver = sw_ver;
        ar->version.abi_ver = abi_ver;
 
-       snprintf(ar->wdev->wiphy->fw_version,
-                sizeof(ar->wdev->wiphy->fw_version),
+       snprintf(ar->wiphy->fw_version,
+                sizeof(ar->wiphy->fw_version),
                 "%u.%u.%u.%u",
                 (ar->version.wlan_ver & 0xf0000000) >> 28,
                 (ar->version.wlan_ver & 0x0f000000) >> 24,
@@ -1001,78 +618,91 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
        set_bit(WMI_READY, &ar->flag);
        wake_up(&ar->event_wq);
 
-       ath6kl_info("hw %s fw %s%s\n",
-                   get_hw_id_string(ar->wdev->wiphy->hw_version),
-                   ar->wdev->wiphy->fw_version,
-                   test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+       if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
+               ath6kl_info("hw %s fw %s%s\n",
+                           get_hw_id_string(ar->wiphy->hw_version),
+                           ar->wiphy->fw_version,
+                           test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+       }
 }
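
The FIRST_BOOT guard added above relies on test_and_clear_bit() being an atomic read-and-clear, so the hw/fw banner is printed exactly once even if ready events race. A generic sketch of that one-shot idiom; the flag word and bit number here are illustrative, not the driver's.

        #include <linux/bitops.h>
        #include <linux/printk.h>

        static unsigned long example_flags = 1UL;       /* bit 0 starts set */

        static void example_on_ready(void)
        {
                /* returns the old value and clears the bit atomically */
                if (test_and_clear_bit(0, &example_flags))
                        pr_info("first ready event: print the banner once\n");
        }
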
 
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
 {
-       ath6kl_cfg80211_scan_complete_event(ar, status);
+       struct ath6kl *ar = vif->ar;
+       bool aborted = false;
+
+       if (status != WMI_SCAN_STATUS_SUCCESS)
+               aborted = true;
+
+       ath6kl_cfg80211_scan_complete_event(vif, aborted);
 
        if (!ar->usr_bss_filter) {
-               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-               ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+               ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+                                        NONE_BSS_FILTER, 0);
        }
 
-       ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+       ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
 }
 
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
                          u16 listen_int, u16 beacon_int,
                          enum network_type net_type, u8 beacon_ie_len,
                          u8 assoc_req_len, u8 assoc_resp_len,
                          u8 *assoc_info)
 {
-       unsigned long flags;
+       struct ath6kl *ar = vif->ar;
 
-       ath6kl_cfg80211_connect_event(ar, channel, bssid,
+       ath6kl_cfg80211_connect_event(vif, channel, bssid,
                                      listen_int, beacon_int,
                                      net_type, beacon_ie_len,
                                      assoc_req_len, assoc_resp_len,
                                      assoc_info);
 
-       memcpy(ar->bssid, bssid, sizeof(ar->bssid));
-       ar->bss_ch = channel;
+       memcpy(vif->bssid, bssid, sizeof(vif->bssid));
+       vif->bss_ch = channel;
 
-       if ((ar->nw_type == INFRA_NETWORK))
-               ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+       if ((vif->nw_type == INFRA_NETWORK))
+               ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+                                             ar->listen_intvl_t,
                                              ar->listen_intvl_b);
 
-       netif_wake_queue(ar->net_dev);
+       netif_wake_queue(vif->ndev);
 
        /* Update connect & link status atomically */
-       spin_lock_irqsave(&ar->lock, flags);
-       set_bit(CONNECTED, &ar->flag);
-       clear_bit(CONNECT_PEND, &ar->flag);
-       netif_carrier_on(ar->net_dev);
-       spin_unlock_irqrestore(&ar->lock, flags);
+       spin_lock_bh(&vif->if_lock);
+       set_bit(CONNECTED, &vif->flags);
+       clear_bit(CONNECT_PEND, &vif->flags);
+       netif_carrier_on(vif->ndev);
+       spin_unlock_bh(&vif->if_lock);
 
-       aggr_reset_state(ar->aggr_cntxt);
-       ar->reconnect_flag = 0;
+       aggr_reset_state(vif->aggr_cntxt);
+       vif->reconnect_flag = 0;
 
-       if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+       if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
                memset(ar->node_map, 0, sizeof(ar->node_map));
                ar->node_num = 0;
                ar->next_ep_id = ENDPOINT_2;
        }
 
        if (!ar->usr_bss_filter) {
-               set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-               ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0);
+               set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+               ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+                                        CURRENT_BSS_FILTER, 0);
        }
 }
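
Several hunks in this file swap spin_lock_irqsave() for spin_lock_bh(): these paths run in process or softirq context and the data is never touched from hard-irq context, so disabling bottom halves is sufficient and cheaper. A generic sketch of the bh-locked state update with illustrative names; only the locking and netdev calls themselves are real API.

        #include <linux/spinlock.h>
        #include <linux/bitops.h>
        #include <linux/netdevice.h>

        static void example_mark_connected(spinlock_t *lock, unsigned long *flags,
                                           struct net_device *ndev)
        {
                spin_lock_bh(lock);             /* blocks softirqs on this CPU */
                set_bit(0, flags);              /* e.g. a CONNECTED-style flag */
                netif_carrier_on(ndev);
                spin_unlock_bh(lock);
        }
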
 
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
 {
        struct ath6kl_sta *sta;
+       struct ath6kl *ar = vif->ar;
        u8 tsc[6];
+
        /*
         * For AP case, keyid will have aid of STA which sent pkt with
         * MIC error. Use this aid to get MAC & send it to hostapd.
         */
-       if (ar->nw_type == AP_NETWORK) {
+       if (vif->nw_type == AP_NETWORK) {
                sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
                if (!sta)
                        return;
@@ -1081,19 +711,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
                           "ap tkip mic error received from aid=%d\n", keyid);
 
                memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
-               cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+               cfg80211_michael_mic_failure(vif->ndev, sta->mac,
                                             NL80211_KEYTYPE_PAIRWISE, keyid,
                                             tsc, GFP_KERNEL);
        } else
-               ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+               ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
 
 }
 
-static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
 {
        struct wmi_target_stats *tgt_stats =
                (struct wmi_target_stats *) ptr;
-       struct target_stats *stats = &ar->target_stats;
+       struct ath6kl *ar = vif->ar;
+       struct target_stats *stats = &vif->target_stats;
        struct tkip_ccmp_stats *ccmp_stats;
        u8 ac;
 
@@ -1189,8 +820,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
        stats->wow_evt_discarded +=
                le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
 
-       if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
-               clear_bit(STATS_UPDATE_PEND, &ar->flag);
+       if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
+               clear_bit(STATS_UPDATE_PEND, &vif->flags);
                wake_up(&ar->event_wq);
        }
 }
@@ -1200,14 +831,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val)
        *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
 }
 
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
 {
        struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+       struct ath6kl *ar = vif->ar;
        struct wmi_ap_mode_stat *ap = &ar->ap_stats;
        struct wmi_per_sta_stat *st_ap, *st_p;
        u8 ac;
 
-       if (ar->nw_type == AP_NETWORK) {
+       if (vif->nw_type == AP_NETWORK) {
                if (len < sizeof(*p))
                        return;
 
@@ -1226,7 +858,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
                }
 
        } else {
-               ath6kl_update_target_stats(ar, ptr, len);
+               ath6kl_update_target_stats(vif, ptr, len);
        }
 }
 
@@ -1245,11 +877,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
        wake_up(&ar->event_wq);
 }
 
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
 {
        struct ath6kl_sta *conn;
        struct sk_buff *skb;
        bool psq_empty = false;
+       struct ath6kl *ar = vif->ar;
 
        conn = ath6kl_find_sta_by_aid(ar, aid);
 
@@ -1272,7 +905,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
        spin_unlock_bh(&conn->psq_lock);
 
        conn->sta_flags |= STA_PS_POLLED;
-       ath6kl_data_tx(skb, ar->net_dev);
+       ath6kl_data_tx(skb, vif->ndev);
        conn->sta_flags &= ~STA_PS_POLLED;
 
        spin_lock_bh(&conn->psq_lock);
@@ -1280,13 +913,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
        spin_unlock_bh(&conn->psq_lock);
 
        if (psq_empty)
-               ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+               ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
 }
 
-void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
 {
        bool mcastq_empty = false;
        struct sk_buff *skb;
+       struct ath6kl *ar = vif->ar;
 
        /*
         * If there are no associated STAs, ignore the DTIM expiry event.
@@ -1308,31 +942,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar)
                return;
 
        /* set the STA flag to dtim_expired for the frame to go out */
-       set_bit(DTIM_EXPIRED, &ar->flag);
+       set_bit(DTIM_EXPIRED, &vif->flags);
 
        spin_lock_bh(&ar->mcastpsq_lock);
        while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
                spin_unlock_bh(&ar->mcastpsq_lock);
 
-               ath6kl_data_tx(skb, ar->net_dev);
+               ath6kl_data_tx(skb, vif->ndev);
 
                spin_lock_bh(&ar->mcastpsq_lock);
        }
        spin_unlock_bh(&ar->mcastpsq_lock);
 
-       clear_bit(DTIM_EXPIRED, &ar->flag);
+       clear_bit(DTIM_EXPIRED, &vif->flags);
 
        /* clear the LSB of the BitMapCtl field of the TIM IE */
-       ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+       ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
 }
 
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
                             u8 assoc_resp_len, u8 *assoc_info,
                             u16 prot_reason_status)
 {
-       unsigned long flags;
+       struct ath6kl *ar = vif->ar;
 
-       if (ar->nw_type == AP_NETWORK) {
+       if (vif->nw_type == AP_NETWORK) {
                if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
                        return;
 
@@ -1344,31 +978,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
 
                        /* clear the LSB of the TIM IE's BitMapCtl field */
                        if (test_bit(WMI_READY, &ar->flag))
-                               ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+                               ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+                                                      MCAST_AID, 0);
                }
 
                if (!is_broadcast_ether_addr(bssid)) {
                        /* send event to application */
-                       cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+                       cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
                }
 
-               if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) {
-                       memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
-                       clear_bit(CONNECTED, &ar->flag);
+               if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
+                       memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+                       clear_bit(CONNECTED, &vif->flags);
                }
                return;
        }
 
-       ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+       ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
                                       assoc_resp_len, assoc_info,
                                       prot_reason_status);
 
-       aggr_reset_state(ar->aggr_cntxt);
+       aggr_reset_state(vif->aggr_cntxt);
 
-       del_timer(&ar->disconnect_timer);
+       del_timer(&vif->disconnect_timer);
 
-       ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
-                  "disconnect reason is %d\n", reason);
+       ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
 
        /*
         * If the event is due to disconnect cmd from the host, only they
@@ -1377,83 +1011,98 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
         */
        if (reason == DISCONNECT_CMD) {
                if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
-                       ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+                       ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+                                                NONE_BSS_FILTER, 0);
        } else {
-               set_bit(CONNECT_PEND, &ar->flag);
+               set_bit(CONNECT_PEND, &vif->flags);
                if (((reason == ASSOC_FAILED) &&
                    (prot_reason_status == 0x11)) ||
                    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
-                    && (ar->reconnect_flag == 1))) {
-                       set_bit(CONNECTED, &ar->flag);
+                    && (vif->reconnect_flag == 1))) {
+                       set_bit(CONNECTED, &vif->flags);
                        return;
                }
        }
 
        /* update connect & link status atomically */
-       spin_lock_irqsave(&ar->lock, flags);
-       clear_bit(CONNECTED, &ar->flag);
-       netif_carrier_off(ar->net_dev);
-       spin_unlock_irqrestore(&ar->lock, flags);
+       spin_lock_bh(&vif->if_lock);
+       clear_bit(CONNECTED, &vif->flags);
+       netif_carrier_off(vif->ndev);
+       spin_unlock_bh(&vif->if_lock);
 
-       if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
-               ar->reconnect_flag = 0;
+       if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
+               vif->reconnect_flag = 0;
 
        if (reason != CSERV_DISCONNECT)
                ar->user_key_ctrl = 0;
 
-       netif_stop_queue(ar->net_dev);
-       memset(ar->bssid, 0, sizeof(ar->bssid));
-       ar->bss_ch = 0;
+       netif_stop_queue(vif->ndev);
+       memset(vif->bssid, 0, sizeof(vif->bssid));
+       vif->bss_ch = 0;
 
        ath6kl_tx_data_cleanup(ar);
 }
 
-static int ath6kl_open(struct net_device *dev)
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
 {
-       struct ath6kl *ar = ath6kl_priv(dev);
-       unsigned long flags;
+       struct ath6kl_vif *vif;
+
+       spin_lock_bh(&ar->list_lock);
+       if (list_empty(&ar->vif_list)) {
+               spin_unlock_bh(&ar->list_lock);
+               return NULL;
+       }
 
-       spin_lock_irqsave(&ar->lock, flags);
+       vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
 
-       set_bit(WLAN_ENABLED, &ar->flag);
+       spin_unlock_bh(&ar->list_lock);
 
-       if (test_bit(CONNECTED, &ar->flag)) {
+       return vif;
+}
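
ath6kl_vif_first() above is the usual "peek the head of a list under its lock" helper. A generic form of the same pattern follows, with a made-up item type; note that, as in the driver, the returned entry is only safe to use afterwards if something else guarantees it will not be freed.

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct example_item {
                struct list_head list;
        };

        static struct example_item *example_first(struct list_head *head,
                                                  spinlock_t *lock)
        {
                struct example_item *it = NULL;

                spin_lock_bh(lock);
                if (!list_empty(head))
                        it = list_first_entry(head, struct example_item, list);
                spin_unlock_bh(lock);

                return it;
        }
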
+
+static int ath6kl_open(struct net_device *dev)
+{
+       struct ath6kl_vif *vif = netdev_priv(dev);
+
+       set_bit(WLAN_ENABLED, &vif->flags);
+
+       if (test_bit(CONNECTED, &vif->flags)) {
                netif_carrier_on(dev);
                netif_wake_queue(dev);
        } else
                netif_carrier_off(dev);
 
-       spin_unlock_irqrestore(&ar->lock, flags);
-
        return 0;
 }
 
 static int ath6kl_close(struct net_device *dev)
 {
        struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
        netif_stop_queue(dev);
 
-       ath6kl_disconnect(ar);
+       ath6kl_disconnect(vif);
 
        if (test_bit(WMI_READY, &ar->flag)) {
-               if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
-                                             0, 0, 0))
+               if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF,
+                                             0, 0, 0, 0, 0, 0, 0, 0, 0))
                        return -EIO;
 
-               clear_bit(WLAN_ENABLED, &ar->flag);
        }
 
-       ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+       ath6kl_cfg80211_scan_complete_event(vif, true);
+
+       clear_bit(WLAN_ENABLED, &vif->flags);
 
        return 0;
 }
 
 static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
 {
-       struct ath6kl *ar = ath6kl_priv(dev);
+       struct ath6kl_vif *vif = netdev_priv(dev);
 
-       return &ar->net_stats;
+       return &vif->net_stats;
 }
 
 static struct net_device_ops ath6kl_netdev_ops = {
@@ -1466,6 +1115,7 @@ static struct net_device_ops ath6kl_netdev_ops = {
 void init_netdev(struct net_device *dev)
 {
        dev->netdev_ops = &ath6kl_netdev_ops;
+       dev->destructor = free_netdev;
        dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
 
        dev->needed_headroom = ETH_HLEN;
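
Setting dev->destructor = free_netdev in init_netdev() above means the core frees the netdev (and the netdev_priv() area holding the vif) once the last reference drops after unregister_netdev(). A sketch of that allocation/teardown pairing, assuming the driver allocates its interfaces with alloc_netdev() and this setup hook; the helper names are illustrative.

        #include <linux/netdevice.h>
        #include <linux/etherdevice.h>

        static struct net_device *example_create_vif(void)
        {
                struct net_device *dev;

                /* 3-argument alloc_netdev() of this kernel generation */
                dev = alloc_netdev(sizeof(struct ath6kl_vif), "wlan%d", ether_setup);
                if (dev)
                        init_netdev(dev);  /* sets netdev_ops and the destructor */
                return dev;
        }

        static void example_destroy_vif(struct net_device *dev)
        {
                unregister_netdev(dev); /* destructor == free_netdev does the rest */
        }
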
index 066d4f8..e69ca5e 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sd.h>
-#include "htc_hif.h"
+#include "hif.h"
 #include "hif-ops.h"
 #include "target.h"
 #include "debug.h"
@@ -46,6 +46,8 @@ struct ath6kl_sdio {
        struct list_head scat_req;
 
        spinlock_t scat_lock;
+       bool scatter_enabled;
+
        bool is_disabled;
        atomic_t irq_handling;
        const struct sdio_device_id *id;
@@ -135,6 +137,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
 {
        int ret = 0;
 
+       sdio_claim_host(func);
+
        if (request & HIF_WRITE) {
                /* FIXME: looks like ugly workaround for something */
                if (addr >= HIF_MBOX_BASE_ADDR &&
@@ -156,6 +160,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
        }
 
+       sdio_release_host(func);
+
        ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
                   request & HIF_WRITE ? "wr" : "rd", addr,
                   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
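
The hunk above pushes sdio_claim_host()/sdio_release_host() down into the lowest-level I/O helper instead of requiring every caller to claim the host. A minimal sketch of that bracket around a read; only standard <linux/mmc/sdio_func.h> calls are used, and the function name is illustrative.

        #include <linux/mmc/sdio_func.h>

        static int example_sdio_read(struct sdio_func *func, unsigned int addr,
                                     void *buf, int len)
        {
                int ret;

                sdio_claim_host(func);  /* serialise against other users of func */
                ret = sdio_memcpy_fromio(func, buf, addr, len);
                sdio_release_host(func);

                return ret;
        }
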
@@ -167,12 +173,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
 static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
 {
        struct bus_request *bus_req;
-       unsigned long flag;
 
-       spin_lock_irqsave(&ar_sdio->lock, flag);
+       spin_lock_bh(&ar_sdio->lock);
 
        if (list_empty(&ar_sdio->bus_req_freeq)) {
-               spin_unlock_irqrestore(&ar_sdio->lock, flag);
+               spin_unlock_bh(&ar_sdio->lock);
                return NULL;
        }
 
@@ -180,7 +185,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
                                   struct bus_request, list);
        list_del(&bus_req->list);
 
-       spin_unlock_irqrestore(&ar_sdio->lock, flag);
+       spin_unlock_bh(&ar_sdio->lock);
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);
 
@@ -190,14 +195,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
 static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
                                     struct bus_request *bus_req)
 {
-       unsigned long flag;
-
        ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
                   __func__, bus_req);
 
-       spin_lock_irqsave(&ar_sdio->lock, flag);
+       spin_lock_bh(&ar_sdio->lock);
        list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
-       spin_unlock_irqrestore(&ar_sdio->lock, flag);
+       spin_unlock_bh(&ar_sdio->lock);
 }
 
 static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -291,10 +294,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
        mmc_req.cmd = &cmd;
        mmc_req.data = &data;
 
+       sdio_claim_host(ar_sdio->func);
+
        mmc_set_data_timeout(&data, ar_sdio->func->card);
        /* synchronous call to process request */
        mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
 
+       sdio_release_host(ar_sdio->func);
+
        status = cmd.error ? cmd.error : data.error;
 
 scat_complete:
@@ -395,11 +402,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
        } else
                tbuf = buf;
 
-       sdio_claim_host(ar_sdio->func);
        ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
        if ((request & HIF_READ) && bounced)
                memcpy(buf, tbuf, len);
-       sdio_release_host(ar_sdio->func);
 
        return ret;
 }
@@ -418,29 +423,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
                                                     req->request);
                context = req->packet;
                ath6kl_sdio_free_bus_req(ar_sdio, req);
-               ath6kldev_rw_comp_handler(context, status);
+               ath6kl_hif_rw_comp_handler(context, status);
        }
 }
 
 static void ath6kl_sdio_write_async_work(struct work_struct *work)
 {
        struct ath6kl_sdio *ar_sdio;
-       unsigned long flags;
        struct bus_request *req, *tmp_req;
 
        ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
-       sdio_claim_host(ar_sdio->func);
 
-       spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+       spin_lock_bh(&ar_sdio->wr_async_lock);
        list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
                list_del(&req->list);
-               spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+               spin_unlock_bh(&ar_sdio->wr_async_lock);
                __ath6kl_sdio_write_async(ar_sdio, req);
-               spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+               spin_lock_bh(&ar_sdio->wr_async_lock);
        }
-       spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
-
-       sdio_release_host(ar_sdio->func);
+       spin_unlock_bh(&ar_sdio->wr_async_lock);
 }
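
The async-write worker above uses the common "drain a queue while dropping the lock around each callback" shape: list_for_each_entry_safe() tolerates unlinking the current entry, and the lock is released because the per-entry work does bus I/O. A generic sketch with illustrative types; like the driver, it assumes this worker is the only consumer of the queue.

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct example_req {
                struct list_head list;
        };

        static void example_drain(struct list_head *q, spinlock_t *lock,
                                  void (*handle)(struct example_req *req))
        {
                struct example_req *req, *tmp;

                spin_lock_bh(lock);
                list_for_each_entry_safe(req, tmp, q, list) {
                        list_del(&req->list);
                        spin_unlock_bh(lock);   /* handle() may sleep / do I/O */
                        handle(req);
                        spin_lock_bh(lock);
                }
                spin_unlock_bh(lock);
        }
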
 
 static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@@ -459,20 +460,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
         */
        sdio_release_host(ar_sdio->func);
 
-       status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+       status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
        sdio_claim_host(ar_sdio->func);
        atomic_set(&ar_sdio->irq_handling, 0);
        WARN_ON(status && status != -ECANCELED);
 }
 
-static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_on(struct ath6kl *ar)
 {
+       struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
        int ret = 0;
 
        if (!ar_sdio->is_disabled)
                return 0;
 
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
+
        sdio_claim_host(func);
 
        ret = sdio_enable_func(func);
@@ -495,13 +499,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
        return ret;
 }
 
-static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_off(struct ath6kl *ar)
 {
+       struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        int ret;
 
        if (ar_sdio->is_disabled)
                return 0;
 
+       ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
+
        /* Disable the card */
        sdio_claim_host(ar_sdio->func);
        ret = sdio_disable_func(ar_sdio->func);
@@ -521,7 +528,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
 {
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct bus_request *bus_req;
-       unsigned long flags;
 
        bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
 
@@ -534,9 +540,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
        bus_req->request = request;
        bus_req->packet = packet;
 
-       spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+       spin_lock_bh(&ar_sdio->wr_async_lock);
        list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
-       spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+       spin_unlock_bh(&ar_sdio->wr_async_lock);
        queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
 
        return 0;
@@ -582,9 +588,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
 {
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *node = NULL;
-       unsigned long flag;
 
-       spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+       spin_lock_bh(&ar_sdio->scat_lock);
 
        if (!list_empty(&ar_sdio->scat_req)) {
                node = list_first_entry(&ar_sdio->scat_req,
@@ -592,7 +597,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
                list_del(&node->list);
        }
 
-       spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+       spin_unlock_bh(&ar_sdio->scat_lock);
 
        return node;
 }
@@ -601,13 +606,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
                                        struct hif_scatter_req *s_req)
 {
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
-       unsigned long flag;
 
-       spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+       spin_lock_bh(&ar_sdio->scat_lock);
 
        list_add_tail(&s_req->list, &ar_sdio->scat_req);
 
-       spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+       spin_unlock_bh(&ar_sdio->scat_lock);
 
 }
 
@@ -618,7 +622,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        u32 request = scat_req->req;
        int status = 0;
-       unsigned long flags;
 
        if (!scat_req->len)
                return -EINVAL;
@@ -627,14 +630,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
                "hif-scatter: total len: %d scatter entries: %d\n",
                scat_req->len, scat_req->scat_entries);
 
-       if (request & HIF_SYNCHRONOUS) {
-               sdio_claim_host(ar_sdio->func);
+       if (request & HIF_SYNCHRONOUS)
                status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
-               sdio_release_host(ar_sdio->func);
-       } else {
-               spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+       else {
+               spin_lock_bh(&ar_sdio->wr_async_lock);
                list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
-               spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+               spin_unlock_bh(&ar_sdio->wr_async_lock);
                queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
        }
 
@@ -646,23 +647,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
 {
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct hif_scatter_req *s_req, *tmp_req;
-       unsigned long flag;
 
        /* empty the free list */
-       spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+       spin_lock_bh(&ar_sdio->scat_lock);
        list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
                list_del(&s_req->list);
-               spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+               spin_unlock_bh(&ar_sdio->scat_lock);
 
+               /*
+                * FIXME: should we also call completion handler with
+                * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
+                * that the packet is properly freed?
+                */
                if (s_req->busrequest)
                        ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
                kfree(s_req->virt_dma_buf);
                kfree(s_req->sgentries);
                kfree(s_req);
 
-               spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+               spin_lock_bh(&ar_sdio->scat_lock);
        }
-       spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+       spin_unlock_bh(&ar_sdio->scat_lock);
 }
 
 /* setup of HIF scatter resources */
@@ -673,6 +678,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
        int ret;
        bool virt_scat = false;
 
+       if (ar_sdio->scatter_enabled)
+               return 0;
+
+       ar_sdio->scatter_enabled = true;
+
        /* check if host supports scatter and it meets our requirements */
        if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
                ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -687,8 +697,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
                                MAX_SCATTER_REQUESTS, virt_scat);
 
                if (!ret) {
-                       ath6kl_dbg(ATH6KL_DBG_SCATTER,
-                                  "hif-scatter enabled: max scatter req : %d entries: %d\n",
+                       ath6kl_dbg(ATH6KL_DBG_BOOT,
+                                  "hif-scatter enabled requests %d entries %d\n",
                                   MAX_SCATTER_REQUESTS,
                                   MAX_SCATTER_ENTRIES_PER_REQ);
 
@@ -712,8 +722,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
                        return ret;
                }
 
-               ath6kl_dbg(ATH6KL_DBG_SCATTER,
-                          "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+               ath6kl_dbg(ATH6KL_DBG_BOOT,
+                          "virtual scatter enabled requests %d entries %d\n",
                           ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
 
                target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@@ -724,7 +734,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
        return 0;
 }
 
-static int ath6kl_sdio_suspend(struct ath6kl *ar)
+static int ath6kl_sdio_config(struct ath6kl *ar)
+{
+       struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+       struct sdio_func *func = ar_sdio->func;
+       int ret;
+
+       sdio_claim_host(func);
+
+       if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+           MANUFACTURER_ID_AR6003_BASE) {
+               /* enable 4-bit ASYNC interrupt on AR6003 or later */
+               ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+                                               CCCR_SDIO_IRQ_MODE_REG,
+                                               SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+               if (ret) {
+                       ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+                                  ret);
+                       goto out;
+               }
+
+               ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
+       }
+
+       /* give us some time to enable, in ms */
+       func->enable_timeout = 100;
+
+       ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+       if (ret) {
+               ath6kl_err("Set sdio block size %d failed: %d\n",
+                          HIF_MBOX_BLOCK_SIZE, ret);
+               goto out;
+       }
+
+out:
+       sdio_release_host(func);
+
+       return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
 {
        struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
        struct sdio_func *func = ar_sdio->func;
@@ -733,12 +783,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
 
        flags = sdio_get_host_pm_caps(func);
 
-       if (!(flags & MMC_PM_KEEP_POWER))
-               /* as host doesn't support keep power we need to bail out */
-               ath6kl_dbg(ATH6KL_DBG_SDIO,
-                          "func %d doesn't support MMC_PM_KEEP_POWER\n",
-                          func->num);
-               return -EINVAL;
+       ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
+
+       if (!(flags & MMC_PM_KEEP_POWER) ||
+           (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
+               /* as host doesn't support keep power we need to cut power */
+               return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
+                                              NULL);
+       }
 
        ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
        if (ret) {
@@ -747,11 +799,85 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
                return ret;
        }
 
-       ath6kl_deep_sleep_enable(ar);
+       if ((flags & MMC_PM_WAKE_SDIO_IRQ) && wow) {
+               /*
+                * The host sdio controller is capable of keeping power and
+                * waking on sdio irq at this point. It's fine to continue
+                * the wow suspend operation.
+                */
+               ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+               if (ret)
+                       return ret;
+
+               ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+               if (ret)
+                       ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+               return ret;
+       }
+
+       return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
+}
+
+static int ath6kl_sdio_resume(struct ath6kl *ar)
+{
+       switch (ar->state) {
+       case ATH6KL_STATE_OFF:
+       case ATH6KL_STATE_CUTPOWER:
+               ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+                          "sdio resume configuring sdio\n");
+
+               /* need to set sdio settings after power is cut from sdio */
+               ath6kl_sdio_config(ar);
+               break;
+
+       case ATH6KL_STATE_ON:
+               break;
+
+       case ATH6KL_STATE_DEEPSLEEP:
+               break;
+
+       case ATH6KL_STATE_WOW:
+               break;
+       }
+
+       ath6kl_cfg80211_resume(ar);
 
        return 0;
 }
 
+static void ath6kl_sdio_stop(struct ath6kl *ar)
+{
+       struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+       struct bus_request *req, *tmp_req;
+       void *context;
+
+       /* FIXME: make sure that wq is not queued again */
+
+       cancel_work_sync(&ar_sdio->wr_async_work);
+
+       spin_lock_bh(&ar_sdio->wr_async_lock);
+
+       list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+               list_del(&req->list);
+
+               if (req->scat_req) {
+                       /* this is a scatter gather request */
+                       req->scat_req->status = -ECANCELED;
+                       req->scat_req->complete(ar_sdio->ar->htc_target,
+                                               req->scat_req);
+               } else {
+                       context = req->packet;
+                       ath6kl_sdio_free_bus_req(ar_sdio, req);
+                       ath6kl_hif_rw_comp_handler(context, -ECANCELED);
+               }
+       }
+
+       spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+       WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
+}
+
 static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
        .read_write_sync = ath6kl_sdio_read_write_sync,
        .write_async = ath6kl_sdio_write_async,
@@ -763,8 +889,43 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
        .scat_req_rw = ath6kl_sdio_async_rw_scatter,
        .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
        .suspend = ath6kl_sdio_suspend,
+       .resume = ath6kl_sdio_resume,
+       .power_on = ath6kl_sdio_power_on,
+       .power_off = ath6kl_sdio_power_off,
+       .stop = ath6kl_sdio_stop,
 };
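
The ath6kl_sdio_ops table above grows power_on/power_off/resume/stop hooks, keeping the core bus-agnostic: it only ever calls through the table. A stripped-down sketch of that indirection with made-up names; struct ath6kl is the driver's own type, everything else here is illustrative.

        struct example_hif_ops {
                int  (*power_on)(struct ath6kl *ar);
                int  (*power_off)(struct ath6kl *ar);
                void (*stop)(struct ath6kl *ar);
        };

        static int example_power_on(struct ath6kl *ar,
                                    const struct example_hif_ops *ops)
        {
                /* optional hook: buses needing no power sequencing leave it NULL */
                return ops->power_on ? ops->power_on(ar) : 0;
        }
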
 
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Empty handlers so that mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow cfg80211 suspend/resume handlers.
+ */
+static int ath6kl_sdio_pm_suspend(struct device *device)
+{
+       ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
+
+       return 0;
+}
+
+static int ath6kl_sdio_pm_resume(struct device *device)
+{
+       ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
+                        ath6kl_sdio_pm_resume);
+
+#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
+
+#else
+
+#define ATH6KL_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
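
SIMPLE_DEV_PM_OPS() above expands to a struct dev_pm_ops that wires the two callbacks into the system-sleep slots (suspend/resume plus freeze/thaw/poweroff/restore), which the driver then hangs off .drv.pm. A self-contained sketch of the same plumbing with illustrative names.

        #include <linux/pm.h>
        #include <linux/device.h>

        static int example_pm_suspend(struct device *dev)
        {
                return 0;       /* real work happens via cfg80211 suspend instead */
        }

        static int example_pm_resume(struct device *dev)
        {
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(example_pm_ops, example_pm_suspend,
                                 example_pm_resume);
        /* a bus driver would then set:  .drv.pm = &example_pm_ops  */
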
+
 static int ath6kl_sdio_probe(struct sdio_func *func,
                             const struct sdio_device_id *id)
 {
@@ -773,8 +934,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
        struct ath6kl *ar;
        int count;
 
-       ath6kl_dbg(ATH6KL_DBG_SDIO,
-                  "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT,
+                  "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
                   func->num, func->vendor, func->device,
                   func->max_blksize, func->cur_blksize);
 
@@ -820,57 +981,22 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
 
        ath6kl_sdio_set_mbox_info(ar);
 
-       sdio_claim_host(func);
-
-       if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
-           MANUFACTURER_ID_AR6003_BASE) {
-               /* enable 4-bit ASYNC interrupt on AR6003 or later */
-               ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
-                                               CCCR_SDIO_IRQ_MODE_REG,
-                                               SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
-               if (ret) {
-                       ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
-                                  ret);
-                       sdio_release_host(func);
-                       goto err_cfg80211;
-               }
-
-               ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
-       }
-
-       /* give us some time to enable, in ms */
-       func->enable_timeout = 100;
-
-       sdio_release_host(func);
-
-       ret = ath6kl_sdio_power_on(ar_sdio);
-       if (ret)
-               goto err_cfg80211;
-
-       sdio_claim_host(func);
-
-       ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+       ret = ath6kl_sdio_config(ar);
        if (ret) {
-               ath6kl_err("Set sdio block size %d failed: %d)\n",
-                          HIF_MBOX_BLOCK_SIZE, ret);
-               sdio_release_host(func);
-               goto err_off;
+               ath6kl_err("Failed to config sdio: %d\n", ret);
+               goto err_core_alloc;
        }
 
-       sdio_release_host(func);
-
        ret = ath6kl_core_init(ar);
        if (ret) {
                ath6kl_err("Failed to init ath6kl core\n");
-               goto err_off;
+               goto err_core_alloc;
        }
 
        return ret;
 
-err_off:
-       ath6kl_sdio_power_off(ar_sdio);
-err_cfg80211:
-       ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_core_alloc:
+       ath6kl_core_free(ar_sdio->ar);
 err_dma:
        kfree(ar_sdio->dma_buffer);
 err_hif:
@@ -883,8 +1009,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
 {
        struct ath6kl_sdio *ar_sdio;
 
-       ath6kl_dbg(ATH6KL_DBG_SDIO,
-                  "removed func %d vendor 0x%x device 0x%x\n",
+       ath6kl_dbg(ATH6KL_DBG_BOOT,
+                  "sdio removed func %d vendor 0x%x device 0x%x\n",
                   func->num, func->vendor, func->device);
 
        ar_sdio = sdio_get_drvdata(func);
@@ -892,9 +1018,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
        ath6kl_stop_txrx(ar_sdio->ar);
        cancel_work_sync(&ar_sdio->wr_async_work);
 
-       ath6kl_unavail_ev(ar_sdio->ar);
-
-       ath6kl_sdio_power_off(ar_sdio);
+       ath6kl_core_cleanup(ar_sdio->ar);
 
        kfree(ar_sdio->dma_buffer);
        kfree(ar_sdio);
@@ -909,10 +1033,11 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
 MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
 
 static struct sdio_driver ath6kl_sdio_driver = {
-       .name = "ath6kl_sdio",
+       .name = "ath6kl",
        .id_table = ath6kl_sdio_devices,
        .probe = ath6kl_sdio_probe,
        .remove = ath6kl_sdio_remove,
+       .drv.pm = ATH6KL_SDIO_PM_OPS,
 };
 
 static int __init ath6kl_sdio_init(void)
index c9a7605..687e2b3 100644 (file)
@@ -320,7 +320,10 @@ struct host_interest {
 |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)
 |------------------------------------------------------------------------------|
 */
+#define HI_OPTION_FW_MODE_BITS        0x2
 #define HI_OPTION_FW_MODE_SHIFT        0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS      0x2
 #define HI_OPTION_FW_SUBMODE_SHIFT     0x14
 
 /* Convert a Target virtual address into a Target physical address */
index a711707..d9cff2b 100644 (file)
@@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
        return ar->node_map[ep_map].ep_id;
 }
 
-static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
                                bool *more_data)
 {
        struct ethhdr *datap = (struct ethhdr *) skb->data;
        struct ath6kl_sta *conn = NULL;
        bool ps_queued = false, is_psq_empty = false;
+       struct ath6kl *ar = vif->ar;
 
        if (is_multicast_ether_addr(datap->h_dest)) {
                u8 ctr = 0;
@@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
                         * If this transmit is not because of a Dtim Expiry
                         * q it.
                         */
-                       if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+                       if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
                                bool is_mcastq_empty = false;
 
                                spin_lock_bh(&ar->mcastpsq_lock);
@@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
                                 */
                                if (is_mcastq_empty)
                                        ath6kl_wmi_set_pvb_cmd(ar->wmi,
+                                                              vif->fw_vif_idx,
                                                               MCAST_AID, 1);
 
                                ps_queued = true;
@@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
                        }
                }
        } else {
-               conn = ath6kl_find_sta(ar, datap->h_dest);
+               conn = ath6kl_find_sta(vif, datap->h_dest);
                if (!conn) {
                        dev_kfree_skb(skb);
 
@@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
                                 */
                                if (is_psq_empty)
                                        ath6kl_wmi_set_pvb_cmd(ar->wmi,
+                                                              vif->fw_vif_idx,
                                                               conn->aid, 1);
 
                                ps_queued = true;
@@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
        struct ath6kl *ar = ath6kl_priv(dev);
        struct ath6kl_cookie *cookie = NULL;
        enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+       struct ath6kl_vif *vif = netdev_priv(dev);
        u32 map_no = 0;
        u16 htc_tag = ATH6KL_DATA_PKT_TAG;
        u8 ac = 99 ; /* initialize to unmapped ac */
@@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
                   skb, skb->data, skb->len);
 
        /* If target is not associated */
-       if (!test_bit(CONNECTED, &ar->flag)) {
+       if (!test_bit(CONNECTED, &vif->flags)) {
                dev_kfree_skb(skb);
                return 0;
        }
@@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
                goto fail_tx;
 
        /* AP mode Power saving processing */
-       if (ar->nw_type == AP_NETWORK) {
-               if (ath6kl_powersave_ap(ar, skb, &more_data))
+       if (vif->nw_type == AP_NETWORK) {
+               if (ath6kl_powersave_ap(vif, skb, &more_data))
                        return 0;
        }
 
        if (test_bit(WMI_ENABLED, &ar->flag)) {
                if (skb_headroom(skb) < dev->needed_headroom) {
-                       WARN_ON(1);
-                       goto fail_tx;
+                       struct sk_buff *tmp_skb = skb;
+
+                       skb = skb_realloc_headroom(skb, dev->needed_headroom);
+                       kfree_skb(tmp_skb);
+                       if (skb == NULL) {
+                               vif->net_stats.tx_dropped++;
+                               return 0;
+                       }
                }
 
                if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
                }
 
                if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
-                                           more_data, 0, 0, NULL)) {
+                                           more_data, 0, 0, NULL,
+                                           vif->fw_vif_idx)) {
                        ath6kl_err("wmi_data_hdr_add failed\n");
                        goto fail_tx;
                }
 
-               if ((ar->nw_type == ADHOC_NETWORK) &&
-                    ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+               if ((vif->nw_type == ADHOC_NETWORK) &&
+                    ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
                        chk_adhoc_ps_mapping = true;
                else {
                        /* get the stream mapping */
-                       ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
-                                   0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+                       ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+                                   vif->fw_vif_idx, skb,
+                                   0, test_bit(WMM_ENABLED, &vif->flags), &ac);
                        if (ret)
                                goto fail_tx;
                }
@@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
 fail_tx:
        dev_kfree_skb(skb);
 
-       ar->net_stats.tx_dropped++;
-       ar->net_stats.tx_aborted_errors++;
+       vif->net_stats.tx_dropped++;
+       vif->net_stats.tx_aborted_errors++;
 
        return 0;
 }
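
The tx path above now reallocates headroom instead of WARNing and dropping when the WMI header does not fit. A generic sketch of that idiom: copy the skb with enough headroom, free the original, and account a drop on allocation failure; the helper name and stats pointer are illustrative.

        #include <linux/skbuff.h>

        static struct sk_buff *example_ensure_headroom(struct sk_buff *skb,
                                                       unsigned int needed,
                                                       unsigned long *tx_dropped)
        {
                struct sk_buff *nskb;

                if (skb_headroom(skb) >= needed)
                        return skb;

                nskb = skb_realloc_headroom(skb, needed);
                kfree_skb(skb);                 /* original is no longer needed */
                if (!nskb)
                        (*tx_dropped)++;

                return nskb;                    /* NULL means the packet was dropped */
        }
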
@@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
                                               struct htc_packet *packet)
 {
        struct ath6kl *ar = target->dev->ar;
+       struct ath6kl_vif *vif;
        enum htc_endpoint_id endpoint = packet->endpoint;
+       enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
 
        if (endpoint == ar->ctrl_ep) {
                /*
@@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
                set_bit(WMI_CTRL_EP_FULL, &ar->flag);
                spin_unlock_bh(&ar->lock);
                ath6kl_err("wmi ctrl ep is full\n");
-               return HTC_SEND_FULL_KEEP;
+               goto stop_adhoc_netq;
        }
 
        if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
-               return HTC_SEND_FULL_KEEP;
-
-       if (ar->nw_type == ADHOC_NETWORK)
-               /*
-                * In adhoc mode, we cannot differentiate traffic
-                * priorities so there is no need to continue, however we
-                * should stop the network.
-                */
-               goto stop_net_queues;
+               goto stop_adhoc_netq;
 
        /*
         * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@@ -459,29 +465,43 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
         */
        if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
            ar->hiac_stream_active_pri &&
-           ar->cookie_count <= MAX_HI_COOKIE_NUM)
+           ar->cookie_count <= MAX_HI_COOKIE_NUM) {
                /*
                 * Give preference to the highest priority stream by
                 * dropping the packets which overflowed.
                 */
-               return HTC_SEND_FULL_DROP;
+               action = HTC_SEND_FULL_DROP;
+               goto stop_adhoc_netq;
+       }
 
-stop_net_queues:
-       spin_lock_bh(&ar->lock);
-       set_bit(NETQ_STOPPED, &ar->flag);
-       spin_unlock_bh(&ar->lock);
-       netif_stop_queue(ar->net_dev);
+stop_adhoc_netq:
+       /* FIXME: Locking */
+       spin_lock_bh(&ar->list_lock);
+       list_for_each_entry(vif, &ar->vif_list, list) {
+               if (vif->nw_type == ADHOC_NETWORK) {
+                       spin_unlock_bh(&ar->list_lock);
 
-       return HTC_SEND_FULL_KEEP;
+                       spin_lock_bh(&vif->if_lock);
+                       set_bit(NETQ_STOPPED, &vif->flags);
+                       spin_unlock_bh(&vif->if_lock);
+                       netif_stop_queue(vif->ndev);
+
+                       return action;
+               }
+       }
+       spin_unlock_bh(&ar->list_lock);
+
+       return action;
 }
 
 /* TODO this needs to be looked at */
-static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
                                     enum htc_endpoint_id eid, u32 map_no)
 {
+       struct ath6kl *ar = vif->ar;
        u32 i;
 
-       if (ar->nw_type != ADHOC_NETWORK)
+       if (vif->nw_type != ADHOC_NETWORK)
                return;
 
        if (!ar->ibss_ps_enable)
@@ -523,7 +543,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
        int status;
        enum htc_endpoint_id eid;
        bool wake_event = false;
-       bool flushing = false;
+       bool flushing[MAX_NUM_VIF] = {false};
+       u8 if_idx;
+       struct ath6kl_vif *vif;
 
        skb_queue_head_init(&skb_queue);
 
@@ -569,15 +591,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
                                wake_event = true;
                }
 
+               if (eid == ar->ctrl_ep) {
+                       if_idx = wmi_cmd_hdr_get_if_idx(
+                               (struct wmi_cmd_hdr *) skb->data);
+               } else {
+                       if_idx = wmi_data_hdr_get_if_idx(
+                               (struct wmi_data_hdr *) skb->data);
+               }
+
+               vif = ath6kl_get_vif_by_index(ar, if_idx);
+               if (!vif) {
+                       ath6kl_free_cookie(ar, ath6kl_cookie);
+                       continue;
+               }
+
                if (status) {
                        if (status == -ECANCELED)
                                /* a packet was flushed  */
-                               flushing = true;
+                               flushing[if_idx] = true;
+
+                       vif->net_stats.tx_errors++;
 
-                       ar->net_stats.tx_errors++;
+                       if (status != -ENOSPC && status != -ECANCELED)
+                               ath6kl_warn("tx complete error: %d\n", status);
 
-                       if (status != -ENOSPC)
-                               ath6kl_err("tx error, status: 0x%x\n", status);
                        ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
                                   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
                                   __func__, skb, packet->buf, packet->act_len,
@@ -588,27 +625,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
                                   __func__, skb, packet->buf, packet->act_len,
                                   eid, "OK");
 
-                       flushing = false;
-                       ar->net_stats.tx_packets++;
-                       ar->net_stats.tx_bytes += skb->len;
+                       flushing[if_idx] = false;
+                       vif->net_stats.tx_packets++;
+                       vif->net_stats.tx_bytes += skb->len;
                }
 
-               ath6kl_tx_clear_node_map(ar, eid, map_no);
+               ath6kl_tx_clear_node_map(vif, eid, map_no);
 
                ath6kl_free_cookie(ar, ath6kl_cookie);
 
-               if (test_bit(NETQ_STOPPED, &ar->flag))
-                       clear_bit(NETQ_STOPPED, &ar->flag);
+               if (test_bit(NETQ_STOPPED, &vif->flags))
+                       clear_bit(NETQ_STOPPED, &vif->flags);
        }
 
        spin_unlock_bh(&ar->lock);
 
        __skb_queue_purge(&skb_queue);
 
-       if (test_bit(CONNECTED, &ar->flag)) {
-               if (!flushing)
-                       netif_wake_queue(ar->net_dev);
+       /* FIXME: Locking */
+       spin_lock_bh(&ar->list_lock);
+       list_for_each_entry(vif, &ar->vif_list, list) {
+               if (test_bit(CONNECTED, &vif->flags) &&
+                   !flushing[vif->fw_vif_idx]) {
+                       spin_unlock_bh(&ar->list_lock);
+                       netif_wake_queue(vif->ndev);
+                       spin_lock_bh(&ar->list_lock);
+               }
        }
+       spin_unlock_bh(&ar->list_lock);
 
        if (wake_event)
                wake_up(&ar->event_wq);
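(Editor's note, not part of the patch: the tx-complete path above now records flushing per firmware interface index and, after the loop, wakes only the netif queues of connected vifs whose index was not flushed. A minimal stand-alone sketch of that bookkeeping, using made-up demo types in place of the real ath6kl structures and printf in place of netif_wake_queue, might look like this.)

#include <stdbool.h>
#include <stdio.h>

#define MAX_NUM_VIF 3            /* stand-in for the driver's limit */

struct demo_vif {
	int fw_vif_idx;
	bool connected;
};

/* Wake only the vifs that are connected and were not flushed. */
static void wake_unflushed(const struct demo_vif *vifs, int n,
			   const bool flushing[MAX_NUM_VIF])
{
	for (int i = 0; i < n; i++)
		if (vifs[i].connected && !flushing[vifs[i].fw_vif_idx])
			printf("netif_wake_queue(vif %d)\n",
			       vifs[i].fw_vif_idx);
}

int main(void)
{
	bool flushing[MAX_NUM_VIF] = { false };
	const struct demo_vif vifs[] = {
		{ 0, true }, { 1, true }, { 2, false },
	};

	flushing[1] = true;                /* a packet on vif 1 was cancelled */
	wake_unflushed(vifs, 3, flushing); /* wakes only vif 0 */
	return 0;
}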
@@ -1041,8 +1085,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
        struct ath6kl_sta *conn = NULL;
        struct sk_buff *skb1 = NULL;
        struct ethhdr *datap = NULL;
+       struct ath6kl_vif *vif;
        u16 seq_no, offset;
-       u8 tid;
+       u8 tid, if_idx;
 
        ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
                   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@@ -1050,7 +1095,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                   packet->act_len, status);
 
        if (status || !(skb->data + HTC_HDR_LENGTH)) {
-               ar->net_stats.rx_errors++;
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+       skb_pull(skb, HTC_HDR_LENGTH);
+
+       if (ept == ar->ctrl_ep) {
+               if_idx =
+               wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+       } else {
+               if_idx =
+               wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+       }
+
+       vif = ath6kl_get_vif_by_index(ar, if_idx);
+       if (!vif) {
                dev_kfree_skb(skb);
                return;
        }
@@ -1059,28 +1120,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
         * Take lock to protect buffer counts and adaptive power throughput
         * state.
         */
-       spin_lock_bh(&ar->lock);
+       spin_lock_bh(&vif->if_lock);
 
-       ar->net_stats.rx_packets++;
-       ar->net_stats.rx_bytes += packet->act_len;
+       vif->net_stats.rx_packets++;
+       vif->net_stats.rx_bytes += packet->act_len;
 
-       spin_unlock_bh(&ar->lock);
+       spin_unlock_bh(&vif->if_lock);
 
-       skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
-       skb_pull(skb, HTC_HDR_LENGTH);
 
        ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
                        skb->data, skb->len);
 
-       skb->dev = ar->net_dev;
+       skb->dev = vif->ndev;
 
        if (!test_bit(WMI_ENABLED, &ar->flag)) {
                if (EPPING_ALIGNMENT_PAD > 0)
                        skb_pull(skb, EPPING_ALIGNMENT_PAD);
-               ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+               ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
                return;
        }
 
+       ath6kl_check_wow_status(ar);
+
        if (ept == ar->ctrl_ep) {
                ath6kl_wmi_control_rx(ar->wmi, skb);
                return;
@@ -1096,18 +1157,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
         * that do not have LLC hdr. They are 16 bytes in size.
         * Allow these frames in the AP mode.
         */
-       if (ar->nw_type != AP_NETWORK &&
+       if (vif->nw_type != AP_NETWORK &&
            ((packet->act_len < min_hdr_len) ||
             (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
                ath6kl_info("frame len is too short or too long\n");
-               ar->net_stats.rx_errors++;
-               ar->net_stats.rx_length_errors++;
+               vif->net_stats.rx_errors++;
+               vif->net_stats.rx_length_errors++;
                dev_kfree_skb(skb);
                return;
        }
 
        /* Get the Power save state of the STA */
-       if (ar->nw_type == AP_NETWORK) {
+       if (vif->nw_type == AP_NETWORK) {
                meta_type = wmi_data_hdr_get_meta(dhdr);
 
                ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@@ -1129,7 +1190,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                }
 
                datap = (struct ethhdr *) (skb->data + offset);
-               conn = ath6kl_find_sta(ar, datap->h_source);
+               conn = ath6kl_find_sta(vif, datap->h_source);
 
                if (!conn) {
                        dev_kfree_skb(skb);
@@ -1160,12 +1221,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                                while ((skbuff = skb_dequeue(&conn->psq))
                                       != NULL) {
                                        spin_unlock_bh(&conn->psq_lock);
-                                       ath6kl_data_tx(skbuff, ar->net_dev);
+                                       ath6kl_data_tx(skbuff, vif->ndev);
                                        spin_lock_bh(&conn->psq_lock);
                                }
                                spin_unlock_bh(&conn->psq_lock);
                                /* Clear the PVB for this STA */
-                               ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+                               ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+                                                      conn->aid, 0);
                        }
                }
 
@@ -1215,12 +1277,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                return;
        }
 
-       if (!(ar->net_dev->flags & IFF_UP)) {
+       if (!(vif->ndev->flags & IFF_UP)) {
                dev_kfree_skb(skb);
                return;
        }
 
-       if (ar->nw_type == AP_NETWORK) {
+       if (vif->nw_type == AP_NETWORK) {
                datap = (struct ethhdr *) skb->data;
                if (is_multicast_ether_addr(datap->h_dest))
                        /*
@@ -1235,8 +1297,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                         * frame to it on the air else send the
                         * frame up the stack.
                         */
-                       struct ath6kl_sta *conn = NULL;
-                       conn = ath6kl_find_sta(ar, datap->h_dest);
+                       conn = ath6kl_find_sta(vif, datap->h_dest);
 
                        if (conn && ar->intra_bss) {
                                skb1 = skb;
@@ -1247,18 +1308,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                        }
                }
                if (skb1)
-                       ath6kl_data_tx(skb1, ar->net_dev);
+                       ath6kl_data_tx(skb1, vif->ndev);
+
+               if (skb == NULL) {
+                       /* nothing to deliver up the stack */
+                       return;
+               }
        }
 
        datap = (struct ethhdr *) skb->data;
 
        if (is_unicast_ether_addr(datap->h_dest) &&
-           aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+           aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
                                  is_amsdu, skb))
                /* aggregation code will handle the skb */
                return;
 
-       ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+       ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
 }
 
 static void aggr_timeout(unsigned long arg)
@@ -1336,9 +1402,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
        memset(stats, 0, sizeof(struct rxtid_stats));
 }
 
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+                            u8 win_sz)
 {
-       struct aggr_info *p_aggr = ar->aggr_cntxt;
+       struct aggr_info *p_aggr = vif->aggr_cntxt;
        struct rxtid *rxtid;
        struct rxtid_stats *stats;
        u16 hold_q_size;
@@ -1405,9 +1472,9 @@ struct aggr_info *aggr_init(struct net_device *dev)
        return p_aggr;
 }
 
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
 {
-       struct aggr_info *p_aggr = ar->aggr_cntxt;
+       struct aggr_info *p_aggr = vif->aggr_cntxt;
        struct rxtid *rxtid;
 
        if (!p_aggr)
index a7de23c..922344d 100644 (file)
@@ -21,7 +21,7 @@
 #include "../regd.h"
 #include "../regd_common.h"
 
-static int ath6kl_wmi_sync_point(struct wmi *wmi);
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
 
 static const s32 wmi_rate_tbl[][2] = {
        /* {W/O SGI, with SGI} */
@@ -81,6 +81,26 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
        return wmi->ep_id;
 }
 
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
+{
+       struct ath6kl_vif *vif, *found = NULL;
+
+       if (WARN_ON(if_idx > (MAX_NUM_VIF - 1)))
+               return NULL;
+
+       /* FIXME: Locking */
+       spin_lock_bh(&ar->list_lock);
+       list_for_each_entry(vif, &ar->vif_list, list) {
+               if (vif->fw_vif_idx == if_idx) {
+                       found = vif;
+                       break;
+               }
+       }
+       spin_unlock_bh(&ar->list_lock);
+
+       return found;
+}
+
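(Editor's note, not part of the patch: ath6kl_get_vif_by_index() above is a bounds-checked linear search over the vif list under a spinlock. As an illustration only, with a plain array standing in for the kernel's list_head and no locking, the same lookup reduces to the sketch below.)

#include <stddef.h>
#include <stdio.h>

#define MAX_NUM_VIF 3

struct demo_vif {
	unsigned int fw_vif_idx;
	const char *name;
};

/* Return the vif whose firmware index matches, or NULL if none/invalid. */
static const struct demo_vif *get_vif_by_index(const struct demo_vif *vifs,
					       size_t n, unsigned int if_idx)
{
	if (if_idx > MAX_NUM_VIF - 1)      /* mirrors the WARN_ON bound check */
		return NULL;

	for (size_t i = 0; i < n; i++)
		if (vifs[i].fw_vif_idx == if_idx)
			return &vifs[i];

	return NULL;
}

int main(void)
{
	const struct demo_vif vifs[] = { { 0, "wlan0" }, { 1, "p2p0" } };
	const struct demo_vif *v = get_vif_by_index(vifs, 2, 1);

	printf("%s\n", v ? v->name : "not found");   /* prints "p2p0" */
	return 0;
}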
 /*  Performs DIX to 802.3 encapsulation for transmit packets.
  *  Assumes the entire DIX header is contiguous and that there is
  *  enough room in the buffer for an 802.3 mac header and LLC+SNAP headers.
@@ -162,12 +182,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
 int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
                            u8 msg_type, bool more_data,
                            enum wmi_data_hdr_data_type data_type,
-                           u8 meta_ver, void *tx_meta_info)
+                           u8 meta_ver, void *tx_meta_info, u8 if_idx)
 {
        struct wmi_data_hdr *data_hdr;
        int ret;
 
-       if (WARN_ON(skb == NULL))
+       if (WARN_ON(skb == NULL || (if_idx > MAX_NUM_VIF - 1)))
                return -EINVAL;
 
        if (tx_meta_info) {
@@ -189,7 +209,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
                    WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
 
        data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
-       data_hdr->info3 = 0;
+       data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
 
        return 0;
 }
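(Editor's note, not part of the patch: both the command and data headers now carry the interface index in a little-endian field, masked to the bits reserved for it — info3 here, info1 in ath6kl_wmi_cmd_send() below. A toy encoder/decoder under an assumed 4-bit mask, with a portable stand-in for cpu_to_le16, shows the round trip.)

#include <stdint.h>
#include <stdio.h>

/* Assumed mask width for illustration; the real driver defines its own. */
#define DEMO_IF_IDX_MASK 0xF

static uint16_t cpu_to_le16_demo(uint16_t v)
{
	/* byte-swap only on a big-endian host */
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	return probe.b[0] ? v : (uint16_t)((v >> 8) | (v << 8));
}

static uint16_t encode_if_idx(uint8_t if_idx)
{
	return cpu_to_le16_demo(if_idx & DEMO_IF_IDX_MASK);
}

static uint8_t decode_if_idx(uint16_t info)
{
	/* the swap is symmetric, so the same helper works as le16_to_cpu */
	return cpu_to_le16_demo(info) & DEMO_IF_IDX_MASK;
}

int main(void)
{
	uint16_t info3 = encode_if_idx(2);

	printf("if_idx round-trips to %u\n", decode_if_idx(info3)); /* 2 */
	return 0;
}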
@@ -216,7 +236,8 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
                return ip_pri;
 }
 
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+                                      struct sk_buff *skb,
                                       u32 layer2_priority, bool wmm_enabled,
                                       u8 *ac)
 {
@@ -262,7 +283,12 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
                        usr_pri = layer2_priority & 0x7;
        }
 
-       /* workaround for WMM S5 */
+       /*
+        * workaround for WMM S5
+        *
+        * FIXME: wmi->traffic_class is always 100 so this test doesn't
+        * make sense
+        */
        if ((wmi->traffic_class == WMM_AC_VI) &&
            ((usr_pri == 5) || (usr_pri == 4)))
                usr_pri = 1;
@@ -284,7 +310,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
                        cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
                /* Implicit streams are created with TSID 0xFF */
                cmd.tsid = WMI_IMPLICIT_PSTREAM;
-               ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+               ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
        }
 
        *ac = traffic_class;
@@ -410,13 +436,14 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
 }
 
 static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
-                                             int len)
+                                             int len, struct ath6kl_vif *vif)
 {
        struct wmi_remain_on_chnl_event *ev;
        u32 freq;
        u32 dur;
        struct ieee80211_channel *chan;
        struct ath6kl *ar = wmi->parent_dev;
+       u32 id;
 
        if (len < sizeof(*ev))
                return -EINVAL;
@@ -426,26 +453,29 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
        dur = le32_to_cpu(ev->duration);
        ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
                   freq, dur);
-       chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+       chan = ieee80211_get_channel(ar->wiphy, freq);
        if (!chan) {
                ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
                           "(freq=%u)\n", freq);
                return -EINVAL;
        }
-       cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT,
+       id = vif->last_roc_id;
+       cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
                                  dur, GFP_ATOMIC);
 
        return 0;
 }
 
 static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
-                                                    u8 *datap, int len)
+                                                    u8 *datap, int len,
+                                                    struct ath6kl_vif *vif)
 {
        struct wmi_cancel_remain_on_chnl_event *ev;
        u32 freq;
        u32 dur;
        struct ieee80211_channel *chan;
        struct ath6kl *ar = wmi->parent_dev;
+       u32 id;
 
        if (len < sizeof(*ev))
                return -EINVAL;
@@ -455,23 +485,29 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
        dur = le32_to_cpu(ev->duration);
        ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
                   "status=%u\n", freq, dur, ev->status);
-       chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+       chan = ieee80211_get_channel(ar->wiphy, freq);
        if (!chan) {
                ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
                           "channel (freq=%u)\n", freq);
                return -EINVAL;
        }
-       cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan,
+       if (vif->last_cancel_roc_id &&
+           vif->last_cancel_roc_id + 1 == vif->last_roc_id)
+               id = vif->last_cancel_roc_id; /* event for cancel command */
+       else
+               id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
+       vif->last_cancel_roc_id = 0;
+       cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
                                           NL80211_CHAN_NO_HT, GFP_ATOMIC);
 
        return 0;
 }

 
-static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                        struct ath6kl_vif *vif)
 {
        struct wmi_tx_status_event *ev;
        u32 id;
-       struct ath6kl *ar = wmi->parent_dev;
 
        if (len < sizeof(*ev))
                return -EINVAL;
@@ -481,7 +517,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
        ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
                   id, ev->ack_status);
        if (wmi->last_mgmt_tx_frame) {
-               cfg80211_mgmt_tx_status(ar->net_dev, id,
+               cfg80211_mgmt_tx_status(vif->ndev, id,
                                        wmi->last_mgmt_tx_frame,
                                        wmi->last_mgmt_tx_frame_len,
                                        !!ev->ack_status, GFP_ATOMIC);
@@ -493,12 +529,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
        return 0;
 }
 
-static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                           struct ath6kl_vif *vif)
 {
        struct wmi_p2p_rx_probe_req_event *ev;
        u32 freq;
        u16 dlen;
-       struct ath6kl *ar = wmi->parent_dev;
 
        if (len < sizeof(*ev))
                return -EINVAL;
@@ -513,10 +549,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
        }
        ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
                   "probe_req_report=%d\n",
-                  dlen, freq, ar->probe_req_report);
+                  dlen, freq, vif->probe_req_report);
 
-       if (ar->probe_req_report || ar->nw_type == AP_NETWORK)
-               cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+       if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
+               cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
 
        return 0;
 }
@@ -536,12 +572,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
        return 0;
 }
 
-static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                        struct ath6kl_vif *vif)
 {
        struct wmi_rx_action_event *ev;
        u32 freq;
        u16 dlen;
-       struct ath6kl *ar = wmi->parent_dev;
 
        if (len < sizeof(*ev))
                return -EINVAL;
@@ -555,7 +591,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
                return -EINVAL;
        }
        ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
-       cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+       cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
 
        return 0;
 }
@@ -620,7 +656,8 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
 }
 
 /* Send a "simple" wmi command -- one with no arguments */
-static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
+                                enum wmi_cmd_id cmd_id)
 {
        struct sk_buff *skb;
        int ret;
@@ -629,7 +666,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
        if (!skb)
                return -ENOMEM;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
 
        return ret;
 }
@@ -641,7 +678,6 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
        if (len < sizeof(struct wmi_ready_event_2))
                return -EINVAL;
 
-       wmi->ready = true;
        ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
                           le32_to_cpu(ev->sw_version),
                           le32_to_cpu(ev->abi_version));
@@ -673,32 +709,73 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
        cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
        cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
 
-       ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG);
+       ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+                           NO_SYNC_WMIFLAG);
 
        return 0;
 }
 
-static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
+{
+       struct sk_buff *skb;
+       struct roam_ctrl_cmd *cmd;
+
+       skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct roam_ctrl_cmd *) skb->data;
+       memset(cmd, 0, sizeof(*cmd));
+
+       memcpy(cmd->info.bssid, bssid, ETH_ALEN);
+       cmd->roam_ctrl = WMI_FORCE_ROAM;
+
+       ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
+       return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+                                  NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
+{
+       struct sk_buff *skb;
+       struct roam_ctrl_cmd *cmd;
+
+       skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct roam_ctrl_cmd *) skb->data;
+       memset(cmd, 0, sizeof(*cmd));
+
+       cmd->info.roam_mode = mode;
+       cmd->roam_ctrl = WMI_SET_ROAM_MODE;
+
+       ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+       return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+                                  NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                      struct ath6kl_vif *vif)
 {
        struct wmi_connect_event *ev;
        u8 *pie, *peie;
-       struct ath6kl *ar = wmi->parent_dev;
 
        if (len < sizeof(struct wmi_connect_event))
                return -EINVAL;
 
        ev = (struct wmi_connect_event *) datap;
 
-       if (ar->nw_type == AP_NETWORK) {
+       if (vif->nw_type == AP_NETWORK) {
                /* AP mode start/STA connected event */
-               struct net_device *dev = ar->net_dev;
+               struct net_device *dev = vif->ndev;
                if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
                        ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
                                   "(AP started)\n",
                                   __func__, le16_to_cpu(ev->u.ap_bss.ch),
                                   ev->u.ap_bss.bssid);
                        ath6kl_connect_ap_mode_bss(
-                               ar, le16_to_cpu(ev->u.ap_bss.ch));
+                               vif, le16_to_cpu(ev->u.ap_bss.ch));
                } else {
                        ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
                                   "auth=%u keymgmt=%u cipher=%u apsd_info=%u "
@@ -710,7 +787,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
                                   le16_to_cpu(ev->u.ap_sta.cipher),
                                   ev->u.ap_sta.apsd_info);
                        ath6kl_connect_ap_mode_sta(
-                               ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+                               vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
                                ev->u.ap_sta.keymgmt,
                                le16_to_cpu(ev->u.ap_sta.cipher),
                                ev->u.ap_sta.auth, ev->assoc_req_len,
@@ -755,7 +832,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
                pie += pie[1] + 2;
        }
 
-       ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch),
+       ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
                             ev->u.sta.bssid,
                             le16_to_cpu(ev->u.sta.listen_intvl),
                             le16_to_cpu(ev->u.sta.beacon_intvl),
@@ -834,14 +911,15 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
                alpha2[0] = country->isoName[0];
                alpha2[1] = country->isoName[1];
 
-               regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2);
+               regulatory_hint(wmi->parent_dev->wiphy, alpha2);
 
                ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
                                alpha2[0], alpha2[1]);
        }
 }
 
-static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                         struct ath6kl_vif *vif)
 {
        struct wmi_disconnect_event *ev;
        wmi->traffic_class = 100;
@@ -857,10 +935,8 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
                   ev->disconn_reason, ev->assoc_resp_len);
 
        wmi->is_wmm_enabled = false;
-       wmi->pair_crypto_type = NONE_CRYPT;
-       wmi->grp_crypto_type = NONE_CRYPT;
 
-       ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+       ath6kl_disconnect_event(vif, ev->disconn_reason,
                                ev->bssid, ev->assoc_resp_len, ev->assoc_info,
                                le16_to_cpu(ev->proto_reason_status));
 
@@ -886,7 +962,8 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
        return 0;
 }
 
-static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                          struct ath6kl_vif *vif)
 {
        struct wmi_tkip_micerr_event *ev;
 
@@ -895,12 +972,13 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
 
        ev = (struct wmi_tkip_micerr_event *) datap;
 
-       ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+       ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
 
        return 0;
 }
 
-static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                      struct ath6kl_vif *vif)
 {
        struct wmi_bss_info_hdr2 *bih;
        u8 *buf;
@@ -927,26 +1005,27 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
                return 0; /* Only update BSS table for now */
 
        if (bih->frame_type == BEACON_FTYPE &&
-           test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) {
-               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
-               ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+           test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
+               clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+               ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+                                        NONE_BSS_FILTER, 0);
        }
 
-       channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch));
+       channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
        if (channel == NULL)
                return -EINVAL;
 
        if (len < 8 + 2 + 2)
                return -EINVAL;
 
-       if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) &&
-           memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) {
+       if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
+           && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
                const u8 *tim;
                tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
                                       len - 8 - 2 - 2);
                if (tim && tim[1] >= 2) {
-                       ar->assoc_bss_dtim_period = tim[3];
-                       set_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+                       vif->assoc_bss_dtim_period = tim[3];
+                       set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
                }
        }
 
@@ -966,7 +1045,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
                                                  IEEE80211_STYPE_BEACON);
                memset(mgmt->da, 0xff, ETH_ALEN);
        } else {
-               struct net_device *dev = ar->net_dev;
+               struct net_device *dev = vif->ndev;
 
                mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                  IEEE80211_STYPE_PROBE_RESP);
@@ -979,7 +1058,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
 
        memcpy(&mgmt->u.beacon, buf, len);
 
-       bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt,
+       bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt,
                                        24 + len, (bih->snr - 95) * 100,
                                        GFP_ATOMIC);
        kfree(mgmt);
@@ -1094,20 +1173,21 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
        return 0;
 }
 
-static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
+                                      struct ath6kl_vif *vif)
 {
        struct wmi_scan_complete_event *ev;
 
        ev = (struct wmi_scan_complete_event *) datap;
 
-       ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+       ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
        wmi->is_probe_ssid = false;
 
        return 0;
 }
 
 static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
-                                              int len)
+                                              int len, struct ath6kl_vif *vif)
 {
        struct wmi_neighbor_report_event *ev;
        u8 i;
@@ -1125,7 +1205,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
                ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
                           i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
                           ev->neighbor[i].bss_flags);
-               cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i,
+               cfg80211_pmksa_candidate_notify(vif->ndev, i,
                                                ev->neighbor[i].bssid,
                                                !!(ev->neighbor[i].bss_flags &
                                                   WMI_PREAUTH_CAPABLE_BSS),
@@ -1166,9 +1246,10 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
        return 0;
 }
 
-static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                    struct ath6kl_vif *vif)
 {
-       ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+       ath6kl_tgt_stats_event(vif, datap, len);
 
        return 0;
 }
@@ -1222,7 +1303,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
        cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
        memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
 
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
@@ -1322,7 +1403,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
        return 0;
 }
 
-static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                  struct ath6kl_vif *vif)
 {
        struct wmi_cac_event *reply;
        struct ieee80211_tspec_ie *ts;
@@ -1343,7 +1425,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
                tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
                        IEEE80211_WMM_IE_TSPEC_TID_MASK;
 
-               ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+               ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+                                             reply->ac, tsid);
        } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
                /*
                 * Following assumes that there is only one outstanding
@@ -1358,7 +1441,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
                                break;
                }
                if (index < (sizeof(active_tsids) * 8))
-                       ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+                       ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+                                                     reply->ac, index);
        }
 
        /*
@@ -1403,7 +1487,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
        cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
        memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
 
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
@@ -1528,14 +1612,15 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
        return 0;
 }
 
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
                        enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
 {
        struct wmi_cmd_hdr *cmd_hdr;
        enum htc_endpoint_id ep_id = wmi->ep_id;
        int ret;
+       u16 info1;
 
-       if (WARN_ON(skb == NULL))
+       if (WARN_ON(skb == NULL || (if_idx > (MAX_NUM_VIF - 1))))
                return -EINVAL;
 
        ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
@@ -1554,19 +1639,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
                 * Make sure all data currently queued is transmitted before
                 * the cmd execution.  Establish a new sync point.
                 */
-               ath6kl_wmi_sync_point(wmi);
+               ath6kl_wmi_sync_point(wmi, if_idx);
        }
 
        skb_push(skb, sizeof(struct wmi_cmd_hdr));
 
        cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
        cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
-       cmd_hdr->info1 = 0;     /* added for virtual interface */
+       info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
+       cmd_hdr->info1 = cpu_to_le16(info1);
 
        /* Only for OPT_TX_CMD, use BE endpoint. */
        if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
                ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
-                                             false, false, 0, NULL);
+                                             false, false, 0, NULL, if_idx);
                if (ret) {
                        dev_kfree_skb(skb);
                        return ret;
@@ -1582,13 +1668,14 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
                 * Make sure all new data queued waits for the command to
                 * execute. Establish a new sync point.
                 */
-               ath6kl_wmi_sync_point(wmi);
+               ath6kl_wmi_sync_point(wmi, if_idx);
        }
 
        return 0;
 }
 
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+                          enum network_type nw_type,
                           enum dot11_auth_mode dot11_auth_mode,
                           enum auth_mode auth_mode,
                           enum crypto_type pairwise_crypto,
@@ -1639,15 +1726,14 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
        if (bssid != NULL)
                memcpy(cc->bssid, bssid, ETH_ALEN);
 
-       wmi->pair_crypto_type = pairwise_crypto;
-       wmi->grp_crypto_type = group_crypto;
-
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
+                                 NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+                            u16 channel)
 {
        struct sk_buff *skb;
        struct wmi_reconnect_cmd *cc;
@@ -1668,13 +1754,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
        if (bssid != NULL)
                memcpy(cc->bssid, bssid, ETH_ALEN);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
 {
        int ret;
 
@@ -1683,12 +1769,13 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
        wmi->traffic_class = 100;
 
        /* Disconnect command does not need to do a SYNC before. */
-       ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+       ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
 
        return ret;
 }
 
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+                            enum wmi_scan_type scan_type,
                             u32 force_fgscan, u32 is_legacy,
                             u32 home_dwell_time, u32 force_scan_interval,
                             s8 num_chan, u16 *ch_list)
@@ -1724,13 +1811,14 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
        for (i = 0; i < num_chan; i++)
                sc->ch_list[i] = cpu_to_le16(ch_list[i]);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
+                             u16 fg_start_sec,
                              u16 fg_end_sec, u16 bg_sec,
                              u16 minact_chdw_msec, u16 maxact_chdw_msec,
                              u16 pas_chdw_msec, u8 short_scan_ratio,
@@ -1757,12 +1845,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
        sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
        sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
 {
        struct sk_buff *skb;
        struct wmi_bss_filter_cmd *cmd;
@@ -1779,12 +1867,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
        cmd->bss_filter = filter;
        cmd->ie_mask = cpu_to_le32(ie_mask);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
                              u8 ssid_len, u8 *ssid)
 {
        struct sk_buff *skb;
@@ -1816,12 +1904,13 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
        cmd->ssid_len = ssid_len;
        memcpy(cmd->ssid, ssid, ssid_len);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+                                 u16 listen_interval,
                                  u16 listen_beacons)
 {
        struct sk_buff *skb;
@@ -1836,12 +1925,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
        cmd->listen_intvl = cpu_to_le16(listen_interval);
        cmd->num_beacons = cpu_to_le16(listen_beacons);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
 {
        struct sk_buff *skb;
        struct wmi_power_mode_cmd *cmd;
@@ -1855,12 +1944,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
        cmd->pwr_mode = pwr_mode;
        wmi->pwr_mode = pwr_mode;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
                            u16 ps_poll_num, u16 dtim_policy,
                            u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
                            u16 ps_fail_event_policy)
@@ -1881,12 +1970,12 @@ int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
        pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
        pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
 {
        struct sk_buff *skb;
        struct wmi_disc_timeout_cmd *cmd;
@@ -1899,15 +1988,20 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
        cmd = (struct wmi_disc_timeout_cmd *) skb->data;
        cmd->discon_timeout = timeout;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
                                  NO_SYNC_WMIFLAG);
+
+       if (ret == 0)
+               ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
+
        return ret;
 }
 
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
                          enum crypto_type key_type,
                          u8 key_usage, u8 key_len,
-                         u8 *key_rsc, u8 *key_material,
+                         u8 *key_rsc, unsigned int key_rsc_len,
+                         u8 *key_material,
                          u8 key_op_ctrl, u8 *mac_addr,
                          enum wmi_sync_flag sync_flag)
 {
@@ -1920,7 +2014,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
                   key_index, key_type, key_usage, key_len, key_op_ctrl);
 
        if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
-           (key_material == NULL))
+           (key_material == NULL) || key_rsc_len > 8)
                return -EINVAL;
 
        if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
@@ -1938,20 +2032,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
        memcpy(cmd->key, key_material, key_len);
 
        if (key_rsc != NULL)
-               memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+               memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
 
        cmd->key_op_ctrl = key_op_ctrl;
 
        if (mac_addr)
                memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
                                  sync_flag);
 
        return ret;
 }
 
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
 {
        struct sk_buff *skb;
        struct wmi_add_krk_cmd *cmd;
@@ -1964,12 +2058,13 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
        cmd = (struct wmi_add_krk_cmd *) skb->data;
        memcpy(cmd->krk, krk, WMI_KRK_LEN);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
+                                 NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
 {
        struct sk_buff *skb;
        struct wmi_delete_cipher_key_cmd *cmd;
@@ -1985,13 +2080,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
        cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
        cmd->key_index = key_index;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
                            const u8 *pmkid, bool set)
 {
        struct sk_buff *skb;
@@ -2018,14 +2113,14 @@ int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
                cmd->enable = PMKID_DISABLE;
        }
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
 static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
-                             enum htc_endpoint_id ep_id)
+                             enum htc_endpoint_id ep_id, u8 if_idx)
 {
        struct wmi_data_hdr *data_hdr;
        int ret;
@@ -2037,14 +2132,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
 
        data_hdr = (struct wmi_data_hdr *) skb->data;
        data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
-       data_hdr->info3 = 0;
+       data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
 
        ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
 
        return ret;
 }
 
-static int ath6kl_wmi_sync_point(struct wmi *wmi)
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
 {
        struct sk_buff *skb;
        struct wmi_sync_cmd *cmd;
@@ -2100,7 +2195,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
         * Send sync cmd followed by sync data messages on all
         * endpoints being used
         */
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        if (ret)
@@ -2119,7 +2214,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
                                               traffic_class);
                ret =
                    ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
-                                             ep_id);
+                                             ep_id, if_idx);
 
                if (ret)
                        break;
@@ -2142,7 +2237,7 @@ free_skb:
        return ret;
 }
 
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
                                  struct wmi_create_pstream_cmd *params)
 {
        struct sk_buff *skb;
@@ -2231,12 +2326,13 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
                ath6kl_indicate_tx_activity(wmi->parent_dev,
                                            params->traffic_class, true);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+                                 u8 tsid)
 {
        struct sk_buff *skb;
        struct wmi_delete_pstream_cmd *cmd;
@@ -2272,7 +2368,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
                   "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
                   traffic_class, tsid);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
                                  SYNC_BEFORE_WMIFLAG);
 
        spin_lock_bh(&wmi->lock);
@@ -2311,17 +2407,173 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
        cmd = (struct wmi_set_ip_cmd *) skb->data;
        memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+       ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
+                                 NO_SYNC_WMIFLAG);
+       return ret;
+}
+
+static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
+{
+       u16 active_tsids;
+       u8 stream_exist;
+       int i;
+
+       /*
+        * Relinquish credits from all implicitly created pstreams
+        * when we go to sleep. If user-created explicit thinstreams
+        * exist within a fatpipe, leave them intact for the user
+        * to delete.
+        */
+       spin_lock_bh(&wmi->lock);
+       stream_exist = wmi->fat_pipe_exist;
+       spin_unlock_bh(&wmi->lock);
+
+       for (i = 0; i < WMM_NUM_AC; i++) {
+               if (stream_exist & (1 << i)) {
+
+                       /*
+                        * FIXME: Is this lock & unlock inside
+                        * the for loop correct? May need rework.
+                        */
+                       spin_lock_bh(&wmi->lock);
+                       active_tsids = wmi->stream_exist_for_ac[i];
+                       spin_unlock_bh(&wmi->lock);
+
+                       /*
+                        * If there are no user-created thin streams,
+                        * delete the fatpipe.
+                        */
+                       if (!active_tsids) {
+                               stream_exist &= ~(1 << i);
+                               /*
+                                * Indicate inactivity to driver layer for
+                                * this fatpipe (pstream)
+                                */
+                               ath6kl_indicate_tx_activity(wmi->parent_dev,
+                                                           i, false);
+                       }
+               }
+       }
+
+       /* FIXME: Can we do this assignment without locking? */
+       spin_lock_bh(&wmi->lock);
+       wmi->fat_pipe_exist = stream_exist;
+       spin_unlock_bh(&wmi->lock);
+}
+
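(Editor's note, not part of the patch: the new relinquish helper above walks the fat-pipe bitmap, one bit per WMM access class, and clears every class that has no user-created thin streams left. Stripped of the locking and the driver callbacks, the bit manipulation is just the sketch below; names and types are illustrative only.)

#include <stdint.h>
#include <stdio.h>

#define WMM_NUM_AC 4

/*
 * stream_exist: one bit per access class with an open fat pipe.
 * active_tsids[ac]: bitmap of user-created thin streams in that class.
 * Returns the updated fat-pipe bitmap; classes with no thin streams
 * are dropped (the real code also tells the driver they went idle).
 */
static uint8_t relinquish_implicit(uint8_t stream_exist,
				   const uint16_t active_tsids[WMM_NUM_AC])
{
	for (int i = 0; i < WMM_NUM_AC; i++)
		if ((stream_exist & (1 << i)) && !active_tsids[i])
			stream_exist &= ~(1 << i);

	return stream_exist;
}

int main(void)
{
	const uint16_t active_tsids[WMM_NUM_AC] = { 0, 0x4, 0, 0 };
	uint8_t exist = 0x3;	/* fat pipes open for AC 0 and AC 1 */

	/* AC 0 has no thin streams and is dropped; AC 1 keeps its pipe. */
	printf("0x%x\n", relinquish_implicit(exist, active_tsids)); /* 0x2 */
	return 0;
}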
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+                                      enum ath6kl_host_mode host_mode)
+{
+       struct sk_buff *skb;
+       struct wmi_set_host_sleep_mode_cmd *cmd;
+       int ret;
+
+       if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+           (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+               ath6kl_err("invalid host sleep mode: %d\n", host_mode);
+               return -EINVAL;
+       }
+
+       skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+       if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+               ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+               cmd->asleep = cpu_to_le32(1);
+       } else
+               cmd->awake = cpu_to_le32(1);
+
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+                                 WMI_SET_HOST_SLEEP_MODE_CMDID,
+                                 NO_SYNC_WMIFLAG);
        return ret;
 }
 
-static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
-                                           int len)
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+                               enum ath6kl_wow_mode wow_mode,
+                               u32 filter, u16 host_req_delay)
 {
-       if (len < sizeof(struct wmi_get_wow_list_reply))
+       struct sk_buff *skb;
+       struct wmi_set_wow_mode_cmd *cmd;
+       int ret;
+
+       if (wow_mode != ATH6KL_WOW_MODE_ENABLE &&
+           wow_mode != ATH6KL_WOW_MODE_DISABLE) {
+               ath6kl_err("invalid wow mode: %d\n", wow_mode);
                return -EINVAL;
+       }
 
-       return 0;
+       skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+       cmd->enable_wow = cpu_to_le32(wow_mode);
+       cmd->filter = cpu_to_le32(filter);
+       cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+                                 NO_SYNC_WMIFLAG);
+       return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+                                  u8 list_id, u8 filter_size,
+                                  u8 filter_offset, u8 *filter, u8 *mask)
+{
+       struct sk_buff *skb;
+       struct wmi_add_wow_pattern_cmd *cmd;
+       u16 size;
+       u8 *filter_mask;
+       int ret;
+
+       /*
+        * Allocate additional memory in the buffer to hold the filter
+        * and mask values, which together take twice filter_size bytes.
+        */
+       size = sizeof(*cmd) + (2 * filter_size);
+
+       skb = ath6kl_wmi_get_new_buf(size);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
+       cmd->filter_list_id = list_id;
+       cmd->filter_size = filter_size;
+       cmd->filter_offset = filter_offset;
+
+       memcpy(cmd->filter, filter, filter_size);
+
+       filter_mask = (u8 *) (cmd->filter + filter_size);
+       memcpy(filter_mask, mask, filter_size);
+
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
+                                 NO_SYNC_WMIFLAG);
+
+       return ret;
+}
+
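(Editor's note, not part of the patch: ath6kl_wmi_add_wow_pattern_cmd() above sizes the buffer as sizeof(*cmd) + 2 * filter_size so the pattern bytes and their mask can be appended back-to-back after the fixed header. A small user-space sketch of the same layout, with hypothetical struct names and a flexible array member in place of the WMI definitions, follows.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_wow_pattern_cmd {
	uint8_t filter_list_id;
	uint8_t filter_size;
	uint8_t filter_offset;
	uint8_t filter[];	/* filter bytes, then the mask bytes */
};

static struct demo_wow_pattern_cmd *build_pattern(uint8_t list_id,
						  uint8_t offset,
						  const uint8_t *filter,
						  const uint8_t *mask,
						  uint8_t size)
{
	/* Header plus twice the pattern size: filter followed by mask. */
	struct demo_wow_pattern_cmd *cmd = malloc(sizeof(*cmd) + 2 * size);

	if (!cmd)
		return NULL;

	cmd->filter_list_id = list_id;
	cmd->filter_size = size;
	cmd->filter_offset = offset;
	memcpy(cmd->filter, filter, size);
	memcpy(cmd->filter + size, mask, size);
	return cmd;
}

int main(void)
{
	const uint8_t filter[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t mask[]   = { 0xff, 0xff, 0x00, 0xff };
	struct demo_wow_pattern_cmd *cmd =
		build_pattern(0, 0, filter, mask, sizeof(filter));

	if (cmd) {
		printf("cmd uses %zu bytes\n",
		       sizeof(*cmd) + 2 * cmd->filter_size);
		free(cmd);
	}
	return 0;
}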
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+                                  u16 list_id, u16 filter_id)
+{
+       struct sk_buff *skb;
+       struct wmi_del_wow_pattern_cmd *cmd;
+       int ret;
+
+       skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
+       cmd->filter_list_id = cpu_to_le16(list_id);
+       cmd->filter_id = cpu_to_le16(filter_id);
+
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
+                                 NO_SYNC_WMIFLAG);
+       return ret;
 }
 
 static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
@@ -2336,7 +2588,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
        cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
        cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+       ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
 
        return ret;
 }
@@ -2379,12 +2631,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
        return ret;
 }
 
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
 {
-       return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+       return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
 }
 
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
 {
        struct sk_buff *skb;
        struct wmi_set_tx_pwr_cmd *cmd;
@@ -2397,18 +2649,24 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
        cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
        cmd->dbM = dbM;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
 {
-       return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+       return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
 }
 
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
+{
+       return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
+}
+
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+                                u8 preamble_policy)
 {
        struct sk_buff *skb;
        struct wmi_set_lpreamble_cmd *cmd;
@@ -2422,7 +2680,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
        cmd->status = status;
        cmd->preamble_policy = preamble_policy;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
@@ -2440,11 +2698,12 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
        cmd = (struct wmi_set_rts_cmd *) skb->data;
        cmd->threshold = cpu_to_le16(threshold);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+       ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
+                                 NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
 {
        struct sk_buff *skb;
        struct wmi_set_wmm_txop_cmd *cmd;
@@ -2460,12 +2719,13 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
        cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
        cmd->txop_enable = cfg;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
                                  NO_SYNC_WMIFLAG);
        return ret;
 }
 
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+                                u8 keep_alive_intvl)
 {
        struct sk_buff *skb;
        struct wmi_set_keepalive_cmd *cmd;
@@ -2477,10 +2737,13 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
 
        cmd = (struct wmi_set_keepalive_cmd *) skb->data;
        cmd->keep_alive_intvl = keep_alive_intvl;
-       wmi->keep_alive_intvl = keep_alive_intvl;
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
                                  NO_SYNC_WMIFLAG);
+
+       if (ret == 0)
+               ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
+
        return ret;
 }
 
@@ -2495,7 +2758,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
 
        memcpy(skb->data, buf, len);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+       ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
 
        return ret;
 }
@@ -2528,28 +2791,31 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
        return 0;
 }
 
-static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                        struct ath6kl_vif *vif)
 {
        struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
 
-       aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+       aggr_recv_addba_req_evt(vif, cmd->tid,
                                le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
 
        return 0;
 }
 
-static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                        struct ath6kl_vif *vif)
 {
        struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
 
-       aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+       aggr_recv_delba_req_evt(vif, cmd->tid);
 
        return 0;
 }
 
 /*  AP mode functions */
 
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+                                struct wmi_connect_cmd *p)
 {
        struct sk_buff *skb;
        struct wmi_connect_cmd *cm;
@@ -2562,7 +2828,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
        cm = (struct wmi_connect_cmd *) skb->data;
        memcpy(cm, p, sizeof(*cm));
 
-       res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+       res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
                                  NO_SYNC_WMIFLAG);
        ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
                   "ctrl_flags=0x%x-> res=%d\n",
@@ -2571,7 +2837,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
        return res;
 }
 
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
+                          u16 reason)
 {
        struct sk_buff *skb;
        struct wmi_ap_set_mlme_cmd *cm;
@@ -2585,11 +2852,12 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
        cm->reason = cpu_to_le16(reason);
        cm->cmd = cmd;
 
-       return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID,
+       return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                     struct ath6kl_vif *vif)
 {
        struct wmi_pspoll_event *ev;
 
@@ -2598,19 +2866,21 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
 
        ev = (struct wmi_pspoll_event *) datap;
 
-       ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+       ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
 
        return 0;
 }
 
-static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
+                                         struct ath6kl_vif *vif)
 {
-       ath6kl_dtimexpiry_event(wmi->parent_dev);
+       ath6kl_dtimexpiry_event(vif);
 
        return 0;
 }
 
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
+                          bool flag)
 {
        struct sk_buff *skb;
        struct wmi_ap_set_pvb_cmd *cmd;
@@ -2625,13 +2895,14 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
        cmd->rsvd = cpu_to_le16(0);
        cmd->flag = cpu_to_le32(flag);
 
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return 0;
 }
 
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+                                      u8 rx_meta_ver,
                                       bool rx_dot11_hdr, bool defrag_on_host)
 {
        struct sk_buff *skb;
@@ -2648,14 +2919,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
        cmd->meta_ver = rx_meta_ver;
 
        /* Delete the local aggr state, on host */
-       ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+       ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
                                  NO_SYNC_WMIFLAG);
 
        return ret;
 }
 
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
-                            u8 ie_len)
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+                            const u8 *ie, u8 ie_len)
 {
        struct sk_buff *skb;
        struct wmi_set_appie_cmd *p;
@@ -2670,7 +2941,7 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
        p->mgmt_frm_type = mgmt_frm_type;
        p->ie_len = ie_len;
        memcpy(p->ie_info, ie, ie_len);
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
@@ -2688,11 +2959,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
        cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
        cmd->disable = disable ? 1 : 0;
 
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
 {
        struct sk_buff *skb;
        struct wmi_remain_on_chnl_cmd *p;
@@ -2706,12 +2977,12 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
        p = (struct wmi_remain_on_chnl_cmd *) skb->data;
        p->freq = cpu_to_le32(freq);
        p->duration = cpu_to_le32(dur);
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
-                              const u8 *data, u16 data_len)
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+                              u32 wait, const u8 *data, u16 data_len)
 {
        struct sk_buff *skb;
        struct wmi_send_action_cmd *p;
@@ -2731,6 +3002,7 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
        }
 
        kfree(wmi->last_mgmt_tx_frame);
+       memcpy(buf, data, data_len);
        wmi->last_mgmt_tx_frame = buf;
        wmi->last_mgmt_tx_frame_len = data_len;
 
@@ -2742,13 +3014,13 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
        p->wait = cpu_to_le32(wait);
        p->len = cpu_to_le16(data_len);
        memcpy(p->data, data, data_len);
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
-                                      const u8 *dst,
-                                      const u8 *data, u16 data_len)
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+                                      const u8 *dst, const u8 *data,
+                                      u16 data_len)
 {
        struct sk_buff *skb;
        struct wmi_p2p_probe_response_cmd *p;
@@ -2764,11 +3036,12 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
        memcpy(p->destination_addr, dst, ETH_ALEN);
        p->len = cpu_to_le16(data_len);
        memcpy(p->data, data, data_len);
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+                                  WMI_SEND_PROBE_RESPONSE_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
 {
        struct sk_buff *skb;
        struct wmi_probe_req_report_cmd *p;
@@ -2781,11 +3054,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
                   enable);
        p = (struct wmi_probe_req_report_cmd *) skb->data;
        p->enable = enable ? 1 : 0;
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
 {
        struct sk_buff *skb;
        struct wmi_get_p2p_info *p;
@@ -2798,14 +3071,15 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
                   info_req_flags);
        p = (struct wmi_get_p2p_info *) skb->data;
        p->info_req_flags = cpu_to_le32(info_req_flags);
-       return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID,
+       return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
                                   NO_SYNC_WMIFLAG);
 }
 
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi)
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
 {
        ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
-       return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+       return ath6kl_wmi_simple_cmd(wmi, if_idx,
+                                    WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
 }
 
 static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
@@ -2818,7 +3092,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
 
        if (skb->len < sizeof(struct wmix_cmd_hdr)) {
                ath6kl_err("bad packet 1\n");
-               wmi->stat.cmd_len_err++;
                return -EINVAL;
        }
 
@@ -2840,7 +3113,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
                break;
        default:
                ath6kl_warn("unknown cmd id 0x%x\n", id);
-               wmi->stat.cmd_id_err++;
                ret = -EINVAL;
                break;
        }
@@ -2848,12 +3120,19 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
        return ret;
 }
 
+static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+       return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
+}
+
 /* Control Path */
 int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
 {
        struct wmi_cmd_hdr *cmd;
+       struct ath6kl_vif *vif;
        u32 len;
        u16 id;
+       u8 if_idx;
        u8 *datap;
        int ret = 0;
 
@@ -2863,12 +3142,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
        if (skb->len < sizeof(struct wmi_cmd_hdr)) {
                ath6kl_err("bad packet 1\n");
                dev_kfree_skb(skb);
-               wmi->stat.cmd_len_err++;
                return -EINVAL;
        }
 
        cmd = (struct wmi_cmd_hdr *) skb->data;
        id = le16_to_cpu(cmd->cmd_id);
+       if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
 
        skb_pull(skb, sizeof(struct wmi_cmd_hdr));
 
@@ -2879,6 +3158,15 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
        ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
                        datap, len);
 
+       vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+       if (!vif) {
+               ath6kl_dbg(ATH6KL_DBG_WMI,
+                          "Wmi event for unavailable vif, vif_index:%d\n",
+                           if_idx);
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+
        switch (id) {
        case WMI_GET_BITRATE_CMDID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -2898,11 +3186,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_CONNECT_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
-               ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
                break;
        case WMI_DISCONNECT_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
-               ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
                break;
        case WMI_PEER_NODE_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
@@ -2910,11 +3198,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_TKIP_MICERR_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
-               ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
                break;
        case WMI_BSSINFO_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
-               ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
                break;
        case WMI_REGDOMAIN_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
@@ -2926,11 +3214,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_NEIGHBOR_REPORT_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
-               ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+                                                         vif);
                break;
        case WMI_SCAN_COMPLETE_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
-               ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+               ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
                break;
        case WMI_CMDERROR_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
@@ -2938,7 +3227,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_REPORT_STATISTICS_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
-               ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
                break;
        case WMI_RSSI_THRESHOLD_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
@@ -2953,6 +3242,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_REPORT_ROAM_TBL_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+               ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
                break;
        case WMI_EXTENSION_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
@@ -2960,7 +3250,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_CAC_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
-               ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
                break;
        case WMI_CHANNEL_CHANGE_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
@@ -2996,7 +3286,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_GET_WOW_LIST_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
-               ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
                break;
        case WMI_GET_PMKID_LIST_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
@@ -3004,25 +3293,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_PSPOLL_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
-               ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
                break;
        case WMI_DTIMEXPIRY_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
-               ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
                break;
        case WMI_SET_PARAMS_REPLY_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
                break;
        case WMI_ADDBA_REQ_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
-               ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
                break;
        case WMI_ADDBA_RESP_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
                break;
        case WMI_DELBA_REQ_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
-               ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
                break;
        case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI,
@@ -3038,21 +3327,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_REMAIN_ON_CHNL_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
-               ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
                break;
        case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI,
                           "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
                ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
-                                                               len);
+                                                               len, vif);
                break;
        case WMI_TX_STATUS_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
-               ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
                break;
        case WMI_RX_PROBE_REQ_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
-               ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
                break;
        case WMI_P2P_CAPABILITIES_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
@@ -3060,7 +3349,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        case WMI_RX_ACTION_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
-               ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len);
+               ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
                break;
        case WMI_P2P_INFO_EVENTID:
                ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
@@ -3068,7 +3357,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
                break;
        default:
                ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
-               wmi->stat.cmd_id_err++;
                ret = -EINVAL;
                break;
        }
@@ -3078,11 +3366,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
        return ret;
 }
 
-static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+void ath6kl_wmi_reset(struct wmi *wmi)
 {
-       if (!wmi)
-               return;
-
        spin_lock_bh(&wmi->lock);
 
        wmi->fat_pipe_exist = 0;
@@ -3103,16 +3388,9 @@ void *ath6kl_wmi_init(struct ath6kl *dev)
 
        wmi->parent_dev = dev;
 
-       ath6kl_wmi_qos_state_init(wmi);
-
        wmi->pwr_mode = REC_POWER;
-       wmi->phy_mode = WMI_11G_MODE;
-
-       wmi->pair_crypto_type = NONE_CRYPT;
-       wmi->grp_crypto_type = NONE_CRYPT;
 
-       wmi->ht_allowed[A_BAND_24GHZ] = 1;
-       wmi->ht_allowed[A_BAND_5GHZ] = 1;
+       ath6kl_wmi_reset(wmi);
 
        return wmi;
 }
index f8e644d..76342d5 100644 (file)
@@ -93,11 +93,6 @@ struct sq_threshold_params {
        u8 last_rssi_poll_event;
 };
 
-struct wmi_stats {
-       u32 cmd_len_err;
-       u32 cmd_id_err;
-};
-
 struct wmi_data_sync_bufs {
        u8 traffic_class;
        struct sk_buff *skb;
@@ -111,32 +106,26 @@ struct wmi_data_sync_bufs {
 #define WMM_AC_VO   3          /* voice */
 
 struct wmi {
-       bool ready;
        u16 stream_exist_for_ac[WMM_NUM_AC];
        u8 fat_pipe_exist;
        struct ath6kl *parent_dev;
-       struct wmi_stats stat;
        u8 pwr_mode;
-       u8 phy_mode;
-       u8 keep_alive_intvl;
        spinlock_t lock;
        enum htc_endpoint_id ep_id;
        struct sq_threshold_params
            sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
-       enum crypto_type pair_crypto_type;
-       enum crypto_type grp_crypto_type;
        bool is_wmm_enabled;
-       u8 ht_allowed[A_NUM_BANDS];
        u8 traffic_class;
        bool is_probe_ssid;
 
        u8 *last_mgmt_tx_frame;
        size_t last_mgmt_tx_frame_len;
+       u8 saved_pwr_mode;
 };
 
 struct host_app_area {
-       u32 wmi_protocol_ver;
-};
+       __le32 wmi_protocol_ver;
+} __packed;
 
 enum wmi_msg_type {
        DATA_MSGTYPE = 0x0,
@@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type {
 #define WMI_DATA_HDR_META_MASK      0x7
 #define WMI_DATA_HDR_META_SHIFT     13
 
+#define WMI_DATA_HDR_IF_IDX_MASK    0xF
+
 struct wmi_data_hdr {
        s8 rssi;
 
@@ -208,6 +199,12 @@ struct wmi_data_hdr {
         * b15:b13      - META_DATA_VERSION 0 - 7
         */
        __le16 info2;
+
+       /*
+        * usage of info3, 16-bit:
+        * b3:b0        - Interface index
+        * b15:b4       - Reserved
+        */
        __le16 info3;
 } __packed;
 
@@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
                               WMI_DATA_HDR_META_MASK;
 }
 
+static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
+{
+       return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
+}
+
 /* Tx meta version definitions */
 #define WMI_MAX_TX_META_SZ     12
 #define WMI_META_VERSION_1     0x01
@@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 {
        u8 csum_flags;
 } __packed;
 
+#define WMI_CMD_HDR_IF_ID_MASK 0xF
+
 /* Control Path */
 struct wmi_cmd_hdr {
        __le16 cmd_id;
@@ -312,6 +316,11 @@ struct wmi_cmd_hdr {
        __le16 reserved;
 } __packed;
 
+static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
+{
+       return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
+}
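As the two inline helpers above show, the interface index travels in the low four bits of a little-endian info word in both the data and command headers. A standalone sketch of that extraction, using only libc rather than the kernel's le16_to_cpu, is shown below; the header bytes are invented for illustration.

/*
 * Hedged sketch: recover the 4-bit interface index from a raw
 * little-endian info word.
 */
#include <stdint.h>
#include <stdio.h>

#define IF_IDX_MASK 0xF

static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t info1[2] = { 0x23, 0x00 };	/* example header bytes (LE) */

	/* only b3:b0 carry the interface index; the rest is other info */
	printf("if_idx = %u\n", read_le16(info1) & IF_IDX_MASK);
	return 0;
}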
+
 /* List of WMI commands */
 enum wmi_cmd_id {
        WMI_CONNECT_CMDID = 0x0001,
@@ -576,9 +585,6 @@ enum auth_mode {
        WPA2_AUTH_CCKM = 0x40,
 };
 
-#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
-#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
-
 #define WMI_MIN_KEY_INDEX   0
 #define WMI_MAX_KEY_INDEX   3
 
@@ -617,6 +623,7 @@ enum wmi_connect_ctrl_flags_bits {
        CONNECT_CSA_FOLLOW_BSS = 0x0020,
        CONNECT_DO_WPA_OFFLOAD = 0x0040,
        CONNECT_DO_NOT_DEAUTH = 0x0080,
+       CONNECT_WPS_FLAG = 0x0100,
 };
 
 struct wmi_connect_cmd {
@@ -1365,14 +1372,20 @@ enum wmi_roam_ctrl {
        WMI_SET_LRSSI_SCAN_PARAMS,
 };
 
+enum wmi_roam_mode {
+       WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+       WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+       WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
 struct bss_bias {
        u8 bssid[ETH_ALEN];
-       u8  bias;
+       s8 bias;
 } __packed;
 
 struct bss_bias_info {
        u8 num_bss;
-       struct bss_bias bss_bias[1];
+       struct bss_bias bss_bias[0];
 } __packed;
 
 struct low_rssi_scan_params {
@@ -1385,10 +1398,11 @@ struct low_rssi_scan_params {
 
 struct roam_ctrl_cmd {
        union {
-               u8 bssid[ETH_ALEN];
-               u8 roam_mode;
-               struct bss_bias_info bss;
-               struct low_rssi_scan_params params;
+               u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+               u8 roam_mode; /* WMI_SET_ROAM_MODE */
+               struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+               struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
+                                                    */
        } __packed info;
        u8 roam_ctrl;
 } __packed;
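The annotated union above is a tagged union: roam_ctrl selects which member of info the target should interpret. A simplified, self-contained sketch of that pattern is shown next; the selector value and struct layout are stand-ins, not the driver's wire definitions, and only the roam-mode case from the enum above is exercised.

/*
 * Hedged sketch of the tagged-union pattern used by roam_ctrl_cmd.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum roam_ctrl { SET_ROAM_MODE = 2 };	/* stand-in selector value */

struct roam_cmd {
	union {
		uint8_t bssid[6];	/* force-roam target */
		uint8_t roam_mode;	/* roam mode selector */
	} info;
	uint8_t roam_ctrl;		/* says which member is valid */
};

int main(void)
{
	struct roam_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.roam_ctrl = SET_ROAM_MODE;
	cmd.info.roam_mode = 3;		/* e.g. lock to the current BSS */

	printf("ctrl=%u mode=%u\n", cmd.roam_ctrl, cmd.info.roam_mode);
	return 0;
}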
@@ -1455,6 +1469,10 @@ struct wmi_tkip_micerr_event {
        u8 is_mcast;
 } __packed;
 
+enum wmi_scan_status {
+       WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
 /* WMI_SCAN_COMPLETE_EVENTID */
 struct wmi_scan_complete_event {
        a_sle32 status;
@@ -1635,6 +1653,12 @@ struct wmi_bss_roam_info {
        u8 reserved;
 } __packed;
 
+struct wmi_target_roam_tbl {
+       __le16 roam_mode;
+       __le16 num_entries;
+       struct wmi_bss_roam_info info[];
+} __packed;
+
 /* WMI_CAC_EVENTID */
 enum cac_indication {
        CAC_INDICATION_ADMISSION = 0x00,
@@ -1771,7 +1795,6 @@ struct wmi_set_appie_cmd {
 #define WSC_REG_ACTIVE     1
 #define WSC_REG_INACTIVE   0
 
-#define WOW_MAX_FILTER_LISTS    1
 #define WOW_MAX_FILTERS_PER_LIST 4
 #define WOW_PATTERN_SIZE        64
 #define WOW_MASK_SIZE           64
@@ -1794,17 +1817,52 @@ struct wmi_set_ip_cmd {
        __le32 ips[MAX_IP_ADDRS];
 } __packed;
 
-/* WMI_GET_WOW_LIST_CMD reply  */
-struct wmi_get_wow_list_reply {
-       /* number of patterns in reply */
-       u8 num_filters;
+enum ath6kl_wow_filters {
+       WOW_FILTER_SSID                 = BIT(0),
+       WOW_FILTER_OPTION_MAGIC_PACKET  = BIT(2),
+       WOW_FILTER_OPTION_EAP_REQ       = BIT(3),
+       WOW_FILTER_OPTION_PATTERNS      = BIT(4),
+       WOW_FILTER_OPTION_OFFLOAD_ARP   = BIT(5),
+       WOW_FILTER_OPTION_OFFLOAD_NS    = BIT(6),
+       WOW_FILTER_OPTION_OFFLOAD_GTK   = BIT(7),
+       WOW_FILTER_OPTION_8021X_4WAYHS  = BIT(8),
+       WOW_FILTER_OPTION_NLO_DISCVRY   = BIT(9),
+       WOW_FILTER_OPTION_NWK_DISASSOC  = BIT(10),
+       WOW_FILTER_OPTION_GTK_ERROR     = BIT(11),
+       WOW_FILTER_OPTION_TEST_MODE     = BIT(15),
+};
 
-       /* this is filter # x of total num_filters */
-       u8 this_filter_num;
+enum ath6kl_host_mode {
+       ATH6KL_HOST_MODE_AWAKE,
+       ATH6KL_HOST_MODE_ASLEEP,
+};
+
+struct wmi_set_host_sleep_mode_cmd {
+       __le32 awake;
+       __le32 asleep;
+} __packed;
+
+enum ath6kl_wow_mode {
+       ATH6KL_WOW_MODE_DISABLE,
+       ATH6KL_WOW_MODE_ENABLE,
+};
+
+struct wmi_set_wow_mode_cmd {
+       __le32 enable_wow;
+       __le32 filter;
+       __le16 host_req_delay;
+} __packed;
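The filter word of the wow-mode command is composed from the bit flags defined above. A small stand-alone sketch of that composition follows; the delay value is purely illustrative and endian conversion plus the real command struct are omitted.

/*
 * Hedged sketch: build the enable/filter values for a wake-on-wireless
 * enable from the flag bits defined above.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* subset of the WOW filter flags defined above */
#define FILTER_OPTION_MAGIC_PACKET  BIT(2)
#define FILTER_OPTION_PATTERNS      BIT(4)

int main(void)
{
	uint32_t enable_wow = 1;	/* ATH6KL_WOW_MODE_ENABLE */
	uint32_t filter = FILTER_OPTION_MAGIC_PACKET |
			  FILTER_OPTION_PATTERNS;
	uint16_t host_req_delay = 500;	/* illustrative value */

	printf("wow=%u filter=0x%x delay=%u\n",
	       enable_wow, filter, host_req_delay);
	return 0;
}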
+
+struct wmi_add_wow_pattern_cmd {
+       u8 filter_list_id;
+       u8 filter_size;
+       u8 filter_offset;
+       u8 filter[0];
+} __packed;
 
-       u8 wow_mode;
-       u8 host_mode;
-       struct wow_filter wow_filters[1];
+struct wmi_del_wow_pattern_cmd {
+       __le16 filter_list_id;
+       __le16 filter_id;
 } __packed;
 
 /* WMI_SET_AKMP_PARAMS_CMD */
@@ -2163,20 +2221,21 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
 int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
                            u8 msg_type, bool more_data,
                            enum wmi_data_hdr_data_type data_type,
-                           u8 meta_ver, void *tx_meta_info);
+                           u8 meta_ver, void *tx_meta_info, u8 if_idx);
 
 int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
 int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
-                                      u32 layer2_priority, bool wmm_enabled,
-                                      u8 *ac);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+                                      struct sk_buff *skb, u32 layer2_priority,
+                                      bool wmm_enabled, u8 *ac);
 
 int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
 
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
                        enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
 
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+                          enum network_type nw_type,
                           enum dot11_auth_mode dot11_auth_mode,
                           enum auth_mode auth_mode,
                           enum crypto_type pairwise_crypto,
@@ -2185,98 +2244,124 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
                           u8 group_crypto_len, int ssid_len, u8 *ssid,
                           u8 *bssid, u16 channel, u32 ctrl_flags);
 
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+                            u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+                            enum wmi_scan_type scan_type,
                             u32 force_fgscan, u32 is_legacy,
                             u32 home_dwell_time, u32 force_scan_interval,
                             s8 num_chan, u16 *ch_list);
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
                              u16 fg_end_sec, u16 bg_sec,
                              u16 minact_chdw_msec, u16 maxact_chdw_msec,
                              u16 pas_chdw_msec, u8 short_scan_ratio,
                              u8 scan_ctrl_flag, u32 max_dfsch_act_time,
                              u16 maxact_scan_per_ssid);
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
+                            u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
                              u8 ssid_len, u8 *ssid);
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+                                 u16 listen_interval,
                                  u16 listen_beacons);
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
                            u16 ps_poll_num, u16 dtim_policy,
                            u16 tx_wakup_policy, u16 num_tx_to_wakeup,
                            u16 ps_fail_event_policy);
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
                                  struct wmi_create_pstream_cmd *pstream);
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+                                 u8 tsid);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
 
 int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
                                 u8 preamble_policy);
 
 int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
 int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
 
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
                          enum crypto_type key_type,
                          u8 key_usage, u8 key_len,
-                         u8 *key_rsc, u8 *key_material,
+                         u8 *key_rsc, unsigned int key_rsc_len,
+                         u8 *key_material,
                          u8 key_op_ctrl, u8 *mac_addr,
                          enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
                            const u8 *pmkid, bool set);
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
 
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+                                u8 keep_alive_intvl);
 int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
 
 s32 ath6kl_wmi_get_rate(s8 rate_index);
 
 int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+                                      enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+                               enum ath6kl_wow_mode wow_mode,
+                               u32 filter, u16 host_req_delay);
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+                                  u8 list_id, u8 filter_size,
+                                  u8 filter_offset, u8 *filter, u8 *mask);
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+                                  u16 list_id, u16 filter_id);
 int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
 
 /* AP mode */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+                                struct wmi_connect_cmd *p);
 
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason);
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+                          const u8 *mac, u16 reason);
 
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
 
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+                                      u8 rx_meta_version,
                                       bool rx_dot11_hdr, bool defrag_on_host);
 
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
-                            u8 ie_len);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+                            const u8 *ie, u8 ie_len);
 
 /* P2P */
 int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
 
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur);
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+                                 u32 dur);
 
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
-                              const u8 *data, u16 data_len);
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+                              u32 wait, const u8 *data, u16 data_len);
 
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
-                                      const u8 *dst,
-                                      const u8 *data, u16 data_len);
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+                                      const u8 *dst, const u8 *data,
+                                      u16 data_len);
 
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable);
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
 
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags);
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
 
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi);
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
 
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
-                            u8 ie_len);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+                            const u8 *ie, u8 ie_len);
 
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
 void *ath6kl_wmi_init(struct ath6kl *devt);
 void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
 
 #endif /* WMI_H */
index d9c08c6..7b4c074 100644 (file)
@@ -25,6 +25,7 @@ config ATH9K
 
 config ATH9K_PCI
        bool "Atheros ath9k PCI/PCIe bus support"
+       default y
        depends on ATH9K && PCI
        ---help---
          This option enables the PCI bus support in ath9k.
index 36ed3c4..390797d 100644 (file)
@@ -4,6 +4,7 @@ ath9k-y +=      beacon.o \
                main.o \
                recv.o \
                xmit.o \
+               mci.o \
 
 ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_ATH9K_PCI) += pci.o
@@ -33,7 +34,8 @@ ath9k_hw-y:=  \
                ar9002_mac.o \
                ar9003_mac.o \
                ar9003_eeprom.o \
-               ar9003_paprd.o
+               ar9003_paprd.o \
+               ar9003_mci.o
 
 obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
 
index 88279e3..157337f 100644 (file)
@@ -203,7 +203,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
                        i);
 
                ath_dbg(common, ATH_DBG_CALIBRATE,
-                       "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+                       "Original: Chn %d iq_corr_meas = 0x%08x\n",
                        i, ah->totalIqCorrMeas[i]);
 
                iqCorrNeg = 0;
index 12a730d..23b3a6c 100644 (file)
@@ -18,6 +18,7 @@
 #include "hw-ops.h"
 #include "ar9003_phy.h"
 #include "ar9003_rtt.h"
+#include "ar9003_mci.h"
 
 #define MAX_MEASUREMENT        MAX_IQCAL_MEASUREMENT
 #define MAX_MAG_DELTA  11
@@ -225,7 +226,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
                        i);
 
                ath_dbg(common, ATH_DBG_CALIBRATE,
-                       "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+                       "Original: Chn %d iq_corr_meas = 0x%08x\n",
                        i, ah->totalIqCorrMeas[i]);
 
                iqCorrNeg = 0;
@@ -824,7 +825,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
                                                chan_info_tab[i] + offset);
 
                                ath_dbg(common, ATH_DBG_CALIBRATE,
-                                       "IQ RES[%d]=0x%x"
+                                       "IQ_RES[%d]=0x%x "
                                        "IQ_RES[%d]=0x%x\n",
                                        idx, iq_res[idx], idx + 1,
                                        iq_res[idx + 1]);
@@ -934,10 +935,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_cal_data *caldata = ah->caldata;
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        bool txiqcal_done = false, txclcal_done = false;
        bool is_reusable = true, status = true;
        bool run_rtt_cal = false, run_agc_cal;
        bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+       bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
        u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
                                          AR_PHY_AGC_CONTROL_FLTR_CAL   |
                                          AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1005,6 +1008,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
        } else if (caldata && !caldata->done_txiqcal_once)
                run_agc_cal = true;
 
+       if (mci && IS_CHAN_2GHZ(chan) &&
+           (mci_hw->bt_state  == MCI_BT_AWAKE) &&
+           run_agc_cal &&
+           !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+               u32 pld[4] = {0, 0, 0, 0};
+
+               /* send CAL_REQ only when BT is AWAKE. */
+               ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+                       mci_hw->wlan_cal_seq);
+               MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+               pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+               ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+               /* Wait BT_CAL_GRANT for 50ms */
+               ath_dbg(common, ATH_DBG_MCI, "MCI wait for BT_CAL_GRANT");
+
+               if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+                       ath_dbg(common, ATH_DBG_MCI, "MCI got BT_CAL_GRANT");
+               else {
+                       is_reusable = false;
+                       ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is not responding");
+               }
+       }
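The block above follows a request/grant handshake: send WLAN_CAL_REQ, then wait up to 50 ms for BT_CAL_GRANT, and mark the calibration as not reusable if the grant never arrives. A hedged, standalone sketch of that polling pattern is shown below; check_grant() is a stub standing in for the real GPM wait, not the driver's ar9003_mci_wait_for_gpm.

/*
 * Hedged sketch: poll for a grant with a bounded timeout.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool check_grant(void)
{
	return false;	/* stub: pretend the peer never answers */
}

static bool wait_for_grant(unsigned int timeout_us, unsigned int step_us)
{
	unsigned int waited = 0;

	while (waited < timeout_us) {
		if (check_grant())
			return true;
		usleep(step_us);
		waited += step_us;
	}
	return false;
}

int main(void)
{
	bool is_reusable = true;

	/* 50 ms total, polled in 1 ms steps, mirroring the 50000 us above */
	if (!wait_for_grant(50000, 1000)) {
		is_reusable = false;
		puts("no grant received; calibration result not reusable");
	}
	return is_reusable ? 0 : 1;
}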
+
        txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
        REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
        udelay(5);
@@ -1022,6 +1050,21 @@ skip_tx_iqcal:
                                       AR_PHY_AGC_CONTROL_CAL,
                                       0, AH_WAIT_TIMEOUT);
        }
+
+       if (mci && IS_CHAN_2GHZ(chan) &&
+           (mci_hw->bt_state  == MCI_BT_AWAKE) &&
+           run_agc_cal &&
+           !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+               u32 pld[4] = {0, 0, 0, 0};
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+                       mci_hw->wlan_cal_done);
+               MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+               pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+               ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+       }
+
        if (rtt && !run_rtt_cal) {
                agc_ctrl |= agc_supp_cals;
                REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
index 3b262ba..4ba6f52 100644 (file)
@@ -121,10 +121,8 @@ static const struct ar9300_eeprom ar9300_default = {
                 * if the register is per chain
                 */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {1, 1, 1},/* 3 chain */
-               .db_stage2 = {1, 1, 1}, /* 3 chain  */
-               .db_stage3 = {0, 0, 0},
-               .db_stage4 = {0, 0, 0},
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -144,7 +142,7 @@ static const struct ar9300_eeprom ar9300_default = {
         },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -323,10 +321,8 @@ static const struct ar9300_eeprom ar9300_default = {
                .spurChans = {0, 0, 0, 0, 0},
                /* noiseFloorThreshCh Check if the register is per chain */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {3, 3, 3}, /* 3 chain */
-               .db_stage2 = {3, 3, 3}, /* 3 chain */
-               .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-               .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -698,10 +694,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
                 * if the register is per chain
                 */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {1, 1, 1},/* 3 chain */
-               .db_stage2 = {1, 1, 1}, /* 3 chain  */
-               .db_stage3 = {0, 0, 0},
-               .db_stage4 = {0, 0, 0},
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -721,7 +715,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
         },
         .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
         },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -900,10 +894,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
                .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
                /* noiseFloorThreshCh Check if the register is per chain */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {3, 3, 3}, /* 3 chain */
-               .db_stage2 = {3, 3, 3}, /* 3 chain */
-               .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-               .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0xf,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -1276,10 +1268,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
                 * if the register is per chain
                 */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {1, 1, 1},/* 3 chain */
-               .db_stage2 = {1, 1, 1}, /* 3 chain  */
-               .db_stage3 = {0, 0, 0},
-               .db_stage4 = {0, 0, 0},
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -1291,20 +1281,20 @@ static const struct ar9300_eeprom ar9300_h112 = {
                .txEndToRxOn = 0x2,
                .txFrameToXpaOn = 0xe,
                .thresh62 = 28,
-               .papdRateMaskHt20 = LE32(0x80c080),
-               .papdRateMaskHt40 = LE32(0x80c080),
+               .papdRateMaskHt20 = LE32(0x0c80c080),
+               .papdRateMaskHt40 = LE32(0x0080c080),
                .futureModal = {
                        0, 0, 0, 0, 0, 0, 0, 0,
                },
        },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
                FREQ2FBIN(2437, 1),
-               FREQ2FBIN(2472, 1),
+               FREQ2FBIN(2462, 1),
        },
        /* ar9300_cal_data_per_freq_op_loop 2g */
        .calPierData2G = {
@@ -1314,7 +1304,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
        },
        .calTarget_freqbin_Cck = {
                FREQ2FBIN(2412, 1),
-               FREQ2FBIN(2484, 1),
+               FREQ2FBIN(2472, 1),
        },
        .calTarget_freqbin_2G = {
                FREQ2FBIN(2412, 1),
@@ -1478,10 +1468,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
                .spurChans = {0, 0, 0, 0, 0},
                /* noiseFloorThreshCh Check if the register is per chain */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {3, 3, 3}, /* 3 chain */
-               .db_stage2 = {3, 3, 3}, /* 3 chain */
-               .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-               .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -1515,7 +1503,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
                FREQ2FBIN(5500, 0),
                FREQ2FBIN(5600, 0),
                FREQ2FBIN(5700, 0),
-               FREQ2FBIN(5825, 0)
+               FREQ2FBIN(5785, 0)
        },
        .calPierData5G = {
                {
@@ -1854,10 +1842,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
                 * if the register is per chain
                 */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {1, 1, 1},/* 3 chain */
-               .db_stage2 = {1, 1, 1}, /* 3 chain  */
-               .db_stage3 = {0, 0, 0},
-               .db_stage4 = {0, 0, 0},
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -1877,7 +1863,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
        },
        .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
        },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
@@ -2056,10 +2042,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
                .spurChans = {0, 0, 0, 0, 0},
                /* noiseFloorThreshch check if the register is per chain */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {3, 3, 3}, /* 3 chain */
-               .db_stage2 = {3, 3, 3}, /* 3 chain */
-               .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-               .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -2431,10 +2415,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
                 * if the register is per chain
                 */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {1, 1, 1},/* 3 chain */
-               .db_stage2 = {1, 1, 1}, /* 3 chain  */
-               .db_stage3 = {0, 0, 0},
-               .db_stage4 = {0, 0, 0},
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -2454,12 +2436,12 @@ static const struct ar9300_eeprom ar9300_h116 = {
         },
         .base_ext1 = {
                .ant_div_control = 0,
-               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+               .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
         },
        .calFreqPier2G = {
                FREQ2FBIN(2412, 1),
                FREQ2FBIN(2437, 1),
-               FREQ2FBIN(2472, 1),
+               FREQ2FBIN(2462, 1),
         },
        /* ar9300_cal_data_per_freq_op_loop 2g */
        .calPierData2G = {
@@ -2633,10 +2615,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
                .spurChans = {0, 0, 0, 0, 0},
                /* noiseFloorThreshCh Check if the register is per chain */
                .noiseFloorThreshCh = {-1, 0, 0},
-               .ob = {3, 3, 3}, /* 3 chain */
-               .db_stage2 = {3, 3, 3}, /* 3 chain */
-               .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
-               .db_stage4 = {3, 3, 3},  /* don't exist for 2G */
+               .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+               .quick_drop = 0,
                .xpaBiasLvl = 0,
                .txFrameToDataStart = 0x0e,
                .txFrameToPaOn = 0x0e,
@@ -2663,7 +2643,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
                .xatten1MarginHigh = {0, 0, 0}
         },
        .calFreqPier5G = {
-               FREQ2FBIN(5180, 0),
+               FREQ2FBIN(5160, 0),
                FREQ2FBIN(5220, 0),
                FREQ2FBIN(5320, 0),
                FREQ2FBIN(5400, 0),
@@ -3023,6 +3003,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
                return eep->modalHeader5G.antennaGain;
        case EEP_ANTENNA_GAIN_2G:
                return eep->modalHeader2G.antennaGain;
+       case EEP_QUICK_DROP:
+               return pBase->miscConfiguration & BIT(1);
        default:
                return 0;
        }
@@ -3428,25 +3410,14 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
        PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
        PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
        PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+       PR_EEP("Quick Drop", modal_hdr->quick_drop);
+       PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
        PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
        PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
        PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
        PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
        PR_EEP("txClip", modal_hdr->txClip);
        PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
-       PR_EEP("Chain0 ob", modal_hdr->ob[0]);
-       PR_EEP("Chain1 ob", modal_hdr->ob[1]);
-       PR_EEP("Chain2 ob", modal_hdr->ob[2]);
-
-       PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
-       PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
-       PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
-       PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
-       PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
-       PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
-       PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
-       PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
-       PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
 
        return len;
 }
@@ -3503,6 +3474,7 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
        PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
        PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+       PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
        PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
        PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
        PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
@@ -3965,6 +3937,40 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
        }
 }
 
+static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+{
+       struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+       int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+       s32 t[3], f[3] = {5180, 5500, 5785};
+
+       if (!quick_drop)
+               return;
+
+       if (freq < 4000)
+               quick_drop = eep->modalHeader2G.quick_drop;
+       else {
+               t[0] = eep->base_ext1.quick_drop_low;
+               t[1] = eep->modalHeader5G.quick_drop;
+               t[2] = eep->base_ext1.quick_drop_high;
+               quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+       }
+       REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+}
+
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+{
+       struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+       u32 value;
+
+       value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
+                               eep->modalHeader5G.txEndToXpaOff;
+
+       REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+                     AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
+       REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+                     AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
+}
+
 static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
                                             struct ath9k_channel *chan)
 {
@@ -3972,10 +3978,12 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
        ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
        ar9003_hw_drive_strength_apply(ah);
        ar9003_hw_atten_apply(ah, chan);
+       ar9003_hw_quick_drop_apply(ah, chan->channel);
        if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
                ar9003_hw_internal_regulator_apply(ah);
        if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
                ar9003_hw_apply_tuning_caps(ah);
+       ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
 }
 
 static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4771,7 +4779,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        int i;
        u16 scaledPower = 0, minCtlPower;
        static const u16 ctlModesFor11a[] = {
@@ -4872,6 +4880,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
                        ctlNum = AR9300_NUM_CTLS_5G;
                }
 
+               twiceMaxEdgePower = MAX_RATE_POWER;
                for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
                        ath_dbg(common, ATH_DBG_REGULATORY,
                                "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
@@ -5051,6 +5060,8 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                        regulatory->max_power_level = targetPowerValT2[i];
        }
 
+       ath9k_hw_update_regulatory_maxpower(ah);
+
        if (test)
                return;
 
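
The ar9003_hw_quick_drop_apply() hunk above takes the 2 GHz value straight from the modal header and, for 5 GHz, interpolates between the quick_drop_low / quick_drop / quick_drop_high anchors at 5180, 5500 and 5785 MHz before programming AR_PHY_AGC_QUICK_DROP. A minimal standalone sketch of that interpolation step, assuming ar9003_hw_power_interpolate() performs plain piecewise-linear interpolation; the t[] values below are made up for illustration:

#include <stdio.h>

/*
 * Piecewise-linear interpolation between three (frequency, value) anchors,
 * clamped outside the range.  Hypothetical stand-in for what the driver
 * delegates to ar9003_hw_power_interpolate(); values are illustrative only.
 */
static int interp_quick_drop(int freq, const int f[3], const int t[3])
{
	int i;

	if (freq <= f[0])
		return t[0];
	if (freq >= f[2])
		return t[2];

	for (i = 0; i < 2; i++)
		if (freq <= f[i + 1])
			return t[i] + (freq - f[i]) * (t[i + 1] - t[i]) /
				      (f[i + 1] - f[i]);

	return t[2];
}

int main(void)
{
	const int f[3] = { 5180, 5500, 5785 };	/* anchors used above */
	const int t[3] = { -4, -6, -8 };	/* made-up EEPROM values */

	printf("quick_drop @ 5400 MHz = %d\n", interp_quick_drop(5400, f, t));
	return 0;
}
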
index 6335a86..bb223fe 100644 (file)
@@ -216,10 +216,8 @@ struct ar9300_modal_eep_header {
        u8 spurChans[AR_EEPROM_MODAL_SPURS];
        /* 3  Check if the register is per chain */
        int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
-       u8 ob[AR9300_MAX_CHAINS];
-       u8 db_stage2[AR9300_MAX_CHAINS];
-       u8 db_stage3[AR9300_MAX_CHAINS];
-       u8 db_stage4[AR9300_MAX_CHAINS];
+       u8 reserved[11];
+       int8_t quick_drop;
        u8 xpaBiasLvl;
        u8 txFrameToDataStart;
        u8 txFrameToPaOn;
@@ -269,7 +267,9 @@ struct cal_ctl_data_5g {
 
 struct ar9300_BaseExtension_1 {
        u8 ant_div_control;
-       u8 future[13];
+       u8 future[11];
+       int8_t quick_drop_low;
+       int8_t quick_drop_high;
 } __packed;
 
 struct ar9300_BaseExtension_2 {
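
The two hunks above swap the old ob/db_stage* fields for reserved[11] plus quick_drop, and carve quick_drop_low/quick_drop_high out of base_ext1.future[], without changing the byte footprint of the packed structures, so the on-EEPROM layout does not shift. A standalone compile-time restatement of that arithmetic (AR9300_MAX_CHAINS is 3 in this driver; everything else here is only illustrative):

#include <assert.h>
#include <stdint.h>

#define AR9300_MAX_CHAINS	3

/* old modal header: ob[3] + db_stage2[3] + db_stage3[3] + db_stage4[3] */
/* new modal header: reserved[11] + quick_drop                          */
static_assert(4 * AR9300_MAX_CHAINS == 11 + sizeof(int8_t),
	      "modal header footprint unchanged");

/* old base_ext1: future[13]                                            */
/* new base_ext1: future[11] + quick_drop_low + quick_drop_high         */
static_assert(13 == 11 + 2 * sizeof(int8_t),
	      "base_ext1 footprint unchanged");

int main(void)
{
	return 0;	/* compiles only if the layouts still line up */
}
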
index ccde784..95587e3 100644 (file)
@@ -175,15 +175,47 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
        u32 isr = 0;
        u32 mask2 = 0;
        struct ath9k_hw_capabilities *pCap = &ah->caps;
-       u32 sync_cause = 0;
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 sync_cause = 0, async_cause;
 
-       if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+       async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+       if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
                if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
                                == AR_RTC_STATUS_ON)
                        isr = REG_READ(ah, AR_ISR);
        }
 
+       if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+               u32 raw_intr, rx_msg_intr;
+
+               rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+               raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+               if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI gets 0xdeadbeef during MCI int processing, "
+                               "new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
+                               "raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+                               raw_intr, rx_msg_intr, mci->raw_intr,
+                               mci->rx_msg_intr);
+               else {
+                       mci->rx_msg_intr |= rx_msg_intr;
+                       mci->raw_intr |= raw_intr;
+                       *masked |= ATH9K_INT_MCI;
+
+                       if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+                               mci->cont_status =
+                                       REG_READ(ah, AR_MCI_CONT_STATUS);
+
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+                       ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
+
+               }
+       }
+
        sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
 
        *masked = 0;
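
The MCI branch added to ar9003_hw_get_isr() above follows a read, accumulate, acknowledge pattern: read the raw cause registers, OR them into the ath9k_hw_mci state (which ar9003_mci_get_interrupt() in the new ar9003_mci.c below hands to the bottom half and then clears), and write the same values back to ack them. A minimal standalone sketch of that pattern, with reg_read()/reg_write() and the interrupt bit standing in for REG_READ()/REG_WRITE() and ATH9K_INT_MCI:

#include <stdint.h>
#include <stdio.h>

/* Fake write-1-to-clear "raw cause" registers standing in for
 * AR_MCI_INTERRUPT_RAW and AR_MCI_INTERRUPT_RX_MSG_RAW.  Illustrative only. */
static uint32_t fake_raw[2] = { 0x5, 0x3 };

static uint32_t reg_read(int r)			{ return fake_raw[r]; }
static void reg_write(int r, uint32_t v)	{ fake_raw[r] &= ~v; }

struct mci_soft_state { uint32_t raw_intr, rx_msg_intr; };

static void mci_collect(struct mci_soft_state *mci, uint32_t *masked)
{
	uint32_t raw = reg_read(0);
	uint32_t rx_msg = reg_read(1);

	mci->raw_intr |= raw;		/* accumulate for the bottom half */
	mci->rx_msg_intr |= rx_msg;
	*masked |= 1u << 7;		/* placeholder for ATH9K_INT_MCI */

	reg_write(0, raw);		/* ack exactly the bits that were read */
	reg_write(1, rx_msg);
}

int main(void)
{
	struct mci_soft_state mci = { 0, 0 };
	uint32_t masked = 0;

	mci_collect(&mci, &masked);
	printf("raw=0x%x rx_msg=0x%x masked=0x%x cleared=0x%x/0x%x\n",
	       mci.raw_intr, mci.rx_msg_intr, masked, fake_raw[0], fake_raw[1]);
	return 0;
}
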
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644 (file)
index 0000000..8599822
--- /dev/null
@@ -0,0 +1,1464 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+       if (!AR_SREV_9462_20(ah))
+               return;
+
+       REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+                     AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+       udelay(1);
+       REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+                     AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+                                       u32 bit_position, int time_out)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       while (time_out) {
+
+               if (REG_READ(ah, address) & bit_position) {
+
+                       REG_WRITE(ah, address, bit_position);
+
+                       if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+                               if (bit_position &
+                                   AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+                                       ar9003_mci_reset_req_wakeup(ah);
+
+                               if (bit_position &
+                                   (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+                                    AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+                                       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                                       AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+                               REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                                         AR_MCI_INTERRUPT_RX_MSG);
+                       }
+                       break;
+               }
+
+               udelay(10);
+               time_out -= 10;
+
+               if (time_out < 0)
+                       break;
+       }
+
+       if (time_out <= 0) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Wait for Reg 0x%08x = 0x%08x timeout.\n",
+                       address, bit_position);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x",
+                       REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+                       REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+               time_out = 0;
+       }
+
+       return time_out;
+}
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+       u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+       ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+                               wait_done, false);
+       udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+       u32 payload = 0x00000000;
+
+       ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+                               wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+       ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+                               NULL, 0, wait_done, false);
+       udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+       ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+                               NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+       u32 payload = 0x70000000;
+
+       ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+                               wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+       ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+                               MCI_FLAG_DISABLE_TIMESTAMP,
+                               NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+                                              bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+
+       if (!mci->bt_version_known &&
+                       (mci->bt_state != MCI_BT_SLEEP)) {
+               ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version query\n");
+               MCI_GPM_SET_TYPE_OPCODE(payload,
+                               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+               ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                               wait_done, true);
+       }
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+                                                    bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex version response\n");
+       MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+                       MCI_GPM_COEX_VERSION_RESPONSE);
+       *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+               mci->wlan_ver_major;
+       *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+               mci->wlan_ver_minor;
+       ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+                                                 bool wait_done)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 *payload = &mci->wlan_channels[0];
+
+       if ((mci->wlan_channels_update == true) &&
+                       (mci->bt_state != MCI_BT_SLEEP)) {
+               MCI_GPM_SET_TYPE_OPCODE(payload,
+               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+               ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                                       wait_done, true);
+               MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+       }
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+                                               bool wait_done, u8 query_type)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+       bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+                                            MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+       if (mci->bt_state != MCI_BT_SLEEP) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Send Coex BT Status Query 0x%02X\n", query_type);
+
+               MCI_GPM_SET_TYPE_OPCODE(payload,
+                               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+               *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+               /*
+                * If the bt_status_query message is not sent successfully,
+                * then need_flush_btinfo should be set again.
+                */
+               if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                                            wait_done, true)) {
+                       if (query_btinfo) {
+                               mci->need_flush_btinfo = true;
+
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI send bt_status_query fail, "
+                                       "set flush flag again\n");
+                       }
+               }
+
+               if (query_btinfo)
+                       mci->query_bt = false;
+       }
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+                                     bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 payload[4] = {0, 0, 0, 0};
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send Coex %s BT GPM.\n",
+               (halt) ? "halt" : "unhalt");
+
+       MCI_GPM_SET_TYPE_OPCODE(payload,
+                               MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+       if (halt) {
+               mci->query_bt = true;
+               /* Send the next unhalt regardless of whether the halt was sent */
+               mci->unhalt_bt_gpm = true;
+               mci->need_flush_btinfo = true;
+               *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+                       MCI_GPM_COEX_BT_GPM_HALT;
+       } else
+               *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+                       MCI_GPM_COEX_BT_GPM_UNHALT;
+
+       ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 saved_mci_int_en;
+       u32 mci_timeout = 150;
+
+       mci->bt_state = MCI_BT_SLEEP;
+       saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                 REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                 REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+       /* Remote Reset */
+       ath_dbg(common, ATH_DBG_MCI, "MCI Reset sequence start\n");
+       ath_dbg(common, ATH_DBG_MCI, "MCI send REMOTE_RESET\n");
+       ar9003_mci_remote_reset(ah, true);
+
+       /*
+        * This delay is required for the reset delay worst case value 255 in
+        * This delay is required to cover the worst-case reset delay value
+        * of 255 in the MCI_COMMAND2 register.
+
+       if (AR_SREV_9462_10(ah))
+               udelay(252);
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send REQ_WAKE to remote (BT)\n");
+       ar9003_mci_send_req_wake(ah, true);
+
+       if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+                               "MCI SYS_WAKING from remote (BT)\n");
+               mci->bt_state = MCI_BT_AWAKE;
+
+               if (AR_SREV_9462_10(ah))
+                       udelay(10);
+               /*
+                * We don't need to send another remote_reset at this point.
+                * If BT received the first remote_reset, its HW has been
+                * cleaned up, it is able to receive req_wake, and its HW
+                * responds with sys_waking; in that case WLAN receives BT's
+                * HW sys_waking. Otherwise, if BT SW missed the initial
+                * remote_reset, that remote_reset still cleaned up BT's MCI
+                * RX, the req_wake wakes BT up, and BT SW responds to the
+                * req_wake with a remote_reset and sys_waking; in that case
+                * WLAN receives BT's SW sys_waking. Either way BT's RX is
+                * cleaned up, so we don't need to reply to BT's remote_reset
+                * now, if any. Similarly, the fact that WLAN received BT's
+                * sys_waking at all means WLAN's RX is fine too.
+                */
+
+               /* Send SYS_WAKING to BT */
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI send SW SYS_WAKING to remote BT\n");
+
+               ar9003_mci_send_sys_waking(ah, true);
+               udelay(10);
+
+               /*
+                * Set BT priority interrupt value to be 0xff to
+                * avoid having too many BT PRIORITY interrupts.
+                */
+
+               REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+               REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+               /*
+                * A contention reset will be received after sending out
+                * sys_waking. Also BT priority interrupt bits will be set.
+                * Clear those bits before the next step.
+                */
+
+               REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                         AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+               REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                         AR_MCI_INTERRUPT_BT_PRI);
+
+               if (AR_SREV_9462_10(ah) || mci->is_2g) {
+                       /* Send LNA_TRANS */
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI send LNA_TRANS to BT\n");
+                       ar9003_mci_send_lna_transfer(ah, true);
+                       udelay(5);
+               }
+
+               if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+                                           !mci->update_2g5g)) {
+                       if (ar9003_mci_wait_for_interrupt(ah,
+                               AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+                               mci_timeout))
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI WLAN has control over the LNA & "
+                                       "BT obeys it\n");
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI BT didn't respond to "
+                                       "LNA_TRANS\n");
+               }
+
+               if (AR_SREV_9462_10(ah)) {
+                       /* Send another remote_reset to deassert BT clk_req. */
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI another remote_reset to "
+                               "deassert clk_req\n");
+                       ar9003_mci_remote_reset(ah, true);
+                       udelay(252);
+               }
+       }
+
+       /* Clear the extra redundant SYS_WAKING from BT */
+       if ((mci->bt_state == MCI_BT_AWAKE) &&
+               (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+               (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                               AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                                 AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+                       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                                 AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+       }
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+       REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+                 AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+       u32 intr;
+
+       intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+       return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+                             u32 *rx_msg_intr)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       *raw_intr = mci->raw_intr;
+       *rx_msg_intr = mci->rx_msg_intr;
+
+       /* Clear the interrupt bits after the values are read. */
+       mci->raw_intr = 0;
+       mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+       if (!mci->update_2g5g &&
+           (mci->is_2g != is_2g))
+               mci->update_2g5g = true;
+
+       mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 *payload;
+       u32 recv_type, offset;
+
+       if (msg_index == MCI_GPM_INVALID)
+               return false;
+
+       offset = msg_index << 4;
+
+       payload = (u32 *)(mci->gpm_buf + offset);
+       recv_type = MCI_GPM_TYPE(payload);
+
+       if (recv_type == MCI_GPM_RSVD_PATTERN) {
+               ath_dbg(common, ATH_DBG_MCI, "MCI Skip RSVD GPM\n");
+               return false;
+       }
+
+       return true;
+}
+
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+               ath9k_hw_cfg_output(ah, 3,
+                                       AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+       } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+               ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+       } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+               ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+               ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+               ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+               ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+       } else
+               return;
+
+       REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+       if (AR_SREV_9462_20_OR_LATER(ah)) {
+               REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+                             AR_GLB_DS_JTAG_DISABLE, 1);
+               REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+                             AR_GLB_WLAN_UART_INTF_EN, 0);
+               REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+                           ATH_MCI_CONFIG_MCI_OBS_GPIO);
+       }
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+       REG_WRITE(ah, AR_OBS, 0x4b);
+       REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+       REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+       REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+       REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+       REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+                     AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+                                               u8 opcode, u32 bt_flags)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 pld[4] = {0, 0, 0, 0};
+
+       MCI_GPM_SET_TYPE_OPCODE(pld,
+                       MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+       *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP)  = opcode;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+       *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+       ath_dbg(common, ATH_DBG_MCI,
+               "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+               (opcode == MCI_GPM_COEX_BT_FLAGS_READ) ? "READ" :
+               ((opcode == MCI_GPM_COEX_BT_FLAGS_SET) ? "SET" : "CLEAR"),
+                                                               bt_flags);
+
+       return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+                                                       wait_done, true);
+}
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                     bool is_full_sleep)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 regval, thresh;
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI full_sleep = %d, is_2g = %d\n",
+               is_full_sleep, is_2g);
+
+       /*
+        * GPM buffer and scheduling message buffer are not allocated
+        */
+
+       if (!mci->gpm_addr && !mci->sched_addr) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI GPM and scheduling buffers are not allocated\n");
+               return;
+       }
+
+       if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI it's deadbeef, quit mci_reset\n");
+               return;
+       }
+
+       /* Program MCI DMA related registers */
+       REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+       REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+       REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+       /*
+        * To avoid the MCI state machine being affected by incoming remote
+        * MCI messages, MCI mode is enabled later, right before resetting
+        * the MCI TX and RX.
+        */
+
+       regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+                SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+                SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+                SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+                SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+                SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+                SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+                SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+       if (is_2g && (AR_SREV_9462_20(ah)) &&
+               !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+               regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+               ath_dbg(common, ATH_DBG_MCI,
+                               "MCI sched one step look ahead\n");
+
+               if (!(mci->config &
+                     ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
+                       thresh = MS(mci->config,
+                                   ATH_MCI_CONFIG_AGGR_THRESH);
+                       thresh &= 7;
+                       regval |= SM(1,
+                                    AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+                       regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+                       REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+                                     AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+                       REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+                                     AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI sched aggr thresh: off\n");
+       } else
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI SCHED one step look ahead off\n");
+
+       if (AR_SREV_9462_10(ah))
+               regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+       REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+       if (AR_SREV_9462_20(ah)) {
+               REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+                           AR_BTCOEX_CTRL_SPDT_ENABLE);
+               REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+                             AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+       }
+
+       REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+       REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+       thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+       REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+       REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+       /* Resetting the Rx and Tx paths of MCI */
+       regval = REG_READ(ah, AR_MCI_COMMAND2);
+       regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+       udelay(1);
+
+       regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+       if (is_full_sleep) {
+               ar9003_mci_mute_bt(ah);
+               udelay(100);
+       }
+
+       regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+       udelay(1);
+       regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+       REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+       ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+       REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+                 (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+                  SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+       REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+                       AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+       if (AR_SREV_9462_20_OR_LATER(ah))
+               ar9003_mci_observation_set_up(ah);
+
+       mci->ready = true;
+       ar9003_mci_prep_interface(ah);
+
+       if (en_int)
+               ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       /* disable all MCI messages */
+       REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+       REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+       REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+       /* wait for pending HW messages to flush out */
+       udelay(10);
+
+       /*
+        * Send LNA_TAKE and SYS_SLEEPING when
+        * 1. the reset is not the one after resuming from full sleep, and
+        * 2. before resetting MCI RX, to quiet BT and avoid MCI RX misalignment
+        */
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
+       ar9003_mci_send_lna_take(ah, true);
+
+       udelay(5);
+
+       ath_dbg(common, ATH_DBG_MCI, "MCI Send sys sleeping\n");
+       ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 cur_bt_state;
+
+       cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+       if (mci->bt_state != cur_bt_state) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT state mismatches. old: %d, new: %d\n",
+                       mci->bt_state, cur_bt_state);
+               mci->bt_state = cur_bt_state;
+       }
+
+       if (mci->bt_state != MCI_BT_SLEEP) {
+
+               ar9003_mci_send_coex_version_query(ah, true);
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+
+               if (mci->unhalt_bt_gpm == true) {
+                       ath_dbg(common, ATH_DBG_MCI, "MCI unhalt BT GPM");
+                       ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+               }
+       }
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 new_flags, to_set, to_clear;
+
+       if (AR_SREV_9462_20(ah) &&
+           mci->update_2g5g &&
+           (mci->bt_state != MCI_BT_SLEEP)) {
+
+               if (mci->is_2g) {
+                       new_flags = MCI_2G_FLAGS;
+                       to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+                       to_set = MCI_2G_FLAGS_SET_MASK;
+               } else {
+                       new_flags = MCI_5G_FLAGS;
+                       to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+                       to_set = MCI_5G_FLAGS_SET_MASK;
+               }
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+               mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+               if (to_clear)
+                       ar9003_mci_send_coex_bt_flags(ah, wait_done,
+                                       MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+               if (to_set)
+                       ar9003_mci_send_coex_bt_flags(ah, wait_done,
+                                       MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+       }
+
+       if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+               mci->update_2g5g = false;
+}
+
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+                                       u32 *payload, bool queue)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u8 type, opcode;
+
+       if (queue) {
+
+               if (payload)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+                               header,
+                               *(((u8 *)payload) + 4),
+                               *(((u8 *)payload) + 5),
+                               *(((u8 *)payload) + 6));
+               else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI ERROR: Send fail: %02x\n", header);
+       }
+
+       /* check if the message is to be queued */
+       if (header != MCI_GPM)
+               return;
+
+       type = MCI_GPM_TYPE(payload);
+       opcode = MCI_GPM_OPCODE(payload);
+
+       if (type != MCI_GPM_COEX_AGENT)
+               return;
+
+       switch (opcode) {
+       case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+               if (AR_SREV_9462_10(ah))
+                       break;
+
+               if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+                               MCI_GPM_COEX_BT_FLAGS_READ)
+                       break;
+
+               mci->update_2g5g = queue;
+
+               if (queue)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT_MCI_FLAGS: 2G5G status <queued> %s.\n",
+                               mci->is_2g ? "2G" : "5G");
+               else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT_MCI_FLAGS: 2G5G status <sent> %s.\n",
+                               mci->is_2g ? "2G" : "5G");
+
+               break;
+
+       case MCI_GPM_COEX_WLAN_CHANNELS:
+
+               mci->wlan_channels_update = queue;
+               if (queue)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI WLAN channel map <queued>\n");
+               else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI WLAN channel map <sent>\n");
+               break;
+
+       case MCI_GPM_COEX_HALT_BT_GPM:
+
+               if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+                               MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+                       mci->unhalt_bt_gpm = queue;
+
+                       if (queue)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI UNHALT BT GPM <queued>\n");
+                       else {
+                               mci->halted_bt_gpm = false;
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI UNHALT BT GPM <sent>\n");
+                       }
+               }
+
+               if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+                               MCI_GPM_COEX_BT_GPM_HALT) {
+
+                       mci->halted_bt_gpm = !queue;
+
+                       if (queue)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI HALT BT GPM <not sent>\n");
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI UNHALT BT GPM <sent>\n");
+               }
+
+               break;
+       default:
+               break;
+       }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+       if (mci->update_2g5g) {
+               if (mci->is_2g) {
+
+                       ar9003_mci_send_2g5g_status(ah, true);
+                       ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA trans\n");
+                       ar9003_mci_send_lna_transfer(ah, true);
+                       udelay(5);
+
+                       REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+                                   AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+                       if (AR_SREV_9462_20(ah)) {
+                               REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+                                           AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+                               if (!(mci->config &
+                                     ATH_MCI_CONFIG_DISABLE_OSLA)) {
+                                       REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+                                       AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+                               }
+                       }
+               } else {
+                       ath_dbg(common, ATH_DBG_MCI, "MCI Send LNA take\n");
+                       ar9003_mci_send_lna_take(ah, true);
+                       udelay(5);
+
+                       REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+                                   AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+                       if (AR_SREV_9462_20(ah)) {
+                               REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+                                           AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+                               REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+                                       AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+                       }
+
+                       ar9003_mci_send_2g5g_status(ah, true);
+               }
+       }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+                            u32 *payload, u8 len, bool wait_done,
+                            bool check_bt)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       bool msg_sent = false;
+       u32 regval;
+       u32 saved_mci_int_en;
+       int i;
+
+       saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+       regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+       if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Not sending 0x%x. MCI is not enabled. "
+                       "full_sleep = %d\n", header,
+                       (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+               return false;
+
+       } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+               ath_dbg(common, ATH_DBG_MCI,
+               "MCI Don't send message 0x%x. BT is in sleep state\n", header);
+
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+               return false;
+       }
+
+       if (wait_done)
+               REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+       /* Need to clear the SW_MSG_DONE raw bit before waiting */
+
+       REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+                 (AR_MCI_INTERRUPT_SW_MSG_DONE |
+                  AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+       if (payload) {
+               for (i = 0; (i * 4) < len; i++)
+                       REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+                                 *(payload + i));
+       }
+
+       REG_WRITE(ah, AR_MCI_COMMAND0,
+                 (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+                     AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+                  SM(len, AR_MCI_COMMAND0_LEN) |
+                  SM(header, AR_MCI_COMMAND0_HEADER)));
+
+       if (wait_done &&
+           !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+                                       AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+       else {
+               ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+               msg_sent = true;
+       }
+
+       if (wait_done)
+               REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+       return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
+
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                     u16 len, u32 sched_addr)
+{
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+       mci->gpm_addr = gpm_addr;
+       mci->gpm_buf = gpm_buf;
+       mci->gpm_len = len;
+       mci->sched_addr = sched_addr;
+       mci->sched_buf = sched_buf;
+
+       ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       /* Turn off MCI and Jupiter mode. */
+       REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+       ath_dbg(common, ATH_DBG_MCI, "MCI ar9003_mci_cleanup\n");
+       ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+                                        u8 gpm_opcode, u32 *p_gpm)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u8 *p_data = (u8 *) p_gpm;
+
+       if (gpm_type != MCI_GPM_COEX_AGENT)
+               return;
+
+       switch (gpm_opcode) {
+       case MCI_GPM_COEX_VERSION_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Query\n");
+               ar9003_mci_send_coex_version_response(ah, true);
+               break;
+       case MCI_GPM_COEX_VERSION_RESPONSE:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Response\n");
+               mci->bt_ver_major =
+                       *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+               mci->bt_ver_minor =
+                       *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+               mci->bt_version_known = true;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT Coex version: %d.%d\n",
+                       mci->bt_ver_major,
+                       mci->bt_ver_minor);
+               break;
+       case MCI_GPM_COEX_STATUS_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Status Query = 0x%02X.\n",
+                       *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+               mci->wlan_channels_update = true;
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+               break;
+       case MCI_GPM_COEX_BT_PROFILE_INFO:
+               mci->query_bt = true;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX BT_Profile_Info\n");
+               break;
+       case MCI_GPM_COEX_BT_STATUS_UPDATE:
+               mci->query_bt = true;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX BT_Status_Update "
+                       "SEQ=%d (drop&query)\n", *(p_gpm + 3));
+               break;
+       default:
+               break;
+       }
+}
+
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+                           u8 gpm_opcode, int time_out)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 *p_gpm = NULL, mismatch = 0, more_data;
+       u32 offset;
+       u8 recv_type = 0, recv_opcode = 0;
+       bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+       more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+       while (time_out > 0) {
+               if (p_gpm) {
+                       MCI_GPM_RECYCLE(p_gpm);
+                       p_gpm = NULL;
+               }
+
+               if (more_data != MCI_GPM_MORE)
+                       time_out = ar9003_mci_wait_for_interrupt(ah,
+                                       AR_MCI_INTERRUPT_RX_MSG_RAW,
+                                       AR_MCI_INTERRUPT_RX_MSG_GPM,
+                                       time_out);
+
+               if (!time_out)
+                       break;
+
+               offset = ar9003_mci_state(ah,
+                               MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+               if (offset == MCI_GPM_INVALID)
+                       continue;
+
+               p_gpm = (u32 *) (mci->gpm_buf + offset);
+               recv_type = MCI_GPM_TYPE(p_gpm);
+               recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+               if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+                       if (recv_type == gpm_type) {
+
+                               if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+                                   !b_is_bt_cal_done) {
+                                       gpm_type = MCI_GPM_BT_CAL_GRANT;
+                                       ath_dbg(common, ATH_DBG_MCI,
+                                               "MCI Recv BT_CAL_DONE, "
+                                               "wait BT_CAL_GRANT\n");
+                                       continue;
+                               }
+
+                               break;
+                       }
+               } else if ((recv_type == gpm_type) &&
+                          (recv_opcode == gpm_opcode))
+                       break;
+
+               /* not expected message */
+
+               /*
+                * check if it's cal_grant
+                *
+                * When we're waiting for cal_grant in reset routine,
+                * it's possible that BT sends out cal_request at the
+                * same time. Since BT's calibration doesn't happen
+                * that often, we'll let BT complete its calibration and then
+                * continue to wait for cal_grant from BT.
+                * Original: Wait BT_CAL_GRANT.
+                * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
+                * BT_CAL_DONE -> Wait BT_CAL_GRANT.
+                */
+
+               if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+                   (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+                       u32 payload[4] = {0, 0, 0, 0};
+
+                       gpm_type = MCI_GPM_BT_CAL_DONE;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+                       MCI_GPM_SET_CAL_TYPE(payload,
+                                       MCI_GPM_WLAN_CAL_GRANT);
+
+                       ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+                                               false, false);
+
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI now wait for BT_CAL_DONE\n");
+
+                       continue;
+               } else {
+                       ath_dbg(common, ATH_DBG_MCI, "MCI GPM subtype "
+                                       "does not match 0x%x\n", *(p_gpm + 1));
+                       mismatch++;
+                       ar9003_mci_process_gpm_extra(ah, recv_type,
+                                       recv_opcode, p_gpm);
+               }
+       }
+       if (p_gpm) {
+               MCI_GPM_RECYCLE(p_gpm);
+               p_gpm = NULL;
+       }
+
+       if (time_out <= 0) {
+               time_out = 0;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI GPM received timeout, mismatch = %d\n", mismatch);
+       } else
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Receive GPM type=0x%x, code=0x%x\n",
+                       gpm_type, gpm_opcode);
+
+       while (more_data == MCI_GPM_MORE) {
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI discard remaining GPM\n");
+               offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+                                         &more_data);
+
+               if (offset == MCI_GPM_INVALID)
+                       break;
+
+               p_gpm = (u32 *) (mci->gpm_buf + offset);
+               recv_type = MCI_GPM_TYPE(p_gpm);
+               recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+               if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+                       ar9003_mci_process_gpm_extra(ah, recv_type,
+                                                    recv_opcode, p_gpm);
+
+               MCI_GPM_RECYCLE(p_gpm);
+       }
+
+       return time_out;
+}
+
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+       u32 value = 0, more_gpm = 0, gpm_ptr;
+       u8 query_type;
+
+       switch (state_type) {
+       case MCI_STATE_ENABLE:
+               if (mci->ready) {
+
+                       value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+                       if ((value == 0xdeadbeef) || (value == 0xffffffff))
+                               value = 0;
+               }
+               value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+               break;
+       case MCI_STATE_INIT_GPM_OFFSET:
+               value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI GPM initial WRITE_PTR=%d\n", value);
+               mci->gpm_idx = value;
+               break;
+       case MCI_STATE_NEXT_GPM_OFFSET:
+       case MCI_STATE_LAST_GPM_OFFSET:
+               /*
+                * This could be useful to avoid a new GPM message interrupt,
+                * which may lead to a spurious interrupt after power sleep or
+                * to multiple entries into ath_mci_intr().
+                * Adding an empty-GPM check by returning HAL_MCI_GPM_INVALID
+                * can alleviate this effect, but clearing the GPM RX interrupt
+                * bit is safe, because whether this is called from hw or
+                * driver code, there must be an interrupt bit set/triggered
+                * initially.
+                */
+               REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                         AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+               gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+               value = gpm_ptr;
+
+               if (value == 0)
+                       value = mci->gpm_len - 1;
+               else if (value >= mci->gpm_len) {
+                       if (value != 0xFFFF) {
+                               value = 0;
+                               ath_dbg(common, ATH_DBG_MCI, "MCI GPM offset"
+                                       " out of range\n");
+                       }
+               } else
+                       value--;
+
+               if (value == 0xFFFF) {
+                       value = MCI_GPM_INVALID;
+                       more_gpm = MCI_GPM_NOMORE;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI GPM ptr invalid"
+                               " @ptr=%d, offset=%d, more=GPM_NOMORE\n",
+                               gpm_ptr, value);
+               } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+                       if (gpm_ptr == mci->gpm_idx) {
+                               value = MCI_GPM_INVALID;
+                               more_gpm = MCI_GPM_NOMORE;
+
+                               ath_dbg(common, ATH_DBG_MCI, "MCI GPM message"
+                                       " not available @ptr=%d, @offset=%d,"
+                                       " more=GPM_NOMORE\n", gpm_ptr, value);
+                       } else {
+                               for (;;) {
+
+                                       u32 temp_index;
+
+                                       /* skip reserved GPM if any */
+
+                                       if (value != mci->gpm_idx)
+                                               more_gpm = MCI_GPM_MORE;
+                                       else
+                                               more_gpm = MCI_GPM_NOMORE;
+
+                                       temp_index = mci->gpm_idx;
+                                       mci->gpm_idx++;
+
+                                       if (mci->gpm_idx >=
+                                           mci->gpm_len)
+                                               mci->gpm_idx = 0;
+
+                                       ath_dbg(common, ATH_DBG_MCI,
+                                               "MCI GPM message got ptr=%d,"
+                                               " @offset=%d, more=%d\n",
+                                               gpm_ptr, temp_index,
+                                               (more_gpm == MCI_GPM_MORE));
+
+                                       if (ar9003_mci_is_gpm_valid(ah,
+                                                               temp_index)) {
+                                               value = temp_index;
+                                               break;
+                                       }
+
+                                       if (more_gpm == MCI_GPM_NOMORE) {
+                                               value = MCI_GPM_INVALID;
+                                               break;
+                                       }
+                               }
+                       }
+                       if (p_data)
+                               *p_data = more_gpm;
+               }
+
+               if (value != MCI_GPM_INVALID)
+                       value <<= 4;
+
+               break;
+       case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+               value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+                                   AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+               /* Make it in bytes */
+               value <<= 4;
+               break;
+
+       case MCI_STATE_REMOTE_SLEEP:
+               value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+                          AR_MCI_RX_REMOTE_SLEEP) ?
+                       MCI_BT_SLEEP : MCI_BT_AWAKE;
+               break;
+
+       case MCI_STATE_CONT_RSSI_POWER:
+               value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+               break;
+
+       case MCI_STATE_CONT_PRIORITY:
+               value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+               break;
+
+       case MCI_STATE_CONT_TXRX:
+               value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+               break;
+
+       case MCI_STATE_BT:
+               value = mci->bt_state;
+               break;
+
+       case MCI_STATE_SET_BT_SLEEP:
+               mci->bt_state = MCI_BT_SLEEP;
+               break;
+
+       case MCI_STATE_SET_BT_AWAKE:
+               mci->bt_state = MCI_BT_AWAKE;
+               ar9003_mci_send_coex_version_query(ah, true);
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+
+               if (mci->unhalt_bt_gpm) {
+
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI unhalt BT GPM\n");
+                       ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+               }
+
+               ar9003_mci_2g5g_switch(ah, true);
+               break;
+
+       case MCI_STATE_SET_BT_CAL_START:
+               mci->bt_state = MCI_BT_CAL_START;
+               break;
+
+       case MCI_STATE_SET_BT_CAL:
+               mci->bt_state = MCI_BT_CAL;
+               break;
+
+       case MCI_STATE_RESET_REQ_WAKE:
+               ar9003_mci_reset_req_wakeup(ah);
+               mci->update_2g5g = true;
+
+               if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+                   (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+                       /* Check if we still have control of the GPIOs */
+                       if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+                                     ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+                                       ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI reconfigure observation");
+                               ar9003_mci_observation_set_up(ah);
+                       }
+               }
+               break;
+
+       case MCI_STATE_SEND_WLAN_COEX_VERSION:
+               ar9003_mci_send_coex_version_response(ah, true);
+               break;
+
+       case MCI_STATE_SET_BT_COEX_VERSION:
+
+               if (!p_data)
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI Set BT Coex version with NULL data!!\n");
+               else {
+                       mci->bt_ver_major = (*p_data >> 8) & 0xff;
+                       mci->bt_ver_minor = (*p_data) & 0xff;
+                       mci->bt_version_known = true;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT version set: %d.%d\n",
+                               mci->bt_ver_major,
+                               mci->bt_ver_minor);
+               }
+               break;
+
+       case MCI_STATE_SEND_WLAN_CHANNELS:
+               if (p_data) {
+                       if (((mci->wlan_channels[1] & 0xffff0000) ==
+                            (*(p_data + 1) & 0xffff0000)) &&
+                           (mci->wlan_channels[2] == *(p_data + 2)) &&
+                           (mci->wlan_channels[3] == *(p_data + 3)))
+                               break;
+
+                       mci->wlan_channels[0] = *p_data++;
+                       mci->wlan_channels[1] = *p_data++;
+                       mci->wlan_channels[2] = *p_data++;
+                       mci->wlan_channels[3] = *p_data++;
+               }
+               mci->wlan_channels_update = true;
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+               break;
+
+       case MCI_STATE_SEND_VERSION_QUERY:
+               ar9003_mci_send_coex_version_query(ah, true);
+               break;
+
+       case MCI_STATE_SEND_STATUS_QUERY:
+               query_type = (AR_SREV_9462_10(ah)) ?
+                               MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+                               MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+               ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+               break;
+
+       case MCI_STATE_NEED_FLUSH_BT_INFO:
+               /*
+                * btcoex_hw.mci.unhalt_bt_gpm indicates whether an UNHALT
+                * message needs to be sent. It is set whenever there is a
+                * request to send a HALT message.
+                * mci_halted_bt_gpm indicates whether the HALT message was
+                * sent out successfully.
+                *
+                * Checking (mci_unhalt_bt_gpm == false) instead of
+                * (ah->mci_halted_bt_gpm == false) makes sure we are
+                * currently in UNHALT-ed mode, so BT can respond to a
+                * status query.
+                */
+               value = (!mci->unhalt_bt_gpm &&
+                        mci->need_flush_btinfo) ? 1 : 0;
+               if (p_data)
+                       mci->need_flush_btinfo =
+                               (*p_data != 0) ? true : false;
+               break;
+
+       case MCI_STATE_RECOVER_RX:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI hw RECOVER_RX\n");
+               ar9003_mci_prep_interface(ah);
+               mci->query_bt = true;
+               mci->need_flush_btinfo = true;
+               ar9003_mci_send_coex_wlan_channels(ah, true);
+               ar9003_mci_2g5g_switch(ah, true);
+               break;
+
+       case MCI_STATE_NEED_FTP_STOMP:
+               value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+               break;
+
+       case MCI_STATE_NEED_TUNING:
+               value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+               break;
+
+       default:
+               break;
+
+       }
+
+       return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
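For orientation, here is a minimal caller-side sketch of the state machine above: the MCI_STATE_ENABLE check and the NEXT_GPM_OFFSET/more_data loop mirror the usage visible elsewhere in this patch (ath9k_hw_setpower() and the GPM discard loop). drain_gpm() and process_gpm_msg() are hypothetical names used only for illustration, not part of the driver.

/* Illustrative only: drain the GPM ring the way ar9003_mci_state() expects.
 * drain_gpm() and process_gpm_msg() are made-up helpers. */
static void drain_gpm(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 offset, more_data = MCI_GPM_MORE;

	if (!ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL))
		return;		/* MCI mode not enabled in AR_BTCOEX_CTRL */

	while (more_data == MCI_GPM_MORE) {
		offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
					  &more_data);
		if (offset == MCI_GPM_INVALID)
			break;	/* ring empty or write pointer invalid */

		/* offset has already been converted to bytes (value <<= 4) */
		process_gpm_msg((u32 *)(mci->gpm_buf + offset));
	}
}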
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644 (file)
index 0000000..798da11
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP      0x00000001      /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT  3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT  0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN     3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN     0
+
+enum mci_gpm_coex_query_type {
+       MCI_GPM_COEX_QUERY_BT_ALL_INFO      = BIT(0),
+       MCI_GPM_COEX_QUERY_BT_TOPOLOGY      = BIT(1),
+       MCI_GPM_COEX_QUERY_BT_DEBUG         = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+       MCI_GPM_COEX_BT_GPM_UNHALT,
+       MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+       MCI_GPM_COEX_BT_FLAGS_READ,
+       MCI_GPM_COEX_BT_FLAGS_SET,
+       MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS     79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR          0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR           0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD           0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL             0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG                0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG            0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG             0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM             0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG          0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE             0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE          0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER                0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS              0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS  (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+                                 MCI_BT_MCI_FLAGS_UPDATE_HDR  | \
+                                 MCI_BT_MCI_FLAGS_UPDATE_PLD  | \
+                                 MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK   0x00000000
+#define MCI_2G_FLAGS_SET_MASK     MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS              MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK   MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK     0x00000000
+#define MCI_5G_FLAGS              (MCI_DEFAULT_BT_MCI_FLAGS & \
+                                  ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX            0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI          0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX         0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT           0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL      0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA         0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP    0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH          0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S        8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH  0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV              0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S            12
+#define ATH_MCI_CONFIG_DISABLE_TUNING       0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG       0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI          0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK     (ATH_MCI_CONFIG_MCI_OBS_MCI  | \
+                                        ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+                                        ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO     0x0000002F
+
+#endif
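The ATH_MCI_CONFIG_* values above follow the driver's usual MASK/MASK_S pairing, so multi-bit fields can be pulled out with the same MS() helper the .c file uses for the AR_MCI_* registers. A small sketch, assuming mci->config has already been loaded with the MCI configuration word; mci_dump_config() is a hypothetical helper, not part of the patch.

/* Sketch only: decode a few fields of the MCI config word with MS(),
 * i.e. MS(v, FIELD) == ((v & FIELD) >> FIELD_S). */
static void mci_dump_config(struct ath_hw *ah)
{
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 aggr_thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
	u32 clk_div = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
	bool obs_on_gpio = !!(mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK);
	bool ftp_stomp = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);

	ath_dbg(ath9k_hw_common(ah), ATH_DBG_MCI,
		"aggr_thresh=%u clk_div=%u obs=%d ftp_stomp=%d\n",
		aggr_thresh, clk_div, obs_on_gpio, ftp_stomp);
}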
index 2330e7e..e41d269 100644 (file)
@@ -199,12 +199,14 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
                        synth_freq = chan->channel;
                }
        } else {
-               range = 10;
+               range = AR_SREV_9462(ah) ? 5 : 10;
                max_spur_cnts = 4;
                synth_freq = chan->channel;
        }
 
        for (i = 0; i < max_spur_cnts; i++) {
+               if (AR_SREV_9462(ah) && (i == 0 || i == 3))
+                       continue;
                negative = 0;
                if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
                        cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
index 4114fe7..ed64114 100644 (file)
 #define AR_PHY_DAG_CTRLCCK_RSSI_THR_S   10
 
 #define AR_PHY_RIFS_INIT_DELAY         0x3ff0000
+#define AR_PHY_AGC_QUICK_DROP       0x03c00000
+#define AR_PHY_AGC_QUICK_DROP_S     22
 #define AR_PHY_AGC_COARSE_LOW       0x00007F80
 #define AR_PHY_AGC_COARSE_LOW_S     7
 #define AR_PHY_AGC_COARSE_HIGH      0x003F8000
 #define AR_PHY_TEST_CTL_TSTADC_EN_S       8
 #define AR_PHY_TEST_CTL_RX_OBS_SEL        0x3C00
 #define AR_PHY_TEST_CTL_RX_OBS_SEL_S      10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL    0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S          29
 
 
 #define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
 
 /* GLB Registers */
 #define AR_GLB_BASE    0x20000
+#define AR_GLB_GPIO_CONTROL    (AR_GLB_BASE)
 #define AR_PHY_GLB_CONTROL     (AR_GLB_BASE + 0x44)
 #define AR_GLB_SCRATCH(_ah)    (AR_GLB_BASE + \
                                        (AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
index 9c51b39..dc2054f 100644 (file)
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
 
 static const u32 ar9462_2p0_baseband_postamble[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
-       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
-       {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
-       {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+       {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+       {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
        {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
        {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
        {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
        {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
        {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
-       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
-       {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
-       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+       {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
        {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
-       {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
        {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+       {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+       {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+       {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+       {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
        {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
        {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
        {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -688,8 +697,8 @@ static const u32 ar9462_2p0_mac_postamble_emulation[][5] = {
 static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
-       {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
-       {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+       {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+       {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
 static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
@@ -717,8 +726,8 @@ static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
 static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
-       {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
-       {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+       {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+       {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
 };
 
 static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
@@ -1059,7 +1068,7 @@ static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
 
 static const u32 ar9462_2p0_soc_postamble[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-       {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+       {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
 };
 
 static const u32 ar9462_2p0_baseband_core[][2] = {
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
        {0x00009e30, 0x06336f77},
        {0x00009e34, 0x6af6532f},
        {0x00009e38, 0x0cc80c00},
-       {0x00009e40, 0x0d261820},
+       {0x00009e40, 0x15262820},
        {0x00009e4c, 0x00001004},
        {0x00009e50, 0x00ff03f1},
-       {0x00009e54, 0xe4c355c7},
-       {0x00009e58, 0xfd897735},
+       {0x00009e54, 0xe4c555c2},
+       {0x00009e58, 0xfd857722},
        {0x00009e5c, 0xe9198724},
        {0x00009fc0, 0x803e4788},
        {0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
        {0x0000a398, 0x001f0e0f},
        {0x0000a39c, 0x0075393f},
        {0x0000a3a0, 0xb79f6427},
-       {0x0000a3a4, 0x00000000},
-       {0x0000a3a8, 0xaaaaaaaa},
-       {0x0000a3ac, 0x3c466478},
        {0x0000a3c0, 0x20202020},
        {0x0000a3c4, 0x22222220},
        {0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
        {0x0000a40c, 0x00820820},
        {0x0000a414, 0x1ce739ce},
        {0x0000a418, 0x2d001dce},
-       {0x0000a41c, 0x1ce739ce},
-       {0x0000a420, 0x000001ce},
-       {0x0000a424, 0x1ce739ce},
-       {0x0000a428, 0x000001ce},
-       {0x0000a42c, 0x1ce739ce},
-       {0x0000a430, 0x1ce739ce},
        {0x0000a434, 0x00000000},
        {0x0000a438, 0x00001801},
        {0x0000a43c, 0x00100000},
@@ -1257,8 +1257,8 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
        {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
        {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
        {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
-       {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
-       {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+       {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+       {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
        {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
        {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
        {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
@@ -1850,8 +1850,8 @@ static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = {
        {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
        {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
        {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
-       {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
-       {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+       {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+       {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
        {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
        {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
        {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
index 1c269f5..afc156a 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "debug.h"
 #include "common.h"
+#include "mci.h"
 
 /*
  * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -252,6 +253,7 @@ struct ath_node {
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct list_head list; /* for sc->nodes */
        struct ieee80211_sta *sta; /* station struct we're part of */
+       struct ieee80211_vif *vif; /* interface with which we're associated */
 #endif
        struct ath_atx_tid tid[WME_NUM_TID];
        struct ath_atx_ac ac[WME_NUM_AC];
@@ -443,7 +445,9 @@ struct ath_btcoex {
        u32 btcoex_no_stomp; /* in usec */
        u32 btcoex_period; /* in usec */
        u32 btscan_no_stomp; /* in usec */
+       u32 duty_cycle;
        struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
+       struct ath_mci_profile mci;
 };
 
 int ath_init_btcoex_timer(struct ath_softc *sc);
@@ -458,7 +462,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
 #define ATH_LED_PIN_9287               8
 #define ATH_LED_PIN_9300               10
 #define ATH_LED_PIN_9485               6
-#define ATH_LED_PIN_9462               0
+#define ATH_LED_PIN_9462               4
 
 #ifdef CONFIG_MAC80211_LEDS
 void ath_init_leds(struct ath_softc *sc);
@@ -643,6 +647,7 @@ struct ath_softc {
        struct delayed_work tx_complete_work;
        struct delayed_work hw_pll_work;
        struct ath_btcoex btcoex;
+       struct ath_mci_coex mci_coex;
 
        struct ath_descdma txsdma;
 
index 0122639..bbb2081 100644 (file)
@@ -21,7 +21,7 @@ enum ath_bt_mode {
        ATH_BT_COEX_MODE_LEGACY,        /* legacy rx_clear mode */
        ATH_BT_COEX_MODE_UNSLOTTED,     /* untimed/unslotted mode */
        ATH_BT_COEX_MODE_SLOTTED,       /* slotted mode */
-       ATH_BT_COEX_MODE_DISALBED,      /* coexistence disabled */
+       ATH_BT_COEX_MODE_DISABLED,      /* coexistence disabled */
 };
 
 struct ath_btcoex_config {
@@ -36,6 +36,20 @@ struct ath_btcoex_config {
        bool bt_hold_rx_clear;
 };
 
+static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+                                   [AR9300_NUM_WLAN_WEIGHTS] = {
+       { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
+       { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
+       { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
+};
+
+static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+                                   [AR9300_NUM_WLAN_WEIGHTS] = {
+       { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
+       { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
+       { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
+       { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+};
 
 void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 {
@@ -152,27 +166,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
 
 static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
 {
-       struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+       struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
        u32  val;
+       int i;
 
        /*
         * Program coex mode and weight registers to
         * enable coex 3-wire
         */
-       REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
-       REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
+       REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
+       REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
 
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
-               REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
-               REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
-               REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
-               REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
-               REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
-               REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
-
+               REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
+               REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
+               for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+                       REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
+                                 btcoex->bt_weight[i]);
        } else
-               REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
+               REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
 
 
 
@@ -185,10 +198,23 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
        REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
        REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
 
-       ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+       ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
                            AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
 }
 
+static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
+{
+       struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+       int i;
+
+       for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+               REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+                         btcoex->wlan_weight[i]);
+
+       REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+       btcoex->enabled = true;
+}
+
 void ath9k_hw_btcoex_enable(struct ath_hw *ah)
 {
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@@ -202,6 +228,9 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
        case ATH_BTCOEX_CFG_3WIRE:
                ath9k_hw_btcoex_enable_3wire(ah);
                break;
+       case ATH_BTCOEX_CFG_MCI:
+               ath9k_hw_btcoex_enable_mci(ah);
+               return;
        }
 
        REG_RMW(ah, AR_GPIO_PDPU,
@@ -215,7 +244,15 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
 void ath9k_hw_btcoex_disable(struct ath_hw *ah)
 {
        struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
-
+       int i;
+
+       btcoex_hw->enabled = false;
+       if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
+               ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+               for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+                       REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+                                 btcoex_hw->wlan_weight[i]);
+       }
        ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
 
        ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@@ -228,49 +265,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
                if (AR_SREV_9300_20_OR_LATER(ah)) {
                        REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
                        REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
-                       REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
-                       REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
-                       REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
-                       REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
+                       for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+                               REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
                } else
                        REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
 
        }
-
-       ah->btcoex_hw.enabled = false;
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
 
 static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
                         enum ath_stomp_type stomp_type)
 {
-       ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
-       ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
-       ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
-       ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
-
-
-       switch (stomp_type) {
-       case ATH_BTCOEX_STOMP_ALL:
-               ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
-               ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
-               break;
-       case ATH_BTCOEX_STOMP_LOW:
-               ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
-               ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
-               break;
-       case ATH_BTCOEX_STOMP_NONE:
-               ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
-               ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
-               break;
-
-       default:
-               ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-                               "Invalid Stomptype\n");
-               break;
+       struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+       const u32 *weight = AR_SREV_9462(ah) ? ar9462_wlan_weights[stomp_type] :
+                                              ar9003_wlan_weights[stomp_type];
+       int i;
+
+       for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+               btcoex->bt_weight[i] = AR9300_BT_WGHT;
+               btcoex->wlan_weight[i] = weight[i];
        }
-
-       ath9k_hw_btcoex_enable(ah);
 }
 
 /*
@@ -302,7 +317,5 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
                                "Invalid Stomptype\n");
                break;
        }
-
-       ath9k_hw_btcoex_enable(ah);
 }
 EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
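One behavioural change is easy to miss in this hunk: the bt_stomp helpers now only compute the weight tables and no longer call ath9k_hw_btcoex_enable() themselves, so every caller has to program the weights explicitly, as the timer and work handlers further below now do. The new calling pattern, in sketch form (locking taken from the existing handlers):

	spin_lock_bh(&btcoex->btcoex_lock);

	/* pick the weights for the current stomp policy ... */
	ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
				     btcoex->bt_stomp_type);
	/* ... then program them; without this the weights never reach the hw */
	ath9k_hw_btcoex_enable(ah);

	spin_unlock_bh(&btcoex->btcoex_lock);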
index 234f776..278361c 100644 (file)
 #define ATH_BT_CNT_THRESHOLD          3
 #define ATH_BT_CNT_SCAN_THRESHOLD      15
 
+#define AR9300_NUM_BT_WEIGHTS   4
+#define AR9300_NUM_WLAN_WEIGHTS 4
 /* Defines the BT AR_BT_COEX_WGHT used */
 enum ath_stomp_type {
-       ATH_BTCOEX_NO_STOMP,
        ATH_BTCOEX_STOMP_ALL,
        ATH_BTCOEX_STOMP_LOW,
-       ATH_BTCOEX_STOMP_NONE
+       ATH_BTCOEX_STOMP_NONE,
+       ATH_BTCOEX_STOMP_LOW_FTP,
+       ATH_BTCOEX_STOMP_MAX
 };
 
 enum ath_btcoex_scheme {
        ATH_BTCOEX_CFG_NONE,
        ATH_BTCOEX_CFG_2WIRE,
        ATH_BTCOEX_CFG_3WIRE,
+       ATH_BTCOEX_CFG_MCI,
+};
+
+struct ath9k_hw_mci {
+       u32 raw_intr;
+       u32 rx_msg_intr;
+       u32 cont_status;
+       u32 gpm_addr;
+       u32 gpm_len;
+       u32 gpm_idx;
+       u32 sched_addr;
+       u32 wlan_channels[4];
+       u32 wlan_cal_seq;
+       u32 wlan_cal_done;
+       u32 config;
+       u8 *gpm_buf;
+       u8 *sched_buf;
+       bool ready;
+       bool update_2g5g;
+       bool is_2g;
+       bool query_bt;
+       bool unhalt_bt_gpm; /* need send UNHALT */
+       bool halted_bt_gpm; /* HALT sent */
+       bool need_flush_btinfo;
+       bool bt_version_known;
+       bool wlan_channels_update;
+       u8 wlan_ver_major;
+       u8 wlan_ver_minor;
+       u8 bt_ver_major;
+       u8 bt_ver_minor;
+       u8 bt_state;
 };
 
 struct ath_btcoex_hw {
        enum ath_btcoex_scheme scheme;
+       struct ath9k_hw_mci mci;
        bool enabled;
        u8 wlanactive_gpio;
        u8 btactive_gpio;
@@ -59,6 +94,8 @@ struct ath_btcoex_hw {
        u32 bt_coex_mode;       /* Register setting for AR_BT_COEX_MODE */
        u32 bt_coex_weights;    /* Register setting for AR_BT_COEX_WEIGHT */
        u32 bt_coex_mode2;      /* Register setting for AR_BT_COEX_MODE2 */
+       u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
+       u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
 };
 
 void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
index 2741203..6fb719d 100644 (file)
@@ -709,24 +709,29 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
 
        len += snprintf(buf + len, size - len,
                        "Stations:\n"
-                       " tid: addr sched paused buf_q-empty an ac\n"
+                       " tid: addr sched paused buf_q-empty an ac baw\n"
                        " ac: addr sched tid_q-empty txq\n");
 
        spin_lock(&sc->nodes_lock);
        list_for_each_entry(an, &sc->nodes, list) {
+               unsigned short ma = an->maxampdu;
+               if (ma == 0)
+                       ma = 65535; /* see ath_lookup_rate */
                len += snprintf(buf + len, size - len,
-                               "%pM\n", an->sta->addr);
+                               "iface: %pM  sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
+                               an->vif->addr, an->sta->addr, ma,
+                               (unsigned int)(an->mpdudensity));
                if (len >= size)
                        goto done;
 
                for (q = 0; q < WME_NUM_TID; q++) {
                        struct ath_atx_tid *tid = &(an->tid[q]);
                        len += snprintf(buf + len, size - len,
-                                       " tid: %p %s %s %i %p %p\n",
+                                       " tid: %p %s %s %i %p %p %hu\n",
                                        tid, tid->sched ? "sched" : "idle",
                                        tid->paused ? "paused" : "running",
                                        skb_queue_empty(&tid->buf_q),
-                                       tid->an, tid->ac);
+                                       tid->an, tid->ac, tid->baw_size);
                        if (len >= size)
                                goto done;
                }
index 49abd34..5ff7ab9 100644 (file)
@@ -249,7 +249,8 @@ enum eeprom_param {
        EEP_ANT_DIV_CTL1,
        EEP_CHAIN_MASK_REDUCE,
        EEP_ANTENNA_GAIN_2G,
-       EEP_ANTENNA_GAIN_5G
+       EEP_ANTENNA_GAIN_5G,
+       EEP_QUICK_DROP
 };
 
 enum ar5416_rates {
index 9a7520f..61fcab0 100644 (file)
@@ -473,7 +473,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
 
        int i;
        u16 twiceMinEdgePower;
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        u16 scaledPower = 0, minCtlPower;
        u16 numCtlModes;
        const u16 *pCtlMode;
@@ -542,9 +542,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
                else
                        freq = centers.ctl_center;
 
-               if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-                   ah->eep_ops->get_eeprom_rev(ah) <= 2)
-                       twiceMaxEdgePower = MAX_RATE_POWER;
+               twiceMaxEdgePower = MAX_RATE_POWER;
 
                for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
                             pEepData->ctlIndex[i]; i++) {
index 4f5c50a..0981c07 100644 (file)
@@ -569,7 +569,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
 #define REDUCE_SCALED_POWER_BY_TWO_CHAIN     6
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   10
 
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        int i;
        struct cal_ctl_data_ar9287 *rep;
        struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +669,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
                else
                        freq = centers.ctl_center;
 
+               twiceMaxEdgePower = MAX_RATE_POWER;
                /* Walk through the CTL indices stored in EEPROM */
                for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
                        struct cal_ctl_edges *pRdEdgesPower;
index 81e6296..55a21d3 100644 (file)
@@ -1000,7 +1000,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
 #define REDUCE_SCALED_POWER_BY_THREE_CHAIN   9 /* 10*log10(3)*2 */
 
        struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
-       u16 twiceMaxEdgePower = MAX_RATE_POWER;
+       u16 twiceMaxEdgePower;
        int i;
        struct cal_ctl_data *rep;
        struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1121,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
                else
                        freq = centers.ctl_center;
 
-               if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
-                   ah->eep_ops->get_eeprom_rev(ah) <= 2)
-                       twiceMaxEdgePower = MAX_RATE_POWER;
+               twiceMaxEdgePower = MAX_RATE_POWER;
 
                for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
                        if ((((cfgCtl & ~CTL_MODE_M) |
index 655576c..e4ae08e 100644 (file)
@@ -189,8 +189,8 @@ static void ath_btcoex_period_timer(unsigned long data)
        bool is_btscan;
 
        ath9k_ps_wakeup(sc);
-       ath_detect_bt_priority(sc);
-
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+               ath_detect_bt_priority(sc);
        is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
 
        spin_lock_bh(&btcoex->btcoex_lock);
@@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
        ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
                              btcoex->bt_stomp_type);
 
+       ath9k_hw_btcoex_enable(ah);
        spin_unlock_bh(&btcoex->btcoex_lock);
 
        if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@@ -212,8 +213,9 @@ static void ath_btcoex_period_timer(unsigned long data)
        }
 
        ath9k_ps_restore(sc);
+       timer_period = btcoex->btcoex_period / 1000;
        mod_timer(&btcoex->period_timer, jiffies +
-                                 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+                                 msecs_to_jiffies(timer_period));
 }
 
 /*
@@ -239,6 +241,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
         else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
                ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
 
+       ath9k_hw_btcoex_enable(ah);
        spin_unlock_bh(&btcoex->btcoex_lock);
        ath9k_ps_restore(sc);
 }
index e3a02eb..ce606b6 100644 (file)
@@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
        ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
                        btcoex->bt_stomp_type);
 
+       ath9k_hw_btcoex_enable(priv->ah);
        timer_period = is_btscan ? btcoex->btscan_no_stomp :
                btcoex->btcoex_no_stomp;
        ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@@ -108,6 +109,7 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
                ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
        else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
                ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+       ath9k_hw_btcoex_enable(priv->ah);
 }
 
 void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
index 0b9a0e8..f8ce4ea 100644 (file)
@@ -808,7 +808,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
        }
 
        /* Verify whether we must check ANI */
-       if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+       if (ah->config.enable_ani &&
+           (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
                aniflag = true;
                common->ani.checkani_timer = timestamp;
        }
@@ -838,7 +839,7 @@ set_timer:
        * short calibration and long calibration.
        */
        cal_interval = ATH_LONG_CALINTERVAL;
-       if (priv->ah->config.enable_ani)
+       if (ah->config.enable_ani)
                cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
        if (!common->ani.caldone)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
index 8873c6e..7f8fc65 100644 (file)
@@ -504,7 +504,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
                return ecode;
        }
 
-       if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+       if (ah->config.enable_ani) {
                ath9k_hw_ani_setup(ah);
                ath9k_hw_ani_init(ah);
        }
@@ -610,6 +610,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
        if (!AR_SREV_9300_20_OR_LATER(ah))
                ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
 
+       /* disable ANI for 9340 */
+       if (AR_SREV_9340(ah))
+               ah->config.enable_ani = false;
+
        ath9k_hw_init_mode_regs(ah);
 
        if (!ah->is_pciexpress)
@@ -1350,6 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
 
 static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 {
+       bool ret = false;
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1361,13 +1366,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
 
        switch (type) {
        case ATH9K_RESET_POWER_ON:
-               return ath9k_hw_set_reset_power_on(ah);
+               ret = ath9k_hw_set_reset_power_on(ah);
+               break;
        case ATH9K_RESET_WARM:
        case ATH9K_RESET_COLD:
-               return ath9k_hw_set_reset(ah, type);
+               ret = ath9k_hw_set_reset(ah, type);
+               break;
        default:
-               return false;
+               break;
        }
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+               REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+       return ret;
 }
 
 static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1506,6 +1518,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                   struct ath9k_hw_cal_data *caldata, bool bChannelChange)
 {
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        u32 saveLedState;
        struct ath9k_channel *curchan = ah->curchan;
        u32 saveDefAntenna;
@@ -1513,6 +1526,53 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        u64 tsf = 0;
        int i, r;
        bool allow_fbs = false;
+       bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+       bool save_fullsleep = ah->chip_fullsleep;
+
+       if (mci) {
+
+               ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+               if (mci_hw->bt_state == MCI_BT_CAL_START) {
+                       u32 payload[4] = {0, 0, 0, 0};
+
+                       ath_dbg(common, ATH_DBG_MCI, "MCI stop rx for BT CAL");
+
+                       mci_hw->bt_state = MCI_BT_CAL;
+
+                       /*
+                        * MCI FIX: disable mci interrupt here. This is to avoid
+                        * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and
+                        * lead to mci_intr reentry.
+                        */
+
+                       ar9003_mci_disable_interrupt(ah);
+
+                       ath_dbg(common, ATH_DBG_MCI, "send WLAN_CAL_GRANT");
+                       MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+                       ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+                                               16, true, false);
+
+                       ath_dbg(common, ATH_DBG_MCI, "\nMCI BT is calibrating");
+
+                       /* Wait BT calibration to be completed for 25ms */
+
+                       if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+                                                                 0, 25000))
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI got BT_CAL_DONE\n");
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI ### BT cal takes too long, force"
+                                       " bt_state to be bt_awake\n");
+                       mci_hw->bt_state = MCI_BT_AWAKE;
+                       /* MCI FIX: enable mci interrupt here */
+                       ar9003_mci_enable_interrupt(ah);
+
+                       return 0;
+               }
+       }
+
 
        if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
                return -EIO;
@@ -1550,12 +1610,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                if (ath9k_hw_channel_change(ah, chan)) {
                        ath9k_hw_loadnf(ah, ah->curchan);
                        ath9k_hw_start_nfcal(ah, true);
+                       if (mci && mci_hw->ready)
+                               ar9003_mci_2g5g_switch(ah, true);
+
                        if (AR_SREV_9271(ah))
                                ar9002_hw_load_ani_reg(ah, chan);
                        return 0;
                }
        }
 
+       if (mci) {
+               ar9003_mci_disable_interrupt(ah);
+
+               if (mci_hw->ready && !save_fullsleep) {
+                       ar9003_mci_mute_bt(ah);
+                       udelay(20);
+                       REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+               }
+
+               mci_hw->bt_state = MCI_BT_SLEEP;
+               mci_hw->ready = false;
+       }
+
+
        saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
        if (saveDefAntenna == 0)
                saveDefAntenna = 1;
@@ -1611,6 +1688,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (r)
                return r;
 
+       if (mci)
+               ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
        /*
         * Some AR91xx SoC devices frequently fail to accept TSF writes
         * right after the chip reset. When that happens, write a new
@@ -1728,6 +1808,55 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ath9k_hw_loadnf(ah, chan);
        ath9k_hw_start_nfcal(ah, true);
 
+       if (mci && mci_hw->ready) {
+
+               if (IS_CHAN_2GHZ(chan) &&
+                   (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+                       if (ar9003_mci_check_int(ah,
+                           AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+                           ar9003_mci_check_int(ah,
+                           AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+                               /*
+                                * BT is sleeping. Check if BT wakes up during
+                                * WLAN calibration. If BT wakes up during
+                                * WLAN calibration, need to go through all
+                                * message exchanges again and recal.
+                                */
+
+                               ath_dbg(common, ATH_DBG_MCI, "MCI BT wakes up"
+                                       " during WLAN calibration\n");
+
+                               REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+                                         AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+                                         AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+                               ath_dbg(common, ATH_DBG_MCI, "MCI send"
+                                       " REMOTE_RESET\n");
+                               ar9003_mci_remote_reset(ah, true);
+                               ar9003_mci_send_sys_waking(ah, true);
+                               udelay(1);
+                               if (IS_CHAN_2GHZ(chan))
+                                       ar9003_mci_send_lna_transfer(ah, true);
+
+                               mci_hw->bt_state = MCI_BT_AWAKE;
+
+                               ath_dbg(common, ATH_DBG_MCI, "MCI re-cal\n");
+
+                               if (caldata) {
+                                       caldata->done_txiqcal_once = false;
+                                       caldata->done_txclcal_once = false;
+                                       caldata->rtt_hist.num_readings = 0;
+                               }
+
+                               if (!ath9k_hw_init_cal(ah, chan))
+                                       return -EIO;
+
+                       }
+               }
+               ar9003_mci_enable_interrupt(ah);
+       }
+
        ENABLE_REGWRITE_BUFFER(ah);
 
        ath9k_hw_restore_chainmask(ah);
@@ -1770,6 +1899,21 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        if (ah->btcoex_hw.enabled)
                ath9k_hw_btcoex_enable(ah);
 
+       if (mci && mci_hw->ready) {
+               /*
+                * check BT state again to make
+                * sure it's not changed.
+                */
+
+               ar9003_mci_sync_bt_state(ah);
+               ar9003_mci_2g5g_switch(ah, true);
+
+               if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+                               (mci_hw->query_bt == true)) {
+                       mci_hw->need_flush_btinfo = true;
+               }
+       }
+
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                ar9003_hw_bb_watchdog_config(ah);
 
@@ -1934,6 +2078,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
 {
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
        int status = true, setChip = true;
        static const char *modes[] = {
                "AWAKE",
@@ -1951,12 +2096,35 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
        switch (mode) {
        case ATH9K_PM_AWAKE:
                status = ath9k_hw_set_power_awake(ah, setChip);
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+                       REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
                break;
        case ATH9K_PM_FULL_SLEEP:
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+                       if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+                               (mci->bt_state != MCI_BT_SLEEP) &&
+                               !mci->halted_bt_gpm) {
+                               ath_dbg(common, ATH_DBG_MCI, "MCI halt BT GPM"
+                                               " (full_sleep)");
+                               ar9003_mci_send_coex_halt_bt_gpm(ah,
+                                                                true, true);
+                       }
+
+                       mci->ready = false;
+                       REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+               }
+
                ath9k_set_power_sleep(ah, setChip);
                ah->chip_fullsleep = true;
                break;
        case ATH9K_PM_NETWORK_SLEEP:
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+                       REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
                ath9k_set_power_network_sleep(ah, setChip);
                break;
        default:
@@ -2149,6 +2317,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
        if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
                chip_chainmask = 1;
+       else if (AR_SREV_9462(ah))
+               chip_chainmask = 3;
        else if (!AR_SREV_9280_20_OR_LATER(ah))
                chip_chainmask = 7;
        else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2234,7 +2404,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
 
        if (common->btcoex_enabled) {
-               if (AR_SREV_9300_20_OR_LATER(ah)) {
+               if (AR_SREV_9462(ah))
+                       btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+               else if (AR_SREV_9300_20_OR_LATER(ah)) {
                        btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
                        btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
                        btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2332,11 +2504,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
                ah->enabled_cals |= TX_IQ_CAL;
-               if (!AR_SREV_9330(ah))
+               if (AR_SREV_9485_OR_LATER(ah))
                        ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
        }
        if (AR_SREV_9462(ah))
-               pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+               pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
 
        return 0;
 }
@@ -2584,7 +2756,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
        struct ath9k_channel *chan = ah->curchan;
        struct ieee80211_channel *channel = chan->chan;
 
-       reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
+       reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
        if (test)
                channel->max_power = MAX_RATE_POWER / 2;
 
index f389b3c..36968c0 100644 (file)
@@ -59,9 +59,6 @@
 #define AT9285_COEX3WIRE_SA_SUBSYSID   0x30aa
 #define AT9285_COEX3WIRE_DA_SUBSYSID   0x30ab
 
-#define AR9300_NUM_BT_WEIGHTS   4
-#define AR9300_NUM_WLAN_WEIGHTS 4
-
 #define ATH_AMPDU_LIMIT_MAX        (64 * 1024 - 1)
 
 #define        ATH_DEFAULT_NOISE_FLOOR -95
 #define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL  4
 #define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED    5
 #define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED      6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA      0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK       0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA        0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK         0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX           0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX           0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX           9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX           8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE      0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA        0x1e
 
 #define AR_GPIOD_MASK               0x00001FFF
 #define AR_GPIO_BIT(_gpio)          (1 << (_gpio))
@@ -203,6 +210,7 @@ enum ath9k_hw_caps {
        ATH9K_HW_CAP_5GHZ                       = BIT(14),
        ATH9K_HW_CAP_APM                        = BIT(15),
        ATH9K_HW_CAP_RTT                        = BIT(16),
+       ATH9K_HW_CAP_MCI                        = BIT(17),
 };
 
 struct ath9k_hw_capabilities {
@@ -268,6 +276,7 @@ enum ath9k_int {
        ATH9K_INT_TX = 0x00000040,
        ATH9K_INT_TXDESC = 0x00000080,
        ATH9K_INT_TIM_TIMER = 0x00000100,
+       ATH9K_INT_MCI = 0x00000200,
        ATH9K_INT_BB_WATCHDOG = 0x00000400,
        ATH9K_INT_TXURN = 0x00000800,
        ATH9K_INT_MIB = 0x00001000,
@@ -419,6 +428,161 @@ enum ath9k_rx_qtype {
        ATH9K_RX_QUEUE_MAX,
 };
 
+enum mci_message_header {              /* length of payload */
+       MCI_LNA_CTRL     = 0x10,        /* len = 0 */
+       MCI_CONT_NACK    = 0x20,        /* len = 0 */
+       MCI_CONT_INFO    = 0x30,        /* len = 4 */
+       MCI_CONT_RST     = 0x40,        /* len = 0 */
+       MCI_SCHD_INFO    = 0x50,        /* len = 16 */
+       MCI_CPU_INT      = 0x60,        /* len = 4 */
+       MCI_SYS_WAKING   = 0x70,        /* len = 0 */
+       MCI_GPM          = 0x80,        /* len = 16 */
+       MCI_LNA_INFO     = 0x90,        /* len = 1 */
+       MCI_LNA_STATE    = 0x94,
+       MCI_LNA_TAKE     = 0x98,
+       MCI_LNA_TRANS    = 0x9c,
+       MCI_SYS_SLEEPING = 0xa0,        /* len = 0 */
+       MCI_REQ_WAKE     = 0xc0,        /* len = 0 */
+       MCI_DEBUG_16     = 0xfe,        /* len = 2 */
+       MCI_REMOTE_RESET = 0xff         /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+       MCI_GPM_COEX_PROFILE_UNKNOWN,
+       MCI_GPM_COEX_PROFILE_RFCOMM,
+       MCI_GPM_COEX_PROFILE_A2DP,
+       MCI_GPM_COEX_PROFILE_HID,
+       MCI_GPM_COEX_PROFILE_BNEP,
+       MCI_GPM_COEX_PROFILE_VOICE,
+       MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+       MCI_GPM_COEX_W_GPM_PAYLOAD      = 1,
+       MCI_GPM_COEX_B_GPM_TYPE         = 4,
+       MCI_GPM_COEX_B_GPM_OPCODE       = 5,
+       /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+       MCI_GPM_WLAN_CAL_W_SEQUENCE     = 2,
+
+       /* MCI_GPM_COEX_VERSION_QUERY */
+       /* MCI_GPM_COEX_VERSION_RESPONSE */
+       MCI_GPM_COEX_B_MAJOR_VERSION    = 6,
+       MCI_GPM_COEX_B_MINOR_VERSION    = 7,
+       /* MCI_GPM_COEX_STATUS_QUERY */
+       MCI_GPM_COEX_B_BT_BITMAP        = 6,
+       MCI_GPM_COEX_B_WLAN_BITMAP      = 7,
+       /* MCI_GPM_COEX_HALT_BT_GPM */
+       MCI_GPM_COEX_B_HALT_STATE       = 6,
+       /* MCI_GPM_COEX_WLAN_CHANNELS */
+       MCI_GPM_COEX_B_CHANNEL_MAP      = 6,
+       /* MCI_GPM_COEX_BT_PROFILE_INFO */
+       MCI_GPM_COEX_B_PROFILE_TYPE     = 6,
+       MCI_GPM_COEX_B_PROFILE_LINKID   = 7,
+       MCI_GPM_COEX_B_PROFILE_STATE    = 8,
+       MCI_GPM_COEX_B_PROFILE_ROLE     = 9,
+       MCI_GPM_COEX_B_PROFILE_RATE     = 10,
+       MCI_GPM_COEX_B_PROFILE_VOTYPE   = 11,
+       MCI_GPM_COEX_H_PROFILE_T        = 12,
+       MCI_GPM_COEX_B_PROFILE_W        = 14,
+       MCI_GPM_COEX_B_PROFILE_A        = 15,
+       /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+       MCI_GPM_COEX_B_STATUS_TYPE      = 6,
+       MCI_GPM_COEX_B_STATUS_LINKID    = 7,
+       MCI_GPM_COEX_B_STATUS_STATE     = 8,
+       /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+       MCI_GPM_COEX_W_BT_FLAGS         = 6,
+       MCI_GPM_COEX_B_BT_FLAGS_OP      = 10
+};
+
+enum mci_gpm_subtype {
+       MCI_GPM_BT_CAL_REQ      = 0,
+       MCI_GPM_BT_CAL_GRANT    = 1,
+       MCI_GPM_BT_CAL_DONE     = 2,
+       MCI_GPM_WLAN_CAL_REQ    = 3,
+       MCI_GPM_WLAN_CAL_GRANT  = 4,
+       MCI_GPM_WLAN_CAL_DONE   = 5,
+       MCI_GPM_COEX_AGENT      = 0x0c,
+       MCI_GPM_RSVD_PATTERN    = 0xfe,
+       MCI_GPM_RSVD_PATTERN32  = 0xfefefefe,
+       MCI_GPM_BT_DEBUG        = 0xff
+};
+
+enum mci_bt_state {
+       MCI_BT_SLEEP,
+       MCI_BT_AWAKE,
+       MCI_BT_CAL_START,
+       MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+       MCI_STATE_ENABLE,
+       MCI_STATE_INIT_GPM_OFFSET,
+       MCI_STATE_NEXT_GPM_OFFSET,
+       MCI_STATE_LAST_GPM_OFFSET,
+       MCI_STATE_BT,
+       MCI_STATE_SET_BT_SLEEP,
+       MCI_STATE_SET_BT_AWAKE,
+       MCI_STATE_SET_BT_CAL_START,
+       MCI_STATE_SET_BT_CAL,
+       MCI_STATE_LAST_SCHD_MSG_OFFSET,
+       MCI_STATE_REMOTE_SLEEP,
+       MCI_STATE_CONT_RSSI_POWER,
+       MCI_STATE_CONT_PRIORITY,
+       MCI_STATE_CONT_TXRX,
+       MCI_STATE_RESET_REQ_WAKE,
+       MCI_STATE_SEND_WLAN_COEX_VERSION,
+       MCI_STATE_SET_BT_COEX_VERSION,
+       MCI_STATE_SEND_WLAN_CHANNELS,
+       MCI_STATE_SEND_VERSION_QUERY,
+       MCI_STATE_SEND_STATUS_QUERY,
+       MCI_STATE_NEED_FLUSH_BT_INFO,
+       MCI_STATE_SET_CONCUR_TX_PRI,
+       MCI_STATE_RECOVER_RX,
+       MCI_STATE_NEED_FTP_STOMP,
+       MCI_STATE_NEED_TUNING,
+       MCI_STATE_DEBUG,
+       MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+       MCI_GPM_COEX_VERSION_QUERY,
+       MCI_GPM_COEX_VERSION_RESPONSE,
+       MCI_GPM_COEX_STATUS_QUERY,
+       MCI_GPM_COEX_HALT_BT_GPM,
+       MCI_GPM_COEX_WLAN_CHANNELS,
+       MCI_GPM_COEX_BT_PROFILE_INFO,
+       MCI_GPM_COEX_BT_STATUS_UPDATE,
+       MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE  0
+#define MCI_GPM_MORE    1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm)        do {                      \
+       *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+                               MCI_GPM_RSVD_PATTERN32;   \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm)   \
+       (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+       (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type)        do {                       \
+       *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do {              \
+       *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff;    \
+       *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
 struct ath9k_beacon_state {
        u32 bs_nexttbtt;
        u32 bs_nextdtim;
@@ -791,8 +955,6 @@ struct ath_hw {
 
        /* Bluetooth coexistance */
        struct ath_btcoex_hw btcoex_hw;
-       u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
-       u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
 
        u32 intr_txqs;
        u8 txchainmask;
@@ -948,7 +1110,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
 void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
 void ath9k_hw_setopmode(struct ath_hw *ah);
 void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
 void ath9k_hw_write_associd(struct ath_hw *ah);
 u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1041,6 +1202,32 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
 void ath9k_hw_proc_mib_event(struct ath_hw *ah);
 void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
 
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+                            u32 *payload, u8 len, bool wait_done,
+                            bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+                     u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+                                     bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+                           u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+                     bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+                             u32 *rx_msg_intr);
+
 #define ATH9K_CLOCK_RATE_CCK           22
 #define ATH9K_CLOCK_RATE_5GHZ_OFDM     40
 #define ATH9K_CLOCK_RATE_2GHZ_OFDM     44
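For readers new to the GPM format introduced above: each GPM entry is 16 bytes, byte 4 carries the subtype, byte 5 the opcode, and the second dword is the payload word that is stamped with the reserved pattern once consumed. The stand-alone user-space sketch below mirrors MCI_GPM_SET_TYPE_OPCODE()/MCI_GPM_TYPE()/MCI_GPM_OPCODE()/MCI_GPM_RECYCLE(); the u8/u32 typedefs, the main() wrapper and the printf are illustrative only and not part of the driver, while the subtype/opcode values come from the enums in this patch.

/*
 * Stand-alone sketch (user space, not driver code): how the MCI_GPM_*
 * accessors above index one 16-byte GPM entry.  u8/u32 stand in for the
 * kernel types; the subtype/opcode values are MCI_GPM_COEX_AGENT and
 * MCI_GPM_COEX_BT_PROFILE_INFO from the enums in this patch.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;

#define MCI_GPM_COEX_W_GPM_PAYLOAD  1          /* u32 index of payload word */
#define MCI_GPM_COEX_B_GPM_TYPE     4          /* byte offset of subtype    */
#define MCI_GPM_COEX_B_GPM_OPCODE   5          /* byte offset of opcode     */
#define MCI_GPM_RSVD_PATTERN32      0xfefefefe

int main(void)
{
	u32 gpm[4] = { 0 };                    /* one 16-byte GPM entry */

	/* MCI_GPM_SET_TYPE_OPCODE(): stamp the subtype and opcode bytes */
	((u8 *)gpm)[MCI_GPM_COEX_B_GPM_TYPE]   = 0x0c; /* MCI_GPM_COEX_AGENT */
	((u8 *)gpm)[MCI_GPM_COEX_B_GPM_OPCODE] = 0x05; /* BT_PROFILE_INFO    */

	/* MCI_GPM_TYPE()/MCI_GPM_OPCODE() read the same bytes back */
	printf("type=0x%02x opcode=0x%02x\n",
	       ((u8 *)gpm)[MCI_GPM_COEX_B_GPM_TYPE],
	       ((u8 *)gpm)[MCI_GPM_COEX_B_GPM_OPCODE]);

	/* MCI_GPM_RECYCLE(): mark the payload word as consumed */
	gpm[MCI_GPM_COEX_W_GPM_PAYLOAD] = MCI_GPM_RSVD_PATTERN32;
	return 0;
}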
index d4c909f..41b72fa 100644 (file)
@@ -258,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
 
        if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
                max_streams = 1;
+       else if (AR_SREV_9462(ah))
+               max_streams = 2;
        else if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
@@ -408,6 +410,7 @@ fail:
 static int ath9k_init_btcoex(struct ath_softc *sc)
 {
        struct ath_txq *txq;
+       struct ath_hw *ah = sc->sc_ah;
        int r;
 
        switch (sc->sc_ah->btcoex_hw.scheme) {
@@ -425,6 +428,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
                ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
                sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
                break;
+       case ATH_BTCOEX_CFG_MCI:
+               sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+               sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+               INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+               r = ath_mci_setup(sc);
+               if (r)
+                       return r;
+
+               if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+                       ah->btcoex_hw.mci.ready = false;
+                       ah->btcoex_hw.mci.bt_state = 0;
+                       ah->btcoex_hw.mci.bt_ver_major = 3;
+                       ah->btcoex_hw.mci.bt_ver_minor = 0;
+                       ah->btcoex_hw.mci.bt_version_known = false;
+                       ah->btcoex_hw.mci.update_2g5g = true;
+                       ah->btcoex_hw.mci.is_2g = true;
+                       ah->btcoex_hw.mci.wlan_channels_update = false;
+                       ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+                       ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+                       ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+                       ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+                       ah->btcoex_hw.mci.query_bt = true;
+                       ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+                       ah->btcoex_hw.mci.halted_bt_gpm = false;
+                       ah->btcoex_hw.mci.need_flush_btinfo = false;
+                       ah->btcoex_hw.mci.wlan_cal_seq = 0;
+                       ah->btcoex_hw.mci.wlan_cal_done = 0;
+                       ah->btcoex_hw.mci.config = 0x2201;
+               }
+               break;
        default:
                WARN_ON(1);
                break;
@@ -695,6 +729,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+       hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
 
        hw->queues = 4;
        hw->max_rates = 4;
@@ -836,6 +871,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
            sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
 
+       if (sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_MCI)
+               ath_mci_cleanup(sc);
+
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
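The ATH_BTCOEX_CFG_MCI branch above relies on ath_mci_setup() (added in mci.c further down in this patch) to provide the schedule and GPM buffers handed to ar9003_mci_setup(). As a rough illustration of the layout that code establishes, here is a user-space approximation that uses malloc() in place of dma_alloc_coherent(); the sizes come from mci.h in this patch, everything else is a sketch rather than driver code.

/*
 * User-space layout sketch (malloc instead of dma_alloc_coherent): one
 * contiguous allocation holds the 16*16-byte schedule buffer followed by
 * the 16-entry GPM ring, mirroring what ath_mci_setup() sets up.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define ATH_MCI_SCHED_BUF_SIZE	(16 * 16)      /* 16 entries, 4 dwords each */
#define ATH_MCI_GPM_BUF_SIZE	(16 * 16)      /* 16 GPM entries            */
#define MCI_GPM_RSVD_PATTERN	0xfe

int main(void)
{
	size_t total = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
	uint8_t *sched = malloc(total);
	uint8_t *gpm;

	if (!sched)
		return 1;

	/* both regions start out filled with the reserved pattern */
	memset(sched, MCI_GPM_RSVD_PATTERN, total);

	/* the GPM ring lives immediately after the schedule buffer */
	gpm = sched + ATH_MCI_SCHED_BUF_SIZE;
	(void)gpm;	/* hardware is told gpm_buf.bf_len >> 4 == 16 entries */

	free(sched);	/* one free releases both regions */
	return 0;
}

Keeping both regions inside a single coherent allocation is why the cleanup path only has to release the schedule buffer to free everything.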
index ecdb6fd..0e4fbb3 100644 (file)
@@ -760,7 +760,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
                return true;
 
        host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
-       if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+       if (((host_isr & AR_INTR_MAC_IRQ) ||
+            (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+           (host_isr != AR_INTR_SPURIOUS))
                return true;
 
        host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -798,6 +801,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_default = AR_INTR_SYNC_DEFAULT;
+       u32 async_mask;
 
        if (!(ah->imask & ATH9K_INT_GLOBAL))
                return;
@@ -812,13 +816,16 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
        if (AR_SREV_9340(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
 
+       async_mask = AR_INTR_MAC_IRQ;
+
+       if (ah->imask & ATH9K_INT_MCI)
+               async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
        ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
        REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        if (!AR_SREV_9100(ah)) {
-               REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
-                         AR_INTR_MAC_IRQ);
-               REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+               REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+               REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
 
                REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
                REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
index d2348a5..7d92004 100644 (file)
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
        if (--sc->ps_usecount != 0)
                goto unlock;
 
-       if (sc->ps_idle)
+       if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
                mode = ATH9K_PM_FULL_SLEEP;
        else if (sc->ps_enabled &&
                 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -332,7 +332,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
                hchan = ah->curchan;
        }
 
-       if (fastcc && !ath9k_hw_check_alive(ah))
+       if (fastcc && (ah->chip_fullsleep ||
+           !ath9k_hw_check_alive(ah)))
                fastcc = false;
 
        if (!ath_prepare_reset(sc, retry_tx, flush))
@@ -561,7 +562,6 @@ void ath_ani_calibrate(unsigned long data)
        /* Long calibration runs independently of short calibration. */
        if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
                longcal = true;
-               ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
                common->ani.longcal_timer = timestamp;
        }
 
@@ -569,8 +569,6 @@ void ath_ani_calibrate(unsigned long data)
        if (!common->ani.caldone) {
                if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
                        shortcal = true;
-                       ath_dbg(common, ATH_DBG_ANI,
-                               "shortcal @%lu\n", jiffies);
                        common->ani.shortcal_timer = timestamp;
                        common->ani.resetcal_timer = timestamp;
                }
@@ -584,8 +582,9 @@ void ath_ani_calibrate(unsigned long data)
        }
 
        /* Verify whether we must check ANI */
-       if ((timestamp - common->ani.checkani_timer) >=
-            ah->config.ani_poll_interval) {
+       if (sc->sc_ah->config.enable_ani
+           && (timestamp - common->ani.checkani_timer) >=
+           ah->config.ani_poll_interval) {
                aniflag = true;
                common->ani.checkani_timer = timestamp;
        }
@@ -605,6 +604,11 @@ void ath_ani_calibrate(unsigned long data)
                                                ah->rxchainmask, longcal);
        }
 
+       ath_dbg(common, ATH_DBG_ANI,
+               "Calibration @%lu finished: %s %s %s, caldone: %s\n", jiffies,
+               longcal ? "long" : "", shortcal ? "short" : "",
+               aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
        ath9k_ps_restore(sc);
 
 set_timer:
@@ -630,7 +634,8 @@ set_timer:
        }
 }
 
-static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
+static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
+                           struct ieee80211_vif *vif)
 {
        struct ath_node *an;
        an = (struct ath_node *)sta->drv_priv;
@@ -640,6 +645,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
        list_add(&an->list, &sc->nodes);
        spin_unlock(&sc->nodes_lock);
        an->sta = sta;
+       an->vif = vif;
 #endif
        if (sc->sc_flags & SC_OP_TXAGGR) {
                ath_tx_node_init(sc, an);
@@ -740,6 +746,9 @@ void ath9k_tasklet(unsigned long data)
                if (status & ATH9K_INT_GENTIMER)
                        ath_gen_timer_isr(sc->sc_ah);
 
+       if (status & ATH9K_INT_MCI)
+               ath_mci_intr(sc);
+
 out:
        /* re-enable hardware interrupt */
        ath9k_hw_enable_interrupts(ah);
@@ -762,7 +771,8 @@ irqreturn_t ath_isr(int irq, void *dev)
                ATH9K_INT_BMISS |               \
                ATH9K_INT_CST |                 \
                ATH9K_INT_TSFOOR |              \
-               ATH9K_INT_GENTIMER)
+               ATH9K_INT_GENTIMER |            \
+               ATH9K_INT_MCI)
 
        struct ath_softc *sc = dev;
        struct ath_hw *ah = sc->sc_ah;
@@ -880,82 +890,6 @@ chip_reset:
 #undef SCHED_INTR
 }
 
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_channel *channel = hw->conf.channel;
-       int r;
-
-       ath9k_ps_wakeup(sc);
-       spin_lock_bh(&sc->sc_pcu_lock);
-       atomic_set(&ah->intr_ref_cnt, -1);
-
-       ath9k_hw_configpcipowersave(ah, false);
-
-       if (!ah->curchan)
-               ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
-       r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
-       if (r) {
-               ath_err(common,
-                       "Unable to reset channel (%u MHz), reset status %d\n",
-                       channel->center_freq, r);
-       }
-
-       ath_complete_reset(sc, true);
-
-       /* Enable LED */
-       ath9k_hw_cfg_output(ah, ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-       ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
-       spin_unlock_bh(&sc->sc_pcu_lock);
-
-       ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ieee80211_channel *channel = hw->conf.channel;
-       int r;
-
-       ath9k_ps_wakeup(sc);
-
-       ath_cancel_work(sc);
-
-       spin_lock_bh(&sc->sc_pcu_lock);
-
-       /*
-        * Keep the LED on when the radio is disabled
-        * during idle unassociated state.
-        */
-       if (!sc->ps_idle) {
-               ath9k_hw_set_gpio(ah, ah->led_pin, 1);
-               ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-       }
-
-       ath_prepare_reset(sc, false, true);
-
-       if (!ah->curchan)
-               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
-       r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
-       if (r) {
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Unable to reset channel (%u MHz), reset status %d\n",
-                       channel->center_freq, r);
-       }
-
-       ath9k_hw_phy_disable(ah);
-
-       ath9k_hw_configpcipowersave(ah, true);
-
-       spin_unlock_bh(&sc->sc_pcu_lock);
-       ath9k_ps_restore(sc);
-}
-
 static int ath_reset(struct ath_softc *sc, bool retry_tx)
 {
        int r;
@@ -1091,6 +1025,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
         * and then setup of the interrupt mask.
         */
        spin_lock_bh(&sc->sc_pcu_lock);
+
+       atomic_set(&ah->intr_ref_cnt, -1);
+
        r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (r) {
                ath_err(common,
@@ -1117,6 +1054,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
        if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                ah->imask |= ATH9K_INT_CST;
 
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+               ah->imask |= ATH9K_INT_MCI;
+
        sc->sc_flags &= ~SC_OP_INVALID;
        sc->sc_ah->is_monitoring = false;
 
@@ -1129,12 +1069,25 @@ static int ath9k_start(struct ieee80211_hw *hw)
                goto mutex_unlock;
        }
 
+       if (ah->led_pin >= 0) {
+               ath9k_hw_cfg_output(ah, ah->led_pin,
+                                   AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+               ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+       }
+
+       /*
+        * Reset key cache to sane defaults (all entries cleared) instead of
+        * semi-random values after suspend/resume.
+        */
+       ath9k_cmn_init_crypto(sc->sc_ah);
+
        spin_unlock_bh(&sc->sc_pcu_lock);
 
        if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
            !ah->btcoex_hw.enabled) {
-               ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
-                                          AR_STOMP_LOW_WLAN_WGHT);
+               if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+                       ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+                                                  AR_STOMP_LOW_WLAN_WGHT);
                ath9k_hw_btcoex_enable(ah);
 
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -1173,6 +1126,13 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                }
        }
 
+       /*
+        * Cannot tx while the hardware is in full sleep, it first needs a full
+        * chip reset to recover from that
+        */
+       if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+               goto exit;
+
        if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
                /*
                 * We are using PS-Poll and mac80211 can request TX while in
@@ -1219,6 +1179,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
+       bool prev_idle;
 
        mutex_lock(&sc->mutex);
 
@@ -1237,6 +1198,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
                ath9k_hw_btcoex_disable(ah);
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
                        ath9k_btcoex_timer_pause(sc);
+               ath_mci_flush_profile(&sc->btcoex.mci);
        }
 
        spin_lock_bh(&sc->sc_pcu_lock);
@@ -1248,35 +1210,45 @@ static void ath9k_stop(struct ieee80211_hw *hw)
         * before setting the invalid flag. */
        ath9k_hw_disable_interrupts(ah);
 
-       if (!(sc->sc_flags & SC_OP_INVALID)) {
-               ath_drain_all_txq(sc, false);
-               ath_stoprecv(sc);
-               ath9k_hw_phy_disable(ah);
-       } else
-               sc->rx.rxlink = NULL;
+       spin_unlock_bh(&sc->sc_pcu_lock);
+
+       /* we can now sync irq and kill any running tasklets, since we already
+        * disabled interrupts and not holding a spin lock */
+       synchronize_irq(sc->irq);
+       tasklet_kill(&sc->intr_tq);
+       tasklet_kill(&sc->bcon_tasklet);
+
+       prev_idle = sc->ps_idle;
+       sc->ps_idle = true;
+
+       spin_lock_bh(&sc->sc_pcu_lock);
+
+       if (ah->led_pin >= 0) {
+               ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+               ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+       }
+
+       ath_prepare_reset(sc, false, true);
 
        if (sc->rx.frag) {
                dev_kfree_skb_any(sc->rx.frag);
                sc->rx.frag = NULL;
        }
 
-       /* disable HAL and put h/w to sleep */
-       ath9k_hw_disable(ah);
+       if (!ah->curchan)
+               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
 
-       spin_unlock_bh(&sc->sc_pcu_lock);
+       ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+       ath9k_hw_phy_disable(ah);
 
-       /* we can now sync irq and kill any running tasklets, since we already
-        * disabled interrupts and not holding a spin lock */
-       synchronize_irq(sc->irq);
-       tasklet_kill(&sc->intr_tq);
-       tasklet_kill(&sc->bcon_tasklet);
+       ath9k_hw_configpcipowersave(ah, true);
 
-       ath9k_ps_restore(sc);
+       spin_unlock_bh(&sc->sc_pcu_lock);
 
-       sc->ps_idle = true;
-       ath_radio_disable(sc, hw);
+       ath9k_ps_restore(sc);
 
        sc->sc_flags |= SC_OP_INVALID;
+       sc->ps_idle = prev_idle;
 
        mutex_unlock(&sc->mutex);
 
@@ -1616,8 +1588,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &hw->conf;
-       bool disable_radio = false;
 
+       ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
        /*
@@ -1628,13 +1600,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
         */
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
                sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-               if (!sc->ps_idle) {
-                       ath_radio_enable(sc, hw);
-                       ath_dbg(common, ATH_DBG_CONFIG,
-                               "not-idle: enabling radio\n");
-               } else {
-                       disable_radio = true;
-               }
+               if (sc->ps_idle)
+                       ath_cancel_work(sc);
        }
 
        /*
@@ -1741,18 +1708,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                ath_dbg(common, ATH_DBG_CONFIG,
                        "Set power: %d\n", conf->power_level);
                sc->config.txpowlimit = 2 * conf->power_level;
-               ath9k_ps_wakeup(sc);
                ath9k_cmn_update_txpow(ah, sc->curtxpow,
                                       sc->config.txpowlimit, &sc->curtxpow);
-               ath9k_ps_restore(sc);
-       }
-
-       if (disable_radio) {
-               ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
-               ath_radio_disable(sc, hw);
        }
 
        mutex_unlock(&sc->mutex);
+       ath9k_ps_restore(sc);
 
        return 0;
 }
@@ -1798,7 +1759,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
        struct ath_node *an = (struct ath_node *) sta->drv_priv;
        struct ieee80211_key_conf ps_key = { };
 
-       ath_node_attach(sc, sta);
+       ath_node_attach(sc, sta, vif);
 
        if (vif->type != NL80211_IFTYPE_AP &&
            vif->type != NL80211_IFTYPE_AP_VLAN)
@@ -2320,9 +2281,6 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
                return;
        }
 
-       if (drop)
-               timeout = 1;
-
        for (j = 0; j < timeout; j++) {
                bool npend = false;
 
@@ -2340,21 +2298,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
                }
 
                if (!npend)
-                   goto out;
+                   break;
        }
 
-       ath9k_ps_wakeup(sc);
-       spin_lock_bh(&sc->sc_pcu_lock);
-       drain_txq = ath_drain_all_txq(sc, false);
-       spin_unlock_bh(&sc->sc_pcu_lock);
+       if (drop) {
+               ath9k_ps_wakeup(sc);
+               spin_lock_bh(&sc->sc_pcu_lock);
+               drain_txq = ath_drain_all_txq(sc, false);
+               spin_unlock_bh(&sc->sc_pcu_lock);
 
-       if (!drain_txq)
-               ath_reset(sc, false);
+               if (!drain_txq)
+                       ath_reset(sc, false);
 
-       ath9k_ps_restore(sc);
-       ieee80211_wake_queues(hw);
+               ath9k_ps_restore(sc);
+               ieee80211_wake_queues(hw);
+       }
 
-out:
        ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
        mutex_unlock(&sc->mutex);
 }
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
new file mode 100644 (file)
index 0000000..d678040
--- /dev/null
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "ath9k.h"
+#include "mci.h"
+
+u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+
+static struct ath_mci_profile_info*
+ath_mci_find_profile(struct ath_mci_profile *mci,
+                    struct ath_mci_profile_info *info)
+{
+       struct ath_mci_profile_info *entry;
+
+       list_for_each_entry(entry, &mci->info, list) {
+               if (entry->conn_handle == info->conn_handle)
+                       return entry;
+       }
+       return NULL;
+}
+
+static bool ath_mci_add_profile(struct ath_common *common,
+                               struct ath_mci_profile *mci,
+                               struct ath_mci_profile_info *info)
+{
+       struct ath_mci_profile_info *entry;
+
+       if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
+           (info->type == MCI_GPM_COEX_PROFILE_VOICE)) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Too many SCO profiles, failed to add new profile\n");
+               return false;
+       }
+
+       if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
+           (info->type != MCI_GPM_COEX_PROFILE_VOICE)) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Too many ACL profiles, failed to add new profile\n");
+               return false;
+       }
+
+       entry = ath_mci_find_profile(mci, info);
+
+       if (entry)
+               memcpy(entry, info, 10);
+       else {
+               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry)
+                       return false;
+
+               memcpy(entry, info, 10);
+               INC_PROF(mci, info);
+               list_add_tail(&entry->list, &mci->info);
+       }
+       return true;
+}
+
+static void ath_mci_del_profile(struct ath_common *common,
+                               struct ath_mci_profile *mci,
+                               struct ath_mci_profile_info *info)
+{
+       struct ath_mci_profile_info *entry;
+
+       entry = ath_mci_find_profile(mci, info);
+
+       if (!entry) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Profile to be deleted not found\n");
+               return;
+       }
+       DEC_PROF(mci, entry);
+       list_del(&entry->list);
+       kfree(entry);
+}
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci)
+{
+       struct ath_mci_profile_info *info, *tinfo;
+
+       list_for_each_entry_safe(info, tinfo, &mci->info, list) {
+               list_del(&info->list);
+               DEC_PROF(mci, info);
+               kfree(info);
+       }
+       mci->aggr_limit = 0;
+}
+
+static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
+{
+       struct ath_mci_profile *mci = &btcoex->mci;
+       u32 wlan_airtime = btcoex->btcoex_period *
+                               (100 - btcoex->duty_cycle) / 100;
+
+       /*
+        * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
+        * When wlan_airtime is less than 4 ms, the aggregation limit has to
+        * be reduced to half of wlan_airtime so that an aggregate can fit
+        * into the WLAN slot without colliding with BT traffic.
+        */
+       if ((wlan_airtime <= 4) &&
+           (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
+               mci->aggr_limit = 2 * wlan_airtime;
+}
+
+static void ath_mci_update_scheme(struct ath_softc *sc)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_btcoex *btcoex = &sc->btcoex;
+       struct ath_mci_profile *mci = &btcoex->mci;
+       struct ath_mci_profile_info *info;
+       u32 num_profile = NUM_PROF(mci);
+
+       if (num_profile == 1) {
+               info = list_first_entry(&mci->info,
+                                       struct ath_mci_profile_info,
+                                       list);
+               if (mci->num_sco && info->T == 12) {
+                       mci->aggr_limit = 8;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "Single SCO, aggregation limit 2 ms\n");
+               } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
+                          !info->master) {
+                       btcoex->btcoex_period = 60;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "Single slave PAN/FTP, bt period 60 ms\n");
+               } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
+                        (info->T > 0 && info->T < 50) &&
+                        (info->A > 1 || info->W > 1)) {
+                       btcoex->duty_cycle = 30;
+                       mci->aggr_limit = 8;
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "Multiple attempt/timeout single HID "
+                               "aggregation limit 2 ms dutycycle 30%%\n");
+               }
+       } else if ((num_profile == 2) && (mci->num_hid == 2)) {
+               btcoex->duty_cycle = 30;
+               mci->aggr_limit = 8;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
+       } else if (num_profile > 3) {
+               mci->aggr_limit = 6;
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Three or more profiles aggregation limit 1.5 ms\n");
+       }
+
+       if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
+               if (IS_CHAN_HT(sc->sc_ah->curchan))
+                       ath_mci_adjust_aggr_limit(btcoex);
+               else
+                       btcoex->btcoex_period >>= 1;
+       }
+
+       ath9k_hw_btcoex_disable(sc->sc_ah);
+       ath9k_btcoex_timer_pause(sc);
+
+       if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
+               return;
+
+       btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
+       if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
+               btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
+
+       btcoex->btcoex_period *= 1000;
+       btcoex->btcoex_no_stomp =  btcoex->btcoex_period *
+                                       (100 - btcoex->duty_cycle) / 100;
+
+       ath9k_hw_btcoex_enable(sc->sc_ah);
+       ath9k_btcoex_timer_resume(sc);
+}
+
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 payload[4] = {0, 0, 0, 0};
+
+       switch (opcode) {
+       case MCI_GPM_BT_CAL_REQ:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_REQ\n");
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+                       ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+                       ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI State mismatches: %d\n",
+                               ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+               break;
+
+       case MCI_GPM_BT_CAL_DONE:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_DONE\n");
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+                       ath_dbg(common, ATH_DBG_MCI, "MCI error illegal!\n");
+               else
+                       ath_dbg(common, ATH_DBG_MCI, "MCI BT not in CAL state\n");
+
+               break;
+
+       case MCI_GPM_BT_CAL_GRANT:
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI received BT_CAL_GRANT\n");
+
+               /* Send WLAN_CAL_DONE for now */
+               ath_dbg(common, ATH_DBG_MCI, "MCI send WLAN_CAL_DONE\n");
+               MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+               ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+                                       16, false, true);
+               break;
+
+       default:
+               ath_dbg(common, ATH_DBG_MCI, "MCI Unknown GPM CAL message\n");
+               break;
+       }
+}
+
+void ath_mci_process_profile(struct ath_softc *sc,
+                            struct ath_mci_profile_info *info)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_btcoex *btcoex = &sc->btcoex;
+       struct ath_mci_profile *mci = &btcoex->mci;
+
+       if (info->start) {
+               if (!ath_mci_add_profile(common, mci, info))
+                       return;
+       } else
+               ath_mci_del_profile(common, mci, info);
+
+       btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+       mci->aggr_limit = mci->num_sco ? 6 : 0;
+       if (NUM_PROF(mci)) {
+               btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+               btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+       } else {
+               btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+                                                       ATH_BTCOEX_STOMP_LOW;
+               btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+       }
+
+       ath_mci_update_scheme(sc);
+}
+
+void ath_mci_process_status(struct ath_softc *sc,
+                           struct ath_mci_profile_status *status)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_btcoex *btcoex = &sc->btcoex;
+       struct ath_mci_profile *mci = &btcoex->mci;
+       struct ath_mci_profile_info info;
+       int i = 0, old_num_mgmt = mci->num_mgmt;
+
+       /* Link-type status updates are not handled */
+       if (status->is_link) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Skip link type status update\n");
+               return;
+       }
+
+       memset(&info, 0, sizeof(struct ath_mci_profile_info));
+
+       info.conn_handle = status->conn_handle;
+       if (ath_mci_find_profile(mci, &info)) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Skip non link state update for existing profile %d\n",
+                       status->conn_handle);
+               return;
+       }
+       if (status->conn_handle >= ATH_MCI_MAX_PROFILE) {
+               ath_dbg(common, ATH_DBG_MCI,
+                       "Ignore excess non-link status updates\n");
+               return;
+       }
+       if (status->is_critical)
+               __set_bit(status->conn_handle, mci->status);
+       else
+               __clear_bit(status->conn_handle, mci->status);
+
+       mci->num_mgmt = 0;
+       do {
+               if (test_bit(i, mci->status))
+                       mci->num_mgmt++;
+       } while (++i < ATH_MCI_MAX_PROFILE);
+
+       if (old_num_mgmt != mci->num_mgmt)
+               ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_mci_profile_info profile_info;
+       struct ath_mci_profile_status profile_status;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       u32 version;
+       u8 major;
+       u8 minor;
+       u32 seq_num;
+
+       switch (opcode) {
+
+       case MCI_GPM_COEX_VERSION_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Query.\n");
+               version = ar9003_mci_state(ah,
+                               MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+               break;
+
+       case MCI_GPM_COEX_VERSION_RESPONSE:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Version Response.\n");
+               major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+               minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI BT Coex version: %d.%d\n", major, minor);
+               version = (major << 8) + minor;
+               version = ar9003_mci_state(ah,
+                         MCI_STATE_SET_BT_COEX_VERSION, &version);
+               break;
+
+       case MCI_GPM_COEX_STATUS_QUERY:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX Status Query = 0x%02x.\n",
+                       *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+               ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+               break;
+
+       case MCI_GPM_COEX_BT_PROFILE_INFO:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM Coex BT profile info\n");
+               memcpy(&profile_info,
+                      (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+               if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
+                   || (profile_info.type >=
+                                           MCI_GPM_COEX_PROFILE_MAX)) {
+
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "illegal profile type = %d, "
+                               "state = %d\n", profile_info.type,
+                               profile_info.start);
+                       break;
+               }
+
+               ath_mci_process_profile(sc, &profile_info);
+               break;
+
+       case MCI_GPM_COEX_BT_STATUS_UPDATE:
+               profile_status.is_link = *(rx_payload +
+                                          MCI_GPM_COEX_B_STATUS_TYPE);
+               profile_status.conn_handle = *(rx_payload +
+                                              MCI_GPM_COEX_B_STATUS_LINKID);
+               profile_status.is_critical = *(rx_payload +
+                                              MCI_GPM_COEX_B_STATUS_STATE);
+
+               seq_num = *((u32 *)(rx_payload + 12));
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Recv GPM COEX BT_Status_Update: "
+                       "is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+                       profile_status.is_link, profile_status.conn_handle,
+                       profile_status.is_critical, seq_num);
+
+               ath_mci_process_status(sc, &profile_status);
+               break;
+
+       default:
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI Unknown GPM COEX message = 0x%02x\n", opcode);
+               break;
+       }
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+       int error = 0;
+
+       buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+                                         &buf->bf_paddr, GFP_KERNEL);
+
+       if (buf->bf_addr == NULL) {
+               error = -ENOMEM;
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       memset(buf, 0, sizeof(*buf));
+       return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+       if (buf->bf_addr) {
+               dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+                                                       buf->bf_paddr);
+               memset(buf, 0, sizeof(*buf));
+       }
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_mci_coex *mci = &sc->mci_coex;
+       int error = 0;
+
+       mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+       if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+               ath_dbg(common, ATH_DBG_FATAL, "MCI buffer alloc failed\n");
+               error = -ENOMEM;
+               goto fail;
+       }
+
+       mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+       memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+                                               mci->sched_buf.bf_len);
+
+       mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+       mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+                                                       mci->sched_buf.bf_len;
+       mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+       /* initialize the buffer */
+       memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+       ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+                        mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+                        mci->sched_buf.bf_paddr);
+fail:
+       return error;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_mci_coex *mci = &sc->mci_coex;
+
+       /*
+        * both schedule and gpm buffers will be released
+        */
+       ath_mci_buf_free(sc, &mci->sched_buf);
+       ar9003_mci_cleanup(ah);
+}
+
+void ath_mci_intr(struct ath_softc *sc)
+{
+       struct ath_mci_coex *mci = &sc->mci_coex;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 mci_int, mci_int_rxmsg;
+       u32 offset, subtype, opcode;
+       u32 *pgpm;
+       u32 more_data = MCI_GPM_MORE;
+       bool skip_gpm = false;
+
+       ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+       if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+               ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI interrupt but MCI disabled\n");
+
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+                       mci_int, mci_int_rxmsg);
+               return;
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+               u32 payload[4] = { 0xffffffff, 0xffffffff,
+                                  0xffffffff, 0xffffff00};
+
+               /*
+                * The following REMOTE_RESET and SYS_WAKING used to be sent
+                * only when BT woke up. Now they are always sent, as a
+                * recovery method to reset BT MCI's RX alignment.
+                */
+               ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send REMOTE_RESET\n");
+
+               ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+                                       payload, 16, true, false);
+               ath_dbg(common, ATH_DBG_MCI, "MCI interrupt send SYS_WAKING\n");
+               ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+                                       NULL, 0, true, false);
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+               ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+               /*
+                * always do this for recovery and 2G/5G toggling and LNA_TRANS
+                */
+               ath_dbg(common, ATH_DBG_MCI, "MCI Set BT state to AWAKE.\n");
+               ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+       }
+
+       /* Processing SYS_WAKING/SYS_SLEEPING */
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+                       if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+                                       == MCI_BT_SLEEP)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI BT stays in sleep mode\n");
+                       else {
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI Set BT state to AWAKE.\n");
+                               ar9003_mci_state(ah,
+                                                MCI_STATE_SET_BT_AWAKE, NULL);
+                       }
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT stays in AWAKE mode.\n");
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+               if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+                       if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+                                       == MCI_BT_AWAKE)
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI BT stays in AWAKE mode.\n");
+                       else {
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI SetBT state to SLEEP\n");
+                               ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+                                                NULL);
+                       }
+               } else
+                       ath_dbg(common, ATH_DBG_MCI,
+                               "MCI BT stays in SLEEP mode\n");
+       }
+
+       if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+               ath_dbg(common, ATH_DBG_MCI, "MCI RX broken, skip GPM msgs\n");
+               ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+               skip_gpm = true;
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+               offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+                                         NULL);
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+               mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+               while (more_data == MCI_GPM_MORE) {
+
+                       pgpm = mci->gpm_buf.bf_addr;
+                       offset = ar9003_mci_state(ah,
+                                       MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+                       if (offset == MCI_GPM_INVALID)
+                               break;
+
+                       pgpm += (offset >> 2);
+
+                       /*
+                        * The first dword is a timer;
+                        * the real data starts at the 2nd dword.
+                        */
+
+                       subtype = MCI_GPM_TYPE(pgpm);
+                       opcode = MCI_GPM_OPCODE(pgpm);
+
+                       if (!skip_gpm) {
+
+                               if (MCI_GPM_IS_CAL_TYPE(subtype))
+                                       ath_mci_cal_msg(sc, subtype,
+                                                       (u8 *) pgpm);
+                               else {
+                                       switch (subtype) {
+                                       case MCI_GPM_COEX_AGENT:
+                                               ath_mci_msg(sc, opcode,
+                                                           (u8 *) pgpm);
+                                               break;
+                                       default:
+                                               break;
+                                       }
+                               }
+                       }
+                       MCI_GPM_RECYCLE(pgpm);
+               }
+       }
+
+       if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI LNA_INFO\n");
+               }
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+                       int value_dbm = ar9003_mci_state(ah,
+                                       MCI_STATE_CONT_RSSI_POWER, NULL);
+
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+                       if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI CONT_INFO: "
+                                       "(tx) pri = %d, pwr = %d dBm\n",
+                                       ar9003_mci_state(ah,
+                                               MCI_STATE_CONT_PRIORITY, NULL),
+                                       value_dbm);
+                       else
+                               ath_dbg(common, ATH_DBG_MCI,
+                                       "MCI CONT_INFO: "
+                                       "(rx) pri = %d, pwr = %d dBm\n",
+                                       ar9003_mci_state(ah,
+                                               MCI_STATE_CONT_PRIORITY, NULL),
+                                       value_dbm);
+               }
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI CONT_NACK\n");
+               }
+
+               if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+                       mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+                       ath_dbg(common, ATH_DBG_MCI, "MCI CONT_RST\n");
+               }
+       }
+
+       if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+           (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+               mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+                            AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+       if (mci_int_rxmsg & 0xfffffffe)
+               ath_dbg(common, ATH_DBG_MCI,
+                       "MCI not processed mci_int_rxmsg = 0x%x\n",
+                       mci_int_rxmsg);
+}
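To make the units in ath_mci_update_scheme() and ath_mci_adjust_aggr_limit() above concrete: btcoex_period is kept in milliseconds until the final multiply by 1000, aggr_limit counts 0.25 ms units, and duty_cycle is a percentage taken from ath_mci_duty_cycle[]. The stand-alone sketch below walks that arithmetic for one assumed case (a single non-SCO profile, so duty_cycle = ath_mci_duty_cycle[1] = 50 and the period starts at ATH_MCI_DEF_BT_PERIOD = 40 ms); it is a worked example, not driver code.

/*
 * Worked example (user space, not driver code) of the scheme arithmetic:
 * assume one non-SCO profile, HT operation on a 2.4 GHz channel.
 */
#include <stdio.h>

int main(void)
{
	unsigned int btcoex_period = 40;  /* ATH_MCI_DEF_BT_PERIOD, in ms      */
	unsigned int duty_cycle    = 50;  /* ath_mci_duty_cycle[1], in percent */
	unsigned int aggr_limit    = 0;   /* in 0.25 ms units, 0 = unlimited   */

	/* WLAN airtime per coex period, in ms (ath_mci_adjust_aggr_limit) */
	unsigned int wlan_airtime = btcoex_period * (100 - duty_cycle) / 100;

	/*
	 * Below 4 ms of airtime the A-MPDU duration is capped at half of it;
	 * 2 * wlan_airtime is that half expressed in 0.25 ms units.
	 */
	if (wlan_airtime <= 4 &&
	    (!aggr_limit || aggr_limit > 2 * wlan_airtime))
		aggr_limit = 2 * wlan_airtime;

	/* final programming values: period and no-stomp window in us */
	unsigned int period_us   = btcoex_period * 1000;
	unsigned int no_stomp_us = period_us * (100 - duty_cycle) / 100;

	printf("airtime=%u ms, aggr=%u (x0.25 ms), period=%u us, no_stomp=%u us\n",
	       wlan_airtime, aggr_limit, period_us, no_stomp_us);
	return 0;
}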
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
new file mode 100644 (file)
index 0000000..b71bded
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MCI_H
+#define MCI_H
+
+#define ATH_MCI_SCHED_BUF_SIZE         (16 * 16) /* 16 entries, 4 dword each */
+#define ATH_MCI_GPM_MAX_ENTRY          16
+#define ATH_MCI_GPM_BUF_SIZE           (ATH_MCI_GPM_MAX_ENTRY * 16)
+#define ATH_MCI_DEF_BT_PERIOD          40
+#define ATH_MCI_BDR_DUTY_CYCLE         20
+#define ATH_MCI_MAX_DUTY_CYCLE         90
+
+#define ATH_MCI_DEF_AGGR_LIMIT         6 /* in 0.25 ms */
+#define ATH_MCI_MAX_ACL_PROFILE                7
+#define ATH_MCI_MAX_SCO_PROFILE                1
+#define ATH_MCI_MAX_PROFILE            (ATH_MCI_MAX_ACL_PROFILE +\
+                                        ATH_MCI_MAX_SCO_PROFILE)
+
+#define INC_PROF(_mci, _info) do {              \
+               switch (_info->type) {           \
+               case MCI_GPM_COEX_PROFILE_RFCOMM:\
+                       _mci->num_other_acl++;   \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_A2DP:  \
+                       _mci->num_a2dp++;        \
+                       if (!_info->edr)         \
+                               _mci->num_bdr++; \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_HID:   \
+                       _mci->num_hid++;         \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_BNEP:  \
+                       _mci->num_pan++;         \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_VOICE: \
+                       _mci->num_sco++;         \
+                       break;                   \
+               default:                         \
+                       break;                   \
+               }                                \
+       } while (0)
+
+#define DEC_PROF(_mci, _info) do {              \
+               switch (_info->type) {           \
+               case MCI_GPM_COEX_PROFILE_RFCOMM:\
+                       _mci->num_other_acl--;   \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_A2DP:  \
+                       _mci->num_a2dp--;        \
+                       if (!_info->edr)         \
+                               _mci->num_bdr--; \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_HID:   \
+                       _mci->num_hid--;         \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_BNEP:  \
+                       _mci->num_pan--;         \
+                       break;                   \
+               case MCI_GPM_COEX_PROFILE_VOICE: \
+                       _mci->num_sco--;         \
+                       break;                   \
+               default:                         \
+                       break;                   \
+               }                                \
+       } while (0)
+
+#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \
+                        _mci->num_hid + _mci->num_pan + _mci->num_sco)
+
+struct ath_mci_profile_info {
+       u8 type;
+       u8 conn_handle;
+       bool start;
+       bool master;
+       bool edr;
+       u8 voice_type;
+       u16 T;          /* Voice: Tvoice, HID: Tsniff,        in slots */
+       u8 W;           /* Voice: Wvoice, HID: Sniff timeout, in slots */
+       u8 A;           /*                HID: Sniff attempt, in slots */
+       struct list_head list;
+};
+
+struct ath_mci_profile_status {
+       bool is_critical;
+       bool is_link;
+       u8 conn_handle;
+};
+
+struct ath_mci_profile {
+       struct list_head info;
+       DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE);
+       u16 aggr_limit;
+       u8 num_mgmt;
+       u8 num_sco;
+       u8 num_a2dp;
+       u8 num_hid;
+       u8 num_pan;
+       u8 num_other_acl;
+       u8 num_bdr;
+};
+
+
+struct ath_mci_buf {
+       void *bf_addr;          /* virtual addr of desc */
+       dma_addr_t bf_paddr;    /* physical addr of buffer */
+       u32 bf_len;             /* len of data */
+};
+
+struct ath_mci_coex {
+       atomic_t mci_cal_flag;
+       struct ath_mci_buf sched_buf;
+       struct ath_mci_buf gpm_buf;
+       u32 bt_cal_start;
+};
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci);
+void ath_mci_process_profile(struct ath_softc *sc,
+                            struct ath_mci_profile_info *info);
+void ath_mci_process_status(struct ath_softc *sc,
+                           struct ath_mci_profile_status *status);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
+#endif
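
For reference, a minimal sketch of how the INC_PROF/DEC_PROF/NUM_PROF helpers defined above are meant to be driven when a Bluetooth profile report arrives; the caller, the field values, and the aggr_limit reset are hypothetical and not taken from this patch:

static void example_account_profile(struct ath_mci_profile *mci,
                                    struct ath_mci_profile_info *info)
{
        /* Hypothetical report: an EDR A2DP link starting up. */
        info->type = MCI_GPM_COEX_PROFILE_A2DP;
        info->edr = true;

        if (info->start)
                INC_PROF(mci, info);    /* bumps num_a2dp; num_bdr only if !edr */
        else
                DEC_PROF(mci, info);    /* reverse of the above */

        /* With no profiles left, a caller might drop any BT-imposed limit. */
        if (!NUM_PROF(mci))
                mci->aggr_limit = 0;
}
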
index 2dcdf63..a439edc 100644 (file)
@@ -307,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct ath_softc *sc = hw->priv;
 
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
        /* The device has to be moved to FULLSLEEP forcibly.
         * Otherwise the chip never moved to full sleep,
         * when no interface is up.
         */
+       ath9k_hw_disable(sc->sc_ah);
        ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
 
        return 0;
@@ -321,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
 static int ath_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
-       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-       struct ath_softc *sc = hw->priv;
        u32 val;
 
        /*
@@ -334,22 +331,6 @@ static int ath_pci_resume(struct device *device)
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-       ath9k_ps_wakeup(sc);
-       /* Enable LED */
-       ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
-                           AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-       ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
-         /*
-          * Reset key cache to sane defaults (all entries cleared) instead of
-          * semi-random values after suspend/resume.
-          */
-       ath9k_cmn_init_crypto(sc->sc_ah);
-       ath9k_ps_restore(sc);
-
-       sc->ps_idle = true;
-       ath_radio_disable(sc, hw);
-
        return 0;
 }
 
index 67b862c..e031841 100644 (file)
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 
        return rfilt;
 
-#undef RX_FILTER_PRESERVE
 }
 
 int ath_startrecv(struct ath_softc *sc)
@@ -1838,11 +1837,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                if (sc->sc_flags & SC_OP_RXFLUSH)
                        goto requeue_drop_frag;
 
-               retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
-                                                rxs, &decrypt_error);
-               if (retval)
-                       goto requeue_drop_frag;
-
                rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
                if (rs.rs_tstamp > tsf_lower &&
                    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1846,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
                        rxs->mactime += 0x100000000ULL;
 
+               retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+                                                rxs, &decrypt_error);
+               if (retval)
+                       goto requeue_drop_frag;
+
                /* Ensure we always have an skb to requeue once we are done
                 * processing the current buffer's skb */
                requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1922,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        skb = hdr_skb;
                }
 
-               /*
-                * change the default rx antenna if rx diversity chooses the
-                * other antenna 3 times in a row.
-                */
-               if (sc->rx.defant != rs.rs_antenna) {
-                       if (++sc->rx.rxotherant >= 3)
-                               ath_setdefantenna(sc, rs.rs_antenna);
-               } else {
-                       sc->rx.rxotherant = 0;
+
+               if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+                       /*
+                        * change the default rx antenna if rx diversity
+                        * chooses the other antenna 3 times in a row.
+                        */
+                       if (sc->rx.defant != rs.rs_antenna) {
+                               if (++sc->rx.rxotherant >= 3)
+                                       ath_setdefantenna(sc, rs.rs_antenna);
+                       } else {
+                               sc->rx.rxotherant = 0;
+                       }
+
                }
 
                if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
index 8fcb7e9..6e2f188 100644 (file)
@@ -1006,6 +1006,8 @@ enum {
 #define AR_INTR_ASYNC_MASK                       (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
 #define AR_INTR_ASYNC_MASK_GPIO                  0xFFFC0000
 #define AR_INTR_ASYNC_MASK_GPIO_S                18
+#define AR_INTR_ASYNC_MASK_MCI                   0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S                 7
 
 #define AR_INTR_SYNC_MASK                        (AR_SREV_9340(ah) ? 0x401c : 0x4034)
 #define AR_INTR_SYNC_MASK_GPIO                   0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
 
 #define AR_INTR_ASYNC_CAUSE_CLR                  (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
 #define AR_INTR_ASYNC_CAUSE                      (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI                         0x00000080
+#define AR_INTR_ASYNC_USED                      (AR_INTR_MAC_IRQ | \
+                                                 AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI         0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S       7
+
 
 #define AR_INTR_ASYNC_ENABLE                     (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
 #define AR_INTR_ASYNC_ENABLE_GPIO                0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
 #define AR_RTC_INTR_MASK \
        ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
 
+#define AR_RTC_KEEP_AWAKE      0x7034
+
 /* RTC_DERIVED_* - only for AR9100 */
 
 #define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
 #define AR_DIAG_FRAME_NV0           0x00020000
 #define AR_DIAG_OBS_PT_SEL1         0x000C0000
 #define AR_DIAG_OBS_PT_SEL1_S       18
+#define AR_DIAG_OBS_PT_SEL2         0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S       27
 #define AR_DIAG_FORCE_RX_CLEAR      0x00100000 /* force rx_clear high */
 #define AR_DIAG_IGNORE_VIRT_CS      0x00200000
 #define AR_DIAG_FORCE_CH_IDLE_HIGH  0x00400000
@@ -1752,19 +1766,10 @@ enum {
 
 #define AR_BT_COEX_WL_WEIGHTS0     0x8174
 #define AR_BT_COEX_WL_WEIGHTS1     0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i)  (0x83ac + (_i << 2))
 
-#define AR_BT_COEX_BT_WEIGHTS0     0x83ac
-#define AR_BT_COEX_BT_WEIGHTS1     0x83b0
-#define AR_BT_COEX_BT_WEIGHTS2     0x83b4
-#define AR_BT_COEX_BT_WEIGHTS3     0x83b8
-
-#define AR9300_BT_WGHT                     0xcccc4444
-#define AR9300_STOMP_ALL_WLAN_WGHT0        0xfffffff0
-#define AR9300_STOMP_ALL_WLAN_WGHT1        0xfffffff0
-#define AR9300_STOMP_LOW_WLAN_WGHT0        0x88888880
-#define AR9300_STOMP_LOW_WLAN_WGHT1        0x88888880
-#define AR9300_STOMP_NONE_WLAN_WGHT0       0x00000000
-#define AR9300_STOMP_NONE_WLAN_WGHT1       0x00000000
+#define AR9300_BT_WGHT             0xcccc4444
 
 #define AR_BT_COEX_MODE2           0x817c
 #define AR_BT_BCN_MISS_THRESH      0x000000ff
@@ -1938,37 +1943,277 @@ enum {
 #define AR_PHY_AGC_CONTROL_YCOK_MAX_S          6
 
 /* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN             0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET    0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S  0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL     0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S   1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK       0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S     2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO       0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S     3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST        0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S      4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO       0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S     5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT         0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S       6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM             0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S           8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO        0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S      9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING    0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S  10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING      0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S    11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE        0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S      12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK        (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
+
+#define AR_MCI_COMMAND0                                0x1800
+#define AR_MCI_COMMAND0_HEADER                 0xFF
+#define AR_MCI_COMMAND0_HEADER_S               0
+#define AR_MCI_COMMAND0_LEN                    0x1f00
+#define AR_MCI_COMMAND0_LEN_S                  8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP      0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S    13
+
+#define AR_MCI_COMMAND1                                0x1804
+
+#define AR_MCI_COMMAND2                                0x1808
+#define AR_MCI_COMMAND2_RESET_TX               0x01
+#define AR_MCI_COMMAND2_RESET_TX_S             0
+#define AR_MCI_COMMAND2_RESET_RX               0x02
+#define AR_MCI_COMMAND2_RESET_RX_S             1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES     0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S   2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP        0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S      10
+
+#define AR_MCI_RX_CTRL                         0x180c
+
+#define AR_MCI_TX_CTRL                         0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV                 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S               0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE      0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S    2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ                0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S      3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM         0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S       24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE                    0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM           0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S         0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR                0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S      16
+
+#define AR_MCI_SCHD_TABLE_0                            0x1818
+#define AR_MCI_SCHD_TABLE_1                            0x181c
+#define AR_MCI_GPM_0                                   0x1820
+#define AR_MCI_GPM_1                                   0x1824
+#define AR_MCI_GPM_WRITE_PTR                           0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S                         16
+#define AR_MCI_GPM_BUF_LEN                             0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S                           0
+
+#define AR_MCI_INTERRUPT_RAW                           0x1828
+#define AR_MCI_INTERRUPT_EN                            0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE                   0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S                 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG                   0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S                 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL                 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S               2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR                        0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S              3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL                        0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S              4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL                        0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S              5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL                        0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S              7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL                        0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S              8
+#define AR_MCI_INTERRUPT_RX_MSG                                0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S                      9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE           0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S         10
+#define AR_MCI_INTERRUPT_BT_PRI                                0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S                      11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH                 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S               27
+#define AR_MCI_INTERRUPT_BT_FREQ                       0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S                     28
+#define AR_MCI_INTERRUPT_BT_STOMP                      0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S                    29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ                    0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S                  30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT             0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S           31
+
+#define AR_MCI_INTERRUPT_DEFAULT    (AR_MCI_INTERRUPT_SW_MSG_DONE        | \
+                                    AR_MCI_INTERRUPT_RX_INVALID_HDR      | \
+                                    AR_MCI_INTERRUPT_RX_HW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_RX_SW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_TX_HW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_TX_SW_MSG_FAIL      | \
+                                    AR_MCI_INTERRUPT_RX_MSG              | \
+                                    AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+                                    AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+                                       AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+                                       AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+                                       AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT                          0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN                       0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW                    0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN                     0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET           0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S         0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL            0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S          1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK              0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S            2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO              0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S            3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST               0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S             4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO              0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S            5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT                        0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S              6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM                    0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S                  8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO               0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S             9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING           0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S         10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING             0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S           11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE               0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S             12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK         (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO  | \
                                          AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
                                          AR_MCI_INTERRUPT_RX_MSG_LNA_INFO   | \
                                          AR_MCI_INTERRUPT_RX_MSG_CONT_NACK  | \
                                          AR_MCI_INTERRUPT_RX_MSG_CONT_INFO  | \
                                          AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
 
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+                                        AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING  | \
+                                        AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+                                        AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+                                        AR_MCI_INTERRUPT_RX_MSG_LNA_INFO    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_NACK   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_INFO   | \
+                                        AR_MCI_INTERRUPT_RX_MSG_CONT_RST    | \
+                                        AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT                                 0x1840
+
+#define AR_MCI_RX_STATUS                       0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX          0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S                8
+#define AR_MCI_RX_REMOTE_SLEEP                 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S               12
+#define AR_MCI_RX_MCI_CLK_REQ                  0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S                        13
+
+#define AR_MCI_CONT_STATUS                     0x1848
+#define AR_MCI_CONT_RSSI_POWER                 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S               0
+#define AR_MCI_CONT_RRIORITY                   0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S                 8
+#define AR_MCI_CONT_TXRX                       0x00010000
+#define AR_MCI_CONT_TXRX_S                     16
+
+#define AR_MCI_BT_PRI0                         0x184c
+#define AR_MCI_BT_PRI1                         0x1850
+#define AR_MCI_BT_PRI2                         0x1854
+#define AR_MCI_BT_PRI3                         0x1858
+#define AR_MCI_BT_PRI                          0x185c
+#define AR_MCI_WL_FREQ0                                0x1860
+#define AR_MCI_WL_FREQ1                                0x1864
+#define AR_MCI_WL_FREQ2                                0x1868
+#define AR_MCI_GAIN                            0x186c
+#define AR_MCI_WBTIMER1                                0x1870
+#define AR_MCI_WBTIMER2                                0x1874
+#define AR_MCI_WBTIMER3                                0x1878
+#define AR_MCI_WBTIMER4                                0x187c
+#define AR_MCI_MAXGAIN                         0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL                 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0                  0x1888
+#define AR_MCI_HW_SCHD_TBL_D1                  0x188c
+#define AR_MCI_HW_SCHD_TBL_D2                  0x1890
+#define AR_MCI_HW_SCHD_TBL_D3                  0x1894
+#define AR_MCI_TX_PAYLOAD0                     0x1898
+#define AR_MCI_TX_PAYLOAD1                     0x189c
+#define AR_MCI_TX_PAYLOAD2                     0x18a0
+#define AR_MCI_TX_PAYLOAD3                     0x18a4
+#define AR_BTCOEX_WBTIMER                      0x18a8
+
+#define AR_BTCOEX_CTRL                                 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE                     0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S                   0
+#define AR_BTCOEX_CTRL_WBTIMER_EN                      0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S                    1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN                     0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S                   2
+#define AR_BTCOEX_CTRL_LNA_SHARED                      0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S                    3
+#define AR_BTCOEX_CTRL_PA_SHARED                       0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S                     4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN          0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S                5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN       0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S     6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS                    0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S                  7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK                   0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S                 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH                     0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S                   12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN                     0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S                   19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK                     0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S                   20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN                  0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S                        28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR                    0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S                  29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10                  0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S                        30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY                   0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S                 31
+
+#define AR_BTCOEX_WL_WEIGHTS0                          0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1                          0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2                          0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3                          0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x)                                (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA                               0x1940
+#define AR_BTCOEX_RFGAIN_CTRL                          0x1944
+
+#define AR_BTCOEX_CTRL2                                        0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH                   0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S                 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK                  0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S                        19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT                    0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S                  22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL                   0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S                 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL                 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S               24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE                0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S      25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE          0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S        0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL     0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S   1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT   0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN            0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S          17
+#define AR_GLB_DS_JTAG_DISABLE              0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S            18
+
+#define AR_BTCOEX_RC                    0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x)        (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG                   0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR          0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY          0x1a58
+
+#define AR_MCI_SCHD_TABLE_2             0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED   0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED    0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S  1
+
+#define AR_BTCOEX_CTRL3               0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT      0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S    0
+
 
 #endif
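
Each MCI/BTCOEX field above is declared as a mask plus a matching *_S shift; a minimal sketch of reading and rewriting such a field with plain bit operations (the helper names and values are hypothetical):

static u32 example_get_mci_len(u32 cmd0)
{
        /* Extract the LEN field using the mask/shift pair above. */
        return (cmd0 & AR_MCI_COMMAND0_LEN) >> AR_MCI_COMMAND0_LEN_S;
}

static u32 example_set_mci_len(u32 cmd0, u32 len)
{
        /* Clear the field, then pack the new value back into place. */
        cmd0 &= ~AR_MCI_COMMAND0_LEN;
        return cmd0 | ((len << AR_MCI_COMMAND0_LEN_S) & AR_MCI_COMMAND0_LEN);
}
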
index 03b0a65..9e65c31 100644 (file)
@@ -179,6 +179,11 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
                spin_lock_bh(&txq->axq_lock);
        }
 
+       if (tid->baw_head == tid->baw_tail) {
+               tid->state &= ~AGGR_ADDBA_COMPLETE;
+               tid->state &= ~AGGR_CLEANUP;
+       }
+
        spin_unlock_bh(&txq->axq_lock);
 }
 
@@ -556,15 +561,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                spin_unlock_bh(&txq->axq_lock);
        }
 
-       if (tid->state & AGGR_CLEANUP) {
+       if (tid->state & AGGR_CLEANUP)
                ath_tx_flush_tid(sc, tid);
 
-               if (tid->baw_head == tid->baw_tail) {
-                       tid->state &= ~AGGR_ADDBA_COMPLETE;
-                       tid->state &= ~AGGR_CLEANUP;
-               }
-       }
-
        rcu_read_unlock();
 
        if (needreset) {
@@ -601,6 +600,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
+       struct ath_mci_profile *mci = &sc->btcoex.mci;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0;
        int i;
@@ -645,7 +645,9 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;
 
-       if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+       if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+               aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+       else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
                aggr_limit = min((max_4ms_framelen * 3) / 8,
                                 (u32)ATH_AMPDU_LIMIT_MAX);
        else
@@ -1952,7 +1954,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                skb_pull(skb, padsize);
        }
 
-       if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+       if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
                sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
                ath_dbg(common, ATH_DBG_PS,
                        "Going back to sleep after having received TX status (0x%lx)\n",
index f06e069..5518592 100644 (file)
@@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw)
 
        mutex_lock(&ar->mutex);
        if (IS_ACCEPTING_CMD(ar)) {
-               rcu_assign_pointer(ar->beacon_iter, NULL);
+               RCU_INIT_POINTER(ar->beacon_iter, NULL);
 
                carl9170_led_set_state(ar, 0);
 
@@ -678,7 +678,7 @@ unlock:
                vif_priv->active = false;
                bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
                ar->vifs--;
-               rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+               RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
                list_del_rcu(&vif_priv->list);
                mutex_unlock(&ar->mutex);
                synchronize_rcu();
@@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
        WARN_ON(vif_priv->enable_beacon);
        vif_priv->enable_beacon = false;
        list_del_rcu(&vif_priv->list);
-       rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+       RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
 
        if (vif == main_vif) {
                rcu_read_unlock();
@@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
                }
 
                for (i = 0; i < CARL9170_NUM_TID; i++)
-                       rcu_assign_pointer(sta_info->agg[i], NULL);
+                       RCU_INIT_POINTER(sta_info->agg[i], NULL);
 
                sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
                sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
                        struct carl9170_sta_tid *tid_info;
 
                        tid_info = rcu_dereference(sta_info->agg[i]);
-                       rcu_assign_pointer(sta_info->agg[i], NULL);
+                       RCU_INIT_POINTER(sta_info->agg[i], NULL);
 
                        if (!tid_info)
                                continue;
@@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                        spin_unlock_bh(&ar->tx_ampdu_list_lock);
                }
 
-               rcu_assign_pointer(sta_info->agg[tid], NULL);
+               RCU_INIT_POINTER(sta_info->agg[tid], NULL);
                rcu_read_unlock();
 
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
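
The rcu_assign_pointer() to RCU_INIT_POINTER() conversions above all store NULL, where there is no freshly initialized object whose writes need ordering before publication; a hedged sketch of the distinction, using a hypothetical structure rather than anything from this driver:

struct example_entry;

struct example_table {
        struct example_entry __rcu *slot;       /* hypothetical */
};

static void example_clear_slot(struct example_table *t,
                               struct example_entry *old)
{
        /* Storing NULL publishes nothing a reader could see half-built,
         * so the barrier in rcu_assign_pointer() is not needed here. */
        RCU_INIT_POINTER(t->slot, NULL);

        /* Wait out existing readers before freeing the old entry. */
        synchronize_rcu();
        kfree(old);
}
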
index 59472e1..d19a9ee 100644 (file)
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
                         *    feedback either [CTL_REQ_TX_STATUS not set]
                         */
 
-                       dev_kfree_skb_any(skb);
+                       ieee80211_free_txskb(ar->hw, skb);
                        return;
                } else {
                        /*
@@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 err_free:
        ar->tx_dropped++;
-       dev_kfree_skb_any(skb);
+       ieee80211_free_txskb(ar->hw, skb);
 }
 
 void carl9170_tx_scheduler(struct ar9170 *ar)
index b44e309..d58aa1b 100644 (file)
@@ -26,7 +26,8 @@ DHDOFILES = \
        dhd_sdio.o      \
        dhd_linux.o \
        bcmsdh.o \
-       bcmsdh_sdmmc.o
+       bcmsdh_sdmmc.o \
+       sdio_chip.o
 
 obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
 brcmfmac-objs += $(DHDOFILES)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644 (file)
index d7d3afd..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE          0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE       0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE          0x18002000
-#define BCM4329_RAMSIZE                        0x48000
-/* firmware name */
-#define BCM4329_FW_NAME                        "brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME                        "brcm/bcm4329-fullmac-4.txt"
-
-#endif                         /* _bcmchip_h_ */
index 89ff94d..74933dc 100644 (file)
@@ -40,7 +40,8 @@
 
 static void brcmf_sdioh_irqhandler(struct sdio_func *func)
 {
-       struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+       struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
 
        brcmf_dbg(TRACE, "***IRQHandler\n");
 
@@ -222,19 +223,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
        return sdiodev->regfail;
 }
 
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags,
-                     u8 *buf, uint nbytes, struct sk_buff *pkt)
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+                                    uint flags, uint width, u32 *addr)
 {
-       int status;
-       uint incr_fix;
-       uint width;
-       uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+       uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;
 
-       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
        /* Async not implemented yet */
        if (flags & SDIO_REQ_ASYNC)
                return -ENOTSUPP;
@@ -247,29 +241,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                sdiodev->sbwad = bar0;
        }
 
-       addr &= SBSDIO_SB_OFT_ADDR_MASK;
+       *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+       if (width == 4)
+               *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+       return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, u8 *buf, uint nbytes)
+{
+       struct sk_buff *mypkt;
+       int err;
+
+       mypkt = brcmu_pkt_buf_get_skb(nbytes);
+       if (!mypkt) {
+               brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+                         nbytes);
+               return -EIO;
+       }
+
+       err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+       if (!err)
+               memcpy(buf, mypkt->data, nbytes);
+
+       brcmu_pkt_buf_free_skb(mypkt);
+       return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt)
+{
+       uint incr_fix;
+       uint width;
+       int err = 0;
+
+       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+                 fn, addr, pkt->len);
+
+       width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+       err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+       if (err)
+               return err;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+       err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+                                        fn, addr, pkt);
+
+       return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                           uint flags, struct sk_buff_head *pktq)
+{
+       uint incr_fix;
+       uint width;
+       int err = 0;
+
+       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+                 fn, addr, pktq->qlen);
+
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-       if (width == 4)
-               addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+       err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+       if (err)
+               return err;
 
-       status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
-                                           fn, addr, width, nbytes, buf, pkt);
+       incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+       err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+                                       pktq);
 
-       return status;
+       return err;
 }
 
 int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+                     uint flags, u8 *buf, uint nbytes)
+{
+       struct sk_buff *mypkt;
+       int err;
+
+       mypkt = brcmu_pkt_buf_get_skb(nbytes);
+       if (!mypkt) {
+               brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+                         nbytes);
+               return -EIO;
+       }
+
+       memcpy(mypkt->data, buf, nbytes);
+       err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+       brcmu_pkt_buf_free_skb(mypkt);
+       return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt)
 {
        uint incr_fix;
        uint width;
        uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;
 
-       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+       brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+                 fn, addr, pkt->len);
 
        /* Async not implemented yet */
        if (flags & SDIO_REQ_ASYNC)
@@ -291,18 +370,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
        return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
-                                         addr, width, nbytes, buf, pkt);
+                                         addr, pkt);
 }
 
 int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
                        u8 *buf, uint nbytes)
 {
+       struct sk_buff *mypkt;
+       bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+       int err;
+
        addr &= SBSDIO_SB_OFT_ADDR_MASK;
        addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-       return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
-               (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
-               addr, 4, nbytes, buf, NULL);
+       mypkt = brcmu_pkt_buf_get_skb(nbytes);
+       if (!mypkt) {
+               brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+                         nbytes);
+               return -EIO;
+       }
+
+       /* For a write, copy the buffer data into the packet. */
+       if (write)
+               memcpy(mypkt->data, buf, nbytes);
+
+       err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+                                        SDIO_FUNC_1, addr, mypkt);
+
+       /* For a read, copy the packet data back to the buffer. */
+       if (!err && !write)
+               memcpy(buf, mypkt->data, nbytes);
+
+       brcmu_pkt_buf_free_skb(mypkt);
+       return err;
 }
 
 int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -333,7 +433,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        sdiodev->sbwad = SI_ENUM_BASE;
 
        /* try to attach to the target device */
-       sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+       sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
        if (!sdiodev->bus) {
                brcmf_dbg(ERROR, "device attach failed\n");
                ret = -ENODEV;
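
With the reworked bcmsdh API above, flat-buffer callers go through the new skb-bounce wrappers while packet-aware callers hand in an sk_buff directly; a minimal sketch of both call shapes, with a hypothetical caller, address, and flag choice picked only to match the signatures above:

static int example_read_both_ways(struct brcmf_sdio_dev *sdiodev, u32 addr)
{
        u8 flat[64];
        struct sk_buff *skb;
        int err;

        /* Flat buffer: bounced through an skb inside brcmf_sdcard_recv_buf(). */
        err = brcmf_sdcard_recv_buf(sdiodev, addr, SDIO_FUNC_1,
                                    SDIO_REQ_4BYTE, flat, sizeof(flat));
        if (err)
                return err;

        /* Caller-owned packet: read straight into its data area. */
        skb = brcmu_pkt_buf_get_skb(sizeof(flat));
        if (!skb)
                return -ENOMEM;

        err = brcmf_sdcard_recv_pkt(sdiodev, addr, SDIO_FUNC_1,
                                    SDIO_REQ_4BYTE, skb);
        brcmu_pkt_buf_free_skb(skb);
        return err;
}
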
index bbaeb2d..b416e27 100644 (file)
@@ -204,62 +204,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
        return err_ret;
 }
 
+/* precondition: host controller is claimed */
 static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
-                          uint write, uint func, uint addr,
-                          struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+                        uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+       int err_ret = 0;
+
+       if ((write) && (!fifo)) {
+               err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+                                          ((u8 *) (pkt->data)), pktlen);
+       } else if (write) {
+               err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+                                          ((u8 *) (pkt->data)), pktlen);
+       } else if (fifo) {
+               err_ret = sdio_readsb(sdiodev->func[func],
+                                     ((u8 *) (pkt->data)), addr, pktlen);
+       } else {
+               err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+                                            ((u8 *) (pkt->data)),
+                                            addr, pktlen);
+       }
+
+       return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+                         uint write, uint func, uint addr,
+                         struct sk_buff_head *pktq)
 {
        bool fifo = (fix_inc == SDIOH_DATA_FIX);
        u32 SGCount = 0;
        int err_ret = 0;
 
-       struct sk_buff *pnext;
+       struct sk_buff *pkt;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
        /* Claim host controller */
        sdio_claim_host(sdiodev->func[func]);
-       for (pnext = pkt; pnext; pnext = pnext->next) {
-               uint pkt_len = pnext->len;
+
+       skb_queue_walk(pktq, pkt) {
+               uint pkt_len = pkt->len;
                pkt_len += 3;
                pkt_len &= 0xFFFFFFFC;
 
-               if ((write) && (!fifo)) {
-                       err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-                                                  ((u8 *) (pnext->data)),
-                                                  pkt_len);
-               } else if (write) {
-                       err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-                                                  ((u8 *) (pnext->data)),
-                                                  pkt_len);
-               } else if (fifo) {
-                       err_ret = sdio_readsb(sdiodev->func[func],
-                                             ((u8 *) (pnext->data)),
-                                             addr, pkt_len);
-               } else {
-                       err_ret = sdio_memcpy_fromio(sdiodev->func[func],
-                                                    ((u8 *) (pnext->data)),
-                                                    addr, pkt_len);
-               }
-
+               err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+                                                  addr, pkt, pkt_len);
                if (err_ret) {
                        brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
-                                 write ? "TX" : "RX", pnext, SGCount, addr,
+                                 write ? "TX" : "RX", pkt, SGCount, addr,
                                  pkt_len, err_ret);
                } else {
                        brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
-                                 write ? "TX" : "RX", pnext, SGCount, addr,
+                                 write ? "TX" : "RX", pkt, SGCount, addr,
                                  pkt_len);
                }
-
                if (!fifo)
                        addr += pkt_len;
-               SGCount++;
 
+               SGCount++;
        }
 
        /* Release host controller */
@@ -270,91 +283,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
 }
 
 /*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case,  it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
  */
 int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
                               uint fix_inc, uint write, uint func, uint addr,
-                              uint reg_width, uint buflen_u, u8 *buffer,
                               struct sk_buff *pkt)
 {
-       int Status;
-       struct sk_buff *mypkt = NULL;
+       int status;
+       uint pkt_len = pkt->len;
+       bool fifo = (fix_inc == SDIOH_DATA_FIX);
 
        brcmf_dbg(TRACE, "Enter\n");
 
+       if (pkt == NULL)
+               return -EINVAL;
+
        brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
-       /* Case 1: we don't have a packet. */
-       if (pkt == NULL) {
-               brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
-                         write ? "TX" : "RX", buflen_u);
-               mypkt = brcmu_pkt_buf_get_skb(buflen_u);
-               if (!mypkt) {
-                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
-                                 buflen_u);
-                       return -EIO;
-               }
-
-               /* For a write, copy the buffer data into the packet. */
-               if (write)
-                       memcpy(mypkt->data, buffer, buflen_u);
-
-               Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-                                                   func, addr, mypkt);
-
-               /* For a read, copy the packet data back to the buffer. */
-               if (!write)
-                       memcpy(buffer, mypkt->data, buflen_u);
-
-               brcmu_pkt_buf_free_skb(mypkt);
-       } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
-               /*
-                * Case 2: We have a packet, but it is unaligned.
-                * In this case, we cannot have a chain (pkt->next == NULL)
-                */
-               brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
-                         write ? "TX" : "RX", pkt->len);
-               mypkt = brcmu_pkt_buf_get_skb(pkt->len);
-               if (!mypkt) {
-                       brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
-                                 pkt->len);
-                       return -EIO;
-               }
-
-               /* For a write, copy the buffer data into the packet. */
-               if (write)
-                       memcpy(mypkt->data, pkt->data, pkt->len);
 
-               Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-                                                   func, addr, mypkt);
+       /* Claim host controller */
+       sdio_claim_host(sdiodev->func[func]);
 
-               /* For a read, copy the packet data back to the buffer. */
-               if (!write)
-                       memcpy(pkt->data, mypkt->data, mypkt->len);
+       pkt_len += 3;
+       pkt_len &= (uint)~3;
 
-               brcmu_pkt_buf_free_skb(mypkt);
-       } else {                /* case 3: We have a packet and
-                                it is aligned. */
-               brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
-                         write ? "Tx" : "Rx");
-               Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
-                                                   func, addr, pkt);
+       status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+                                          addr, pkt, pkt_len);
+       if (status) {
+               brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+                         write ? "TX" : "RX", pkt, addr, pkt_len, status);
+       } else {
+               brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+                         write ? "TX" : "RX", pkt, addr, pkt_len);
        }
 
-       return Status;
+       /* Release host controller */
+       sdio_release_host(sdiodev->func[func]);
+
+       return status;
 }
 
 /* Read client card reg */
@@ -494,6 +461,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 {
        int ret = 0;
        struct brcmf_sdio_dev *sdiodev;
+       struct brcmf_bus *bus_if;
        brcmf_dbg(TRACE, "Enter\n");
        brcmf_dbg(TRACE, "func->class=%x\n", func->class);
        brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,22 +473,31 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
                        brcmf_dbg(ERROR, "card private drvdata occupied\n");
                        return -ENXIO;
                }
+               bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+               if (!bus_if)
+                       return -ENOMEM;
                sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
-               if (!sdiodev)
+               if (!sdiodev) {
+                       kfree(bus_if);
                        return -ENOMEM;
+               }
+               sdiodev->dev = &func->card->dev;
                sdiodev->func[0] = func->card->sdio_func[0];
                sdiodev->func[1] = func;
-               dev_set_drvdata(&func->card->dev, sdiodev);
+               bus_if->bus_priv = sdiodev;
+               bus_if->type = SDIO_BUS;
+               dev_set_drvdata(&func->card->dev, bus_if);
 
                atomic_set(&sdiodev->suspend, false);
                init_waitqueue_head(&sdiodev->request_byte_wait);
                init_waitqueue_head(&sdiodev->request_word_wait);
-               init_waitqueue_head(&sdiodev->request_packet_wait);
+               init_waitqueue_head(&sdiodev->request_chain_wait);
                init_waitqueue_head(&sdiodev->request_buffer_wait);
        }
 
        if (func->num == 2) {
-               sdiodev = dev_get_drvdata(&func->card->dev);
+               bus_if = dev_get_drvdata(&func->card->dev);
+               sdiodev = bus_if->bus_priv;
                if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
                        return -ENODEV;
                sdiodev->func[2] = func;
@@ -534,6 +511,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
 
 static void brcmf_ops_sdio_remove(struct sdio_func *func)
 {
+       struct brcmf_bus *bus_if;
        struct brcmf_sdio_dev *sdiodev;
        brcmf_dbg(TRACE, "Enter\n");
        brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +520,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
        brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
 
        if (func->num == 2) {
-               sdiodev = dev_get_drvdata(&func->card->dev);
+               bus_if = dev_get_drvdata(&func->card->dev);
+               sdiodev = bus_if->bus_priv;
                brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
                brcmf_sdio_remove(sdiodev);
                dev_set_drvdata(&func->card->dev, NULL);
+               kfree(bus_if);
                kfree(sdiodev);
        }
 }
@@ -556,11 +536,12 @@ static int brcmf_sdio_suspend(struct device *dev)
        mmc_pm_flag_t sdio_flags;
        struct brcmf_sdio_dev *sdiodev;
        struct sdio_func *func = dev_to_sdio_func(dev);
+       struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
        int ret = 0;
 
        brcmf_dbg(TRACE, "\n");
 
-       sdiodev = dev_get_drvdata(&func->card->dev);
+       sdiodev = bus_if->bus_priv;
 
        atomic_set(&sdiodev->suspend, true);
 
@@ -585,8 +566,9 @@ static int brcmf_sdio_resume(struct device *dev)
 {
        struct brcmf_sdio_dev *sdiodev;
        struct sdio_func *func = dev_to_sdio_func(dev);
+       struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);
 
-       sdiodev = dev_get_drvdata(&func->card->dev);
+       sdiodev = bus_if->bus_priv;
        brcmf_sdio_wdtmr_enable(sdiodev, true);
        atomic_set(&sdiodev->suspend, false);
        return 0;
@@ -610,17 +592,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
 #endif /* CONFIG_PM_SLEEP */
 };
 
-/* bus register interface */
-int brcmf_bus_register(void)
+static void __exit brcmf_sdio_exit(void)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
-       return sdio_register_driver(&brcmf_sdmmc_driver);
+       sdio_unregister_driver(&brcmf_sdmmc_driver);
 }
 
-void brcmf_bus_unregister(void)
+static int __init brcmf_sdio_init(void)
 {
+       int ret;
+
        brcmf_dbg(TRACE, "Enter\n");
 
-       sdio_unregister_driver(&brcmf_sdmmc_driver);
+       ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+       if (ret)
+               brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+       return ret;
 }
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
index 4645766..b68d136 100644 (file)
@@ -87,7 +87,7 @@
 #define TOE_TX_CSUM_OL         0x00000001
 #define TOE_RX_CSUM_OL         0x00000002
 
-#define        BRCMF_BSS_INFO_VERSION  108 /* current ver of brcmf_bss_info struct */
+#define        BRCMF_BSS_INFO_VERSION  108 /* curr ver of brcmf_bss_info_le struct */
 
 /* size of brcmf_scan_params not including variable length array */
 #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
 
 /* For supporting multiple interfaces */
 #define BRCMF_MAX_IFS  16
-#define BRCMF_DEL_IF   -0xe
-#define BRCMF_BAD_IF   -0xf
 
 #define DOT11_BSSTYPE_ANY                      2
 #define DOT11_MAX_DEFAULT_KEYS 4
@@ -365,7 +363,7 @@ struct brcmf_pkt_filter_enable_le {
  * Applications MUST CHECK ie_offset field and length field to access IEs and
  * next bss_info structure in a vector (in struct brcmf_scan_results)
  */
-struct brcmf_bss_info {
+struct brcmf_bss_info_le {
        __le32 version;         /* version field */
        __le32 length;          /* byte length of data in this record,
                                 * starting at version and including IEs
@@ -466,14 +464,13 @@ struct brcmf_scan_results {
        u32 buflen;
        u32 version;
        u32 count;
-       struct brcmf_bss_info bss_info[1];
+       struct brcmf_bss_info_le bss_info_le[];
 };
 
 struct brcmf_scan_results_le {
        __le32 buflen;
        __le32 version;
        __le32 count;
-       struct brcmf_bss_info bss_info[1];
 };
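
The switch to a flexible array member means sizeof(struct brcmf_scan_results) now covers only the fixed header, which is why the separate FIXED_SIZE macro is dropped further down. As a minimal sketch (helper name and usage are assumptions, not part of this patch), the variable-length records can be walked using the length-field rule the struct comment spells out:

        static void example_walk_scan_results(struct brcmf_scan_results *results)
        {
                struct brcmf_bss_info_le *bss = results->bss_info_le;
                u32 i;

                for (i = 0; i < results->count; i++) {
                        /* inspect 'bss' here; each record is variable length and
                         * its 'length' field gives the total bytes to skip
                         */
                        bss = (struct brcmf_bss_info_le *)
                              ((u8 *)bss + le32_to_cpu(bss->length));
                }
        }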
 
 /* used for association with a specific BSSID and chanspec list */
@@ -493,10 +490,6 @@ struct brcmf_join_params {
        struct brcmf_assoc_params_le params_le;
 };
 
-/* size of brcmf_scan_results not including variable length array */
-#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
-       (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
-
 /* incremental scan results struct */
 struct brcmf_iscan_results {
        union {
@@ -511,7 +504,7 @@ struct brcmf_iscan_results {
 
 /* size of brcmf_iscan_results not including variable length array */
 #define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
-       (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+       (sizeof(struct brcmf_scan_results) + \
         offsetof(struct brcmf_iscan_results, results))
 
 struct brcmf_wsec_key {
@@ -578,8 +571,14 @@ struct brcmf_dcmd {
        uint needed;            /* bytes needed (optional) */
 };
 
+struct brcmf_bus {
+       u8 type;                /* bus type */
+       void *bus_priv;         /* pointer to bus private structure */
+       enum brcmf_bus_state state;
+};
+
 /* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus;              /* device bus info */
+struct brcmf_sdio;             /* device bus info */
 struct brcmf_proto;    /* device communication protocol info */
 struct brcmf_info;     /* device driver info */
 struct brcmf_cfg80211_dev; /* cfg80211 device info */
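
struct brcmf_bus is the new glue object each bus backend allocates and publishes as driver data on its device, so the common layer can reach the bus state and private data without knowing the bus type. A rough sketch of the registration side, mirroring the kfree(bus_if) teardown in the SDIO remove path earlier in this patch (function name and error handling are illustrative only):

        static int example_register_bus_if(struct device *dev, void *bus_priv)
        {
                struct brcmf_bus *bus_if;

                bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL);
                if (!bus_if)
                        return -ENOMEM;

                bus_if->bus_priv = bus_priv;    /* e.g. struct brcmf_sdio_dev */
                bus_if->state = BRCMF_BUS_DOWN;
                dev_set_drvdata(dev, bus_if);
                return 0;
        }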
@@ -587,15 +586,16 @@ struct brcmf_cfg80211_dev; /* cfg80211 device info */
 /* Common structure for module and instance linkage */
 struct brcmf_pub {
        /* Linkage pointers */
-       struct brcmf_bus *bus;
+       struct brcmf_sdio *bus;
+       struct brcmf_bus *bus_if;
        struct brcmf_proto *prot;
        struct brcmf_info *info;
        struct brcmf_cfg80211_dev *config;
+       struct device *dev;             /* fullmac dongle device pointer */
 
        /* Internal brcmf items */
        bool up;                /* Driver up/down (to OS) */
        bool txoff;             /* Transmit flow-controlled */
-       enum brcmf_bus_state busstate;
        uint hdrlen;            /* Total BRCMF header length (proto + bus) */
        uint maxctl;            /* Max size rxctl request from proto to bus */
        uint rxsz;              /* Rx buffer size bus module should use */
@@ -663,7 +663,6 @@ struct brcmf_pub {
 
        u8 country_code[BRCM_CNTRY_BUF_SZ];
        char eventmask[BRCMF_EVENTING_MASK_LEN];
-
 };
 
 struct brcmf_if_event {
@@ -688,8 +687,8 @@ extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
  * Returned structure should have bus and prot pointers filled in.
  * bus_hdrlen specifies required headroom for bus module header.
  */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
-                                     uint bus_hdrlen);
+extern struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus,
+                                     uint bus_hdrlen, struct device *dev);
 extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
 extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
@@ -706,7 +705,16 @@ extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
 extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
-                        struct sk_buff *rxp, int numpkt);
+                          struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct brcmf_pub *drvr, int ifidx,
+                                  struct sk_buff *pkt)
+{
+       struct sk_buff_head q;
+
+       skb_queue_head_init(&q);
+       skb_queue_tail(&q, pkt);
+       brcmf_rx_frame(drvr, ifidx, &q);
+}
 
 /* Return pointer to interface name */
 extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
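
brcmf_rx_frame() now takes a whole sk_buff_head rather than a linked packet chain plus count; single packets go through the brcmf_rx_packet() wrapper above. A minimal sketch (hypothetical helper, not part of the patch) of how a bus driver could hand over a batch of received frames:

        static void example_deliver_frames(struct brcmf_pub *drvr, int ifidx,
                                           struct sk_buff **pkts, int npkt)
        {
                struct sk_buff_head list;
                int i;

                skb_queue_head_init(&list);
                for (i = 0; i < npkt; i++)
                        skb_queue_tail(&list, pkts[i]);

                /* callee disposes of the queued packets */
                brcmf_rx_frame(drvr, ifidx, &list);
        }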
@@ -731,11 +739,8 @@ extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
                              void *pktdata, struct brcmf_event_msg *,
                              void **data_ptr);
 
-extern void brcmf_c_init(void);
-
 extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
-                       struct net_device *ndev, char *name, u8 *mac_addr,
-                       u32 flags, u8 bssidx);
+                       char *name, u8 *mac_addr);
 extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
 
 /* Send packet to dongle via data channel */
index a249407..1841f99 100644 (file)
  * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
  */
 
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
-
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
-
 /* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+extern void brcmf_sdbrcm_bus_stop(struct device *dev);
 
 /* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+extern int brcmf_sdbrcm_bus_init(struct device *dev);
 
 /* Send a data frame to the dongle.  Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+extern int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *txp);
 
 /* Send/receive a control message to/from the dongle.
  * Expects caller to enforce a single outstanding transaction.
  */
 extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen);
 
 extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
 #endif                         /* _BRCMF_BUS_H_ */
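
The bus entry points above now take a struct device * instead of a struct brcmf_bus *. As a rough sketch under the same assumptions as the SDIO suspend/resume handlers earlier in this patch (helper name is illustrative), an implementation can recover its private state from the device pointer:

        static struct brcmf_sdio_dev *example_dev_to_sdiodev(struct device *dev)
        {
                struct sdio_func *func = dev_to_sdio_func(dev);
                struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev);

                return bus_if->bus_priv;
        }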
index e34c5c3..a527d5d 100644 (file)
@@ -116,7 +116,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
                len = CDC_MAX_MSG_SIZE;
 
        /* Send request */
-       return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
+       return brcmf_sdbrcm_bus_txctl(drvr->dev, (unsigned char *)&prot->msg,
                                      len);
 }
 
@@ -128,7 +128,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
        brcmf_dbg(TRACE, "Enter\n");
 
        do {
-               ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+               ret = brcmf_sdbrcm_bus_rxctl(drvr->dev,
                                (unsigned char *)&prot->msg,
                                len + sizeof(struct brcmf_proto_cdc_dcmd));
                if (ret < 0)
@@ -280,7 +280,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
        struct brcmf_proto *prot = drvr->prot;
        int ret = -1;
 
-       if (drvr->busstate == BRCMF_BUS_DOWN) {
+       if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
                brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
                return ret;
        }
index 8918261..69f335a 100644 (file)
@@ -32,8 +32,6 @@
 #define PKTFILTER_BUF_SIZE             2048
 #define BRCMF_ARPOL_MODE               0xb     /* agent|snoop|peer_autoreply */
 
-int brcmf_msg_level;
-
 #define MSGTRACE_VERSION       1
 
 #define BRCMF_PKT_FILTER_FIXED_LEN     offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,19 +83,6 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
        return len;
 }
 
-void brcmf_c_init(void)
-{
-       /* Init global variables at run-time, not as part of the declaration.
-        * This is required to support init/de-init of the driver.
-        * Initialization
-        * of globals as part of the declaration results in non-deterministic
-        * behaviour since the value of the globals may be different on the
-        * first time that the driver is initialized vs subsequent
-        * initializations.
-        */
-       brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
 bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
                      struct sk_buff *pkt, int prec)
 {
@@ -488,10 +473,9 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
 
                if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
                        if (ifevent->action == BRCMF_E_IF_ADD)
-                               brcmf_add_if(drvr_priv, ifevent->ifidx, NULL,
+                               brcmf_add_if(drvr_priv, ifevent->ifidx,
                                             event->ifname,
-                                            pvt_data->eth.h_dest,
-                                            ifevent->flags, ifevent->bssidx);
+                                            pvt_data->eth.h_dest);
                        else
                                brcmf_del_if(drvr_priv, ifevent->ifidx);
                } else {
index 4acbac5..58d92bc 100644 (file)
@@ -43,7 +43,6 @@
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
 #include "wl_cfg80211.h"
-#include "bcmchip.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -58,7 +57,6 @@ struct brcmf_if {
        struct net_device *ndev;
        struct net_device_stats stats;
        int idx;                /* iface idx in dongle */
-       int state;              /* interface state */
        u8 mac_addr[ETH_ALEN];  /* assigned MAC address */
 };
 
@@ -78,22 +76,9 @@ struct brcmf_info {
 };
 
 /* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
 module_param(brcmf_msg_level, int, 0);
 
-
-static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
-{
-       int i = 0;
-
-       while (i < BRCMF_MAX_IFS) {
-               if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
-                       return i;
-               i++;
-       }
-
-       return BRCMF_BAD_IF;
-}
-
 int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
 {
        int i = BRCMF_MAX_IFS;
@@ -285,14 +270,9 @@ _brcmf_set_mac_address(struct work_struct *work)
 
 static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
        struct sockaddr *sa = (struct sockaddr *)addr;
-       int ifidx;
-
-       ifidx = brcmf_net2idx(drvr_priv, ndev);
-       if (ifidx == BRCMF_BAD_IF)
-               return -1;
 
        memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
        schedule_work(&drvr_priv->setmacaddr_work);
@@ -301,13 +281,8 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
 
 static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
-       int ifidx;
-
-       ifidx = brcmf_net2idx(drvr_priv, ndev);
-       if (ifidx == BRCMF_BAD_IF)
-               return;
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
 
        schedule_work(&drvr_priv->multicast_work);
 }
@@ -317,7 +292,7 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
        struct brcmf_info *drvr_priv = drvr->info;
 
        /* Reject if down */
-       if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+       if (!drvr->up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
                return -ENODEV;
 
        /* Update multicast statistic */
@@ -335,29 +310,29 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
        brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
 
        /* Use bus module to send data frame */
-       return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+       return brcmf_sdbrcm_bus_txdata(drvr->dev, pktbuf);
 }
 
 static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        int ret;
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
-       int ifidx;
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
 
        brcmf_dbg(TRACE, "Enter\n");
 
        /* Reject if down */
-       if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
-               brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
-                         drvr_priv->pub.up, drvr_priv->pub.busstate);
+       if (!drvr_priv->pub.up ||
+           (drvr_priv->pub.bus_if->state == BRCMF_BUS_DOWN)) {
+               brcmf_dbg(ERROR, "xmit rejected pub.up=%d state=%d\n",
+                         drvr_priv->pub.up,
+                         drvr_priv->pub.bus_if->state);
                netif_stop_queue(ndev);
                return -ENODEV;
        }
 
-       ifidx = brcmf_net2idx(drvr_priv, ndev);
-       if (ifidx == BRCMF_BAD_IF) {
-               brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+       if (!drvr_priv->iflist[ifp->idx]) {
+               brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx);
                netif_stop_queue(ndev);
                return -ENODEV;
        }
@@ -367,20 +342,20 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                struct sk_buff *skb2;
 
                brcmf_dbg(INFO, "%s: insufficient headroom\n",
-                         brcmf_ifname(&drvr_priv->pub, ifidx));
+                         brcmf_ifname(&drvr_priv->pub, ifp->idx));
                drvr_priv->pub.tx_realloc++;
                skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
                dev_kfree_skb(skb);
                skb = skb2;
                if (skb == NULL) {
                        brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
-                                 brcmf_ifname(&drvr_priv->pub, ifidx));
+                                 brcmf_ifname(&drvr_priv->pub, ifp->idx));
                        ret = -ENOMEM;
                        goto done;
                }
        }
 
-       ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+       ret = brcmf_sendpkt(&drvr_priv->pub, ifp->idx, skb);
 
 done:
        if (ret)
@@ -424,26 +399,21 @@ static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
        return bcmerror;
 }
 
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
-                 int numpkt)
+void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
+                   struct sk_buff_head *skb_list)
 {
        struct brcmf_info *drvr_priv = drvr->info;
        unsigned char *eth;
        uint len;
        void *data;
-       struct sk_buff *pnext, *save_pktbuf;
-       int i;
+       struct sk_buff *skb, *pnext;
        struct brcmf_if *ifp;
        struct brcmf_event_msg event;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       save_pktbuf = skb;
-
-       for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
-               pnext = skb->next;
-               skb->next = NULL;
+       skb_queue_walk_safe(skb_list, skb, pnext) {
+               skb_unlink(skb, skb_list);
 
                /* Get the protocol, maintain skb around eth_type_trans()
                 * The main reason for this hack is for the limitation of
@@ -464,6 +434,12 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
                if (ifp == NULL)
                        ifp = drvr_priv->iflist[0];
 
+               if (!ifp || !ifp->ndev ||
+                   ifp->ndev->reg_state != NETREG_REGISTERED) {
+                       brcmu_pkt_buf_free_skb(skb);
+                       continue;
+               }
+
                skb->dev = ifp->ndev;
                skb->protocol = eth_type_trans(skb, skb->dev);
 
@@ -482,12 +458,10 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
                                          skb_mac_header(skb),
                                          &event, &data);
 
-               if (drvr_priv->iflist[ifidx] &&
-                   !drvr_priv->iflist[ifidx]->state)
+               if (drvr_priv->iflist[ifidx]) {
                        ifp = drvr_priv->iflist[ifidx];
-
-               if (ifp->ndev)
                        ifp->ndev->last_rx = jiffies;
+               }
 
                drvr->dstats.rx_bytes += skb->len;
                drvr->rx_packets++;     /* Local count */
@@ -524,19 +498,11 @@ void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
 
 static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
-       struct brcmf_if *ifp;
-       int ifidx;
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       ifidx = brcmf_net2idx(drvr_priv, ndev);
-       if (ifidx == BRCMF_BAD_IF)
-               return NULL;
-
-       ifp = drvr_priv->iflist[ifidx];
-
        if (drvr_priv->pub.up)
                /* Use the protocol to get dongle stats */
                brcmf_proto_dstats(&drvr_priv->pub);
@@ -637,14 +603,12 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
                                    struct ethtool_drvinfo *info)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
 
        sprintf(info->driver, KBUILD_MODNAME);
        sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
-       sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
-       sprintf(info->bus_info, "%s",
-               dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+       sprintf(info->bus_info, "%s", dev_name(drvr_priv->pub.dev));
 }
 
 static struct ethtool_ops brcmf_ethtool_ops = {
@@ -765,14 +729,12 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
 static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
                                    int cmd)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
-       int ifidx;
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
 
-       ifidx = brcmf_net2idx(drvr_priv, ndev);
-       brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+       brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
 
-       if (ifidx == BRCMF_BAD_IF)
+       if (!drvr_priv->iflist[ifp->idx])
                return -1;
 
        if (cmd == SIOCETHTOOL)
@@ -788,22 +750,19 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
        s32 err = 0;
        int buflen = 0;
        bool is_set_key_cmd;
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
-       int ifidx;
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
 
        memset(&dcmd, 0, sizeof(dcmd));
        dcmd.cmd = cmd;
        dcmd.buf = arg;
        dcmd.len = len;
 
-       ifidx = brcmf_net2idx(drvr_priv, ndev);
-
        if (dcmd.buf != NULL)
                buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
 
        /* send to dongle (must be up, and wl) */
-       if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+       if ((drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA)) {
                brcmf_dbg(ERROR, "DONGLE_DOWN\n");
                err = -EIO;
                goto done;
@@ -826,7 +785,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
        if (is_set_key_cmd)
                brcmf_netdev_wait_pend8021x(ndev);
 
-       err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+       err = brcmf_proto_dcmd(&drvr_priv->pub, ifp->idx, &dcmd, buflen);
 
 done:
        if (err > 0)
@@ -837,7 +796,8 @@ done:
 
 static int brcmf_netdev_stop(struct net_device *ndev)
 {
-       struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_pub *drvr = &ifp->info->pub;
 
        brcmf_dbg(TRACE, "Enter\n");
        brcmf_cfg80211_down(drvr->config);
@@ -853,16 +813,14 @@ static int brcmf_netdev_stop(struct net_device *ndev)
 
 static int brcmf_netdev_open(struct net_device *ndev)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)
-                                       netdev_priv(ndev);
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
        u32 toe_ol;
-       int ifidx = brcmf_net2idx(drvr_priv, ndev);
        s32 ret = 0;
 
-       brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
-
-       if (ifidx == 0) {       /* do it only for primary eth0 */
+       brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
 
+       if (ifp->idx == 0) {    /* do it only for primary eth0 */
                /* try to bring up bus */
                ret = brcmf_bus_start(&drvr_priv->pub);
                if (ret != 0) {
@@ -874,12 +832,12 @@ static int brcmf_netdev_open(struct net_device *ndev)
                memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
 
                /* Get current TOE mode from dongle */
-               if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+               if (brcmf_toe_get(drvr_priv, ifp->idx, &toe_ol) >= 0
                    && (toe_ol & TOE_TX_CSUM_OL) != 0)
-                       drvr_priv->iflist[ifidx]->ndev->features |=
+                       drvr_priv->iflist[ifp->idx]->ndev->features |=
                                NETIF_F_IP_CSUM;
                else
-                       drvr_priv->iflist[ifidx]->ndev->features &=
+                       drvr_priv->iflist[ifp->idx]->ndev->features &=
                                ~NETIF_F_IP_CSUM;
        }
        /* Allow transmit calls */
@@ -893,75 +851,62 @@ static int brcmf_netdev_open(struct net_device *ndev)
        return ret;
 }
 
+static const struct net_device_ops brcmf_netdev_ops_pri = {
+       .ndo_open = brcmf_netdev_open,
+       .ndo_stop = brcmf_netdev_stop,
+       .ndo_get_stats = brcmf_netdev_get_stats,
+       .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+       .ndo_start_xmit = brcmf_netdev_start_xmit,
+       .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+       .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
 int
-brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
-            char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, char *name, u8 *mac_addr)
 {
        struct brcmf_if *ifp;
-       int ret = 0, err = 0;
+       struct net_device *ndev;
 
-       brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+       brcmf_dbg(TRACE, "idx %d\n", ifidx);
 
        ifp = drvr_priv->iflist[ifidx];
-       if (!ifp) {
-               ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
-               if (!ifp)
-                       return -ENOMEM;
+       /*
+        * Delete the existing interface before overwriting it
+        * in case we missed the BRCMF_E_IF_DEL event.
+        */
+       if (ifp) {
+               brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+                         ifp->ndev->name);
+               netif_stop_queue(ifp->ndev);
+               unregister_netdev(ifp->ndev);
+               free_netdev(ifp->ndev);
+               drvr_priv->iflist[ifidx] = NULL;
+       }
+
+       /* Allocate netdev, including space for private structure */
+       ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
+       if (!ndev) {
+               brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+               return -ENOMEM;
        }
 
-       memset(ifp, 0, sizeof(struct brcmf_if));
+       ifp = netdev_priv(ndev);
+       ifp->ndev = ndev;
        ifp->info = drvr_priv;
        drvr_priv->iflist[ifidx] = ifp;
+       ifp->idx = ifidx;
        if (mac_addr != NULL)
                memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
 
-       if (ndev == NULL) {
-               ifp->state = BRCMF_E_IF_ADD;
-               ifp->idx = ifidx;
-               /*
-                * Delete the existing interface before overwriting it
-                * in case we missed the BRCMF_E_IF_DEL event.
-                */
-               if (ifp->ndev != NULL) {
-                       brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
-                                 ifp->ndev->name);
-                       netif_stop_queue(ifp->ndev);
-                       unregister_netdev(ifp->ndev);
-                       free_netdev(ifp->ndev);
-               }
-
-               /* Allocate netdev, including space for private structure */
-               ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
-                                        ether_setup);
-               if (!ifp->ndev) {
-                       brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
-                       ret = -ENOMEM;
-               }
-
-               if (ret == 0) {
-                       memcpy(netdev_priv(ifp->ndev), &drvr_priv,
-                              sizeof(drvr_priv));
-                       err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
-                       if (err != 0) {
-                               brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
-                                         err);
-                               ret = -EOPNOTSUPP;
-                       } else {
-                               brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
-                                         current->pid, ifp->ndev->name);
-                               ifp->state = 0;
-                       }
-               }
-
-               if (ret < 0) {
-                       if (ifp->ndev)
-                               free_netdev(ifp->ndev);
+       if (brcmf_net_attach(&drvr_priv->pub, ifp->idx)) {
+               brcmf_dbg(ERROR, "brcmf_net_attach failed");
+               free_netdev(ifp->ndev);
+               drvr_priv->iflist[ifidx] = NULL;
+               return -EOPNOTSUPP;
+       }
 
-                       drvr_priv->iflist[ifp->idx] = NULL;
-                       kfree(ifp);
-               }
-       } else
-               ifp->ndev = ndev;
+       brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+                 current->pid, ifp->ndev->name);
 
        return 0;
 }
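
Because the net_device is now allocated with sizeof(struct brcmf_if) of private space and ifp->info links back to the driver, every netdev callback can resolve its state with plain netdev_priv(), as the reworked handlers above do. A short sketch (helper name assumed) of that lookup chain:

        static struct brcmf_pub *example_ndev_to_pub(struct net_device *ndev)
        {
                struct brcmf_if *ifp = netdev_priv(ndev);

                return &ifp->info->pub;
        }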
@@ -977,47 +922,37 @@ void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
                brcmf_dbg(ERROR, "Null interface\n");
                return;
        }
+       if (ifp->ndev) {
+               if (ifidx == 0) {
+                       if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+                               rtnl_lock();
+                               brcmf_netdev_stop(ifp->ndev);
+                               rtnl_unlock();
+                       }
+               } else {
+                       netif_stop_queue(ifp->ndev);
+               }
 
-       ifp->state = BRCMF_E_IF_DEL;
-       ifp->idx = ifidx;
-       if (ifp->ndev != NULL) {
-               netif_stop_queue(ifp->ndev);
                unregister_netdev(ifp->ndev);
-               free_netdev(ifp->ndev);
                drvr_priv->iflist[ifidx] = NULL;
-               kfree(ifp);
+               if (ifidx == 0)
+                       brcmf_cfg80211_detach(drvr_priv->pub.config);
+               free_netdev(ifp->ndev);
        }
 }
 
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+struct brcmf_pub *brcmf_attach(struct brcmf_sdio *bus, uint bus_hdrlen,
+                              struct device *dev)
 {
        struct brcmf_info *drvr_priv = NULL;
-       struct net_device *ndev;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       /* Allocate netdev, including space for private structure */
-       ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
-       if (!ndev) {
-               brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
-               goto fail;
-       }
-
        /* Allocate primary brcmf_info */
        drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
        if (!drvr_priv)
                goto fail;
 
-       /*
-        * Save the brcmf_info into the priv
-        */
-       memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
-       if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) ==
-           BRCMF_BAD_IF)
-               goto fail;
-
-       ndev->netdev_ops = NULL;
        mutex_init(&drvr_priv->proto_block);
 
        /* Link to info module */
@@ -1026,6 +961,8 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
        /* Link to bus module */
        drvr_priv->pub.bus = bus;
        drvr_priv->pub.hdrlen = bus_hdrlen;
+       drvr_priv->pub.bus_if = dev_get_drvdata(dev);
+       drvr_priv->pub.dev = dev;
 
        /* Attach and link in the protocol */
        if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
@@ -1033,29 +970,12 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
                goto fail;
        }
 
-       /* Attach and link in the cfg80211 */
-       drvr_priv->pub.config =
-                       brcmf_cfg80211_attach(ndev,
-                                             brcmf_bus_get_device(bus),
-                                             &drvr_priv->pub);
-       if (drvr_priv->pub.config == NULL) {
-               brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
-               goto fail;
-       }
-
        INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
        INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
 
-       /*
-        * Save the brcmf_info into the priv
-        */
-       memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
        return &drvr_priv->pub;
 
 fail:
-       if (ndev)
-               free_netdev(ndev);
        if (drvr_priv)
                brcmf_detach(&drvr_priv->pub);
 
@@ -1072,14 +992,14 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
        brcmf_dbg(TRACE, "\n");
 
        /* Bring up the bus */
-       ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+       ret = brcmf_sdbrcm_bus_init(drvr_priv->pub.dev);
        if (ret != 0) {
                brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
                return ret;
        }
 
        /* If bus is not ready, can't come up */
-       if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+       if (drvr_priv->pub.bus_if->state != BRCMF_BUS_DATA) {
                brcmf_dbg(ERROR, "failed bus is not ready\n");
                return -ENODEV;
        }
@@ -1123,16 +1043,6 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
        return 0;
 }
 
-static struct net_device_ops brcmf_netdev_ops_pri = {
-       .ndo_open = brcmf_netdev_open,
-       .ndo_stop = brcmf_netdev_stop,
-       .ndo_get_stats = brcmf_netdev_get_stats,
-       .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
-       .ndo_start_xmit = brcmf_netdev_start_xmit,
-       .ndo_set_mac_address = brcmf_netdev_set_mac_address,
-       .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
 int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
 {
        struct brcmf_info *drvr_priv = drvr->info;
@@ -1169,6 +1079,15 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
 
        memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
 
+       /* attach to cfg80211 for primary interface */
+       if (!ifidx) {
+               drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
+               if (drvr->config == NULL) {
+                       brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+                       goto fail;
+               }
+       }
+
        if (register_netdev(ndev) != 0) {
                brcmf_dbg(ERROR, "couldn't register the net device\n");
                goto fail;
@@ -1196,7 +1115,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
                        brcmf_proto_stop(&drvr_priv->pub);
 
                        /* Stop the bus module */
-                       brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
+                       brcmf_sdbrcm_bus_stop(drvr_priv->pub.dev);
                }
        }
 }
@@ -1210,21 +1129,13 @@ void brcmf_detach(struct brcmf_pub *drvr)
        if (drvr) {
                drvr_priv = drvr->info;
                if (drvr_priv) {
-                       struct brcmf_if *ifp;
                        int i;
 
-                       for (i = 1; i < BRCMF_MAX_IFS; i++)
+                       /* make sure primary interface removed last */
+                       for (i = BRCMF_MAX_IFS-1; i > -1; i--)
                                if (drvr_priv->iflist[i])
                                        brcmf_del_if(drvr_priv, i);
 
-                       ifp = drvr_priv->iflist[0];
-                       if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
-                               rtnl_lock();
-                               brcmf_netdev_stop(ifp->ndev);
-                               rtnl_unlock();
-                               unregister_netdev(ifp->ndev);
-                       }
-
                        cancel_work_sync(&drvr_priv->setmacaddr_work);
                        cancel_work_sync(&drvr_priv->multicast_work);
 
@@ -1233,43 +1144,11 @@ void brcmf_detach(struct brcmf_pub *drvr)
                        if (drvr->prot)
                                brcmf_proto_detach(drvr);
 
-                       brcmf_cfg80211_detach(drvr->config);
-
-                       free_netdev(ifp->ndev);
-                       kfree(ifp);
                        kfree(drvr_priv);
                }
        }
 }
 
-static void __exit brcmf_module_cleanup(void)
-{
-       brcmf_dbg(TRACE, "Enter\n");
-
-       brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
-{
-       int error;
-
-       brcmf_dbg(TRACE, "Enter\n");
-
-       error = brcmf_bus_register();
-
-       if (error) {
-               brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
-               goto failed;
-       }
-       return 0;
-
-failed:
-       return -EINVAL;
-}
-
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
-
 int brcmf_os_proto_block(struct brcmf_pub *drvr)
 {
        struct brcmf_info *drvr_priv = drvr->info;
@@ -1302,7 +1181,8 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
 
 int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
 {
-       struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_info *drvr_priv = ifp->info;
        int timeout = 10 * HZ / 1000;
        int ntimes = MAX_WAIT_FOR_8021X_TX;
        int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
index 313b8bf..18597fe 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/semaphore.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/bcma/bcma.h>
 #include <asm/unaligned.h>
 #include <defs.h>
 #include <brcmu_wifi.h>
@@ -35,6 +36,7 @@
 #include <brcm_hw_ids.h>
 #include <soc.h>
 #include "sdio_host.h"
+#include "sdio_chip.h"
 
 #define DCMD_RESP_TIMEOUT  2000        /* In milliseconds */
 
@@ -89,7 +91,6 @@ struct rte_console {
 #include "dhd_bus.h"
 #include "dhd_proto.h"
 #include "dhd_dbg.h"
-#include <bcmchip.h>
 
 #define TXQLEN         2048    /* bulk tx queue length */
 #define TXHI           (TXQLEN - 256)  /* turn on flow control above TXHI */
@@ -134,33 +135,6 @@ struct rte_console {
 /*   Force no backplane reset */
 #define SBSDIO_DEVCTL_RST_NOBPRESET    0x20
 
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP               0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT                        0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP               0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ           0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ            0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF     0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL               0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL                        0x80
-
-#define SBSDIO_AVBITS          (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval)   ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval)    (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-
-#define SBSDIO_CLKAV(regval, alponly) \
-       (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
 /* direct(mapped) cis space */
 
 /* MAPPED common CIS address */
@@ -335,49 +309,10 @@ struct rte_console {
 /* Flags for SDH calls */
 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
 
-/* sbimstate */
-#define        SBIM_IBE                0x20000 /* inbanderror */
-#define        SBIM_TO                 0x40000 /* timeout */
-#define        SBIM_BY                 0x01800000      /* busy (sonics >= 2.3) */
-#define        SBIM_RJ                 0x02000000      /* reject (sonics >= 2.3) */
-
-/* sbtmstatelow */
-
-/* reset */
-#define        SBTML_RESET             0x0001
-/* reject field */
-#define        SBTML_REJ_MASK          0x0006
-/* reject */
-#define        SBTML_REJ               0x0002
-/* temporary reject, for error recovery */
-#define        SBTML_TMPREJ            0x0004
-
-/* Shift to locate the SI control flags in sbtml */
-#define        SBTML_SICF_SHIFT        16
-
-/* sbtmstatehigh */
-#define        SBTMH_SERR              0x0001  /* serror */
-#define        SBTMH_INT               0x0002  /* interrupt */
-#define        SBTMH_BUSY              0x0004  /* busy */
-#define        SBTMH_TO                0x0020  /* timeout (sonics >= 2.3) */
-
-/* Shift to locate the SI status flags in sbtmh */
-#define        SBTMH_SISF_SHIFT        16
-
-/* sbidlow */
-#define        SBIDL_INIT              0x80    /* initiator */
-
-/* sbidhigh */
-#define        SBIDH_RC_MASK           0x000f  /* revision code */
-#define        SBIDH_RCE_MASK          0x7000  /* revision code extension field */
-#define        SBIDH_RCE_SHIFT         8
-#define        SBCOREREV(sbidh) \
-       ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
-         ((sbidh) & SBIDH_RC_MASK))
-#define        SBIDH_CC_MASK           0x8ff0  /* core code */
-#define        SBIDH_CC_SHIFT          4
-#define        SBIDH_VC_MASK           0xffff0000      /* vendor code */
-#define        SBIDH_VC_SHIFT          16
+#define BRCMFMAC_FW_NAME       "brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME       "brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
 
 /*
  * Conversion of 802.1D priority to precedence level
@@ -388,17 +323,6 @@ static uint prio2prec(u32 prio)
               (prio^2) : prio;
 }
 
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
-               (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
-               (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
-               (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
 /* core registers */
 struct sdpcmd_regs {
        u32 corecontrol;                /* 0x00, rev8 */
@@ -524,23 +448,8 @@ struct sdpcm_shared_le {
 
 
 /* misc chip info needed by some of the routines */
-struct chip_info {
-       u32 chip;
-       u32 chiprev;
-       u32 cccorebase;
-       u32 ccrev;
-       u32 cccaps;
-       u32 buscorebase; /* 32 bits backplane bus address */
-       u32 buscorerev;
-       u32 buscoretype;
-       u32 ramcorebase;
-       u32 armcorebase;
-       u32 pmurev;
-       u32 ramsize;
-};
-
 /* Private data for SDIO bus interaction */
-struct brcmf_bus {
+struct brcmf_sdio {
        struct brcmf_pub *drvr;
 
        struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
@@ -574,7 +483,7 @@ struct brcmf_bus {
        uint txminmax;
 
        struct sk_buff *glomd;  /* Packet containing glomming descriptor */
-       struct sk_buff *glom;   /* Packet chain for glommed superframe */
+       struct sk_buff_head glom; /* Packet list for glommed superframe */
        uint glomerr;           /* Glom packet read errors */
 
        u8 *rxbuf;              /* Buffer for receiving control packets */
@@ -657,52 +566,10 @@ struct brcmf_bus {
 
        struct semaphore sdsem;
 
-       const char *fw_name;
        const struct firmware *firmware;
-       const char *nv_name;
        u32 fw_ptr;
 };
 
-struct sbconfig {
-       u32 PAD[2];
-       u32 sbipsflag;  /* initiator port ocp slave flag */
-       u32 PAD[3];
-       u32 sbtpsflag;  /* target port ocp slave flag */
-       u32 PAD[11];
-       u32 sbtmerrloga;        /* (sonics >= 2.3) */
-       u32 PAD;
-       u32 sbtmerrlog; /* (sonics >= 2.3) */
-       u32 PAD[3];
-       u32 sbadmatch3; /* address match3 */
-       u32 PAD;
-       u32 sbadmatch2; /* address match2 */
-       u32 PAD;
-       u32 sbadmatch1; /* address match1 */
-       u32 PAD[7];
-       u32 sbimstate;  /* initiator agent state */
-       u32 sbintvec;   /* interrupt mask */
-       u32 sbtmstatelow;       /* target state */
-       u32 sbtmstatehigh;      /* target state */
-       u32 sbbwa0;             /* bandwidth allocation table0 */
-       u32 PAD;
-       u32 sbimconfiglow;      /* initiator configuration */
-       u32 sbimconfighigh;     /* initiator configuration */
-       u32 sbadmatch0; /* address match0 */
-       u32 PAD;
-       u32 sbtmconfiglow;      /* target configuration */
-       u32 sbtmconfighigh;     /* target configuration */
-       u32 sbbconfig;  /* broadcast configuration */
-       u32 PAD;
-       u32 sbbstate;   /* broadcast state */
-       u32 PAD[3];
-       u32 sbactcnfg;  /* activate configuration */
-       u32 PAD[3];
-       u32 sbflagst;   /* current sbflags */
-       u32 PAD[3];
-       u32 sbidlow;            /* identification */
-       u32 sbidhigh;   /* identification */
-};
-
 /* clkstate */
 #define CLK_NONE       0
 #define CLK_SDONLY     1
@@ -737,7 +604,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
 }
 
 /* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
 {
        return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
               ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
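
data_ok() leans on unsigned 8-bit wraparound: the subtraction yields the remaining transmit credits even across counter wrap, and the 0x80 test rejects the case where tx_seq has overrun tx_max. A standalone illustration of the same arithmetic (not driver code):

        #include <stdbool.h>
        #include <stdint.h>

        static bool window_open(uint8_t tx_max, uint8_t tx_seq)
        {
                uint8_t credits = (uint8_t)(tx_max - tx_seq);

                /* non-zero and not "negative", i.e. tx_seq has not passed tx_max */
                return credits != 0 && (credits & 0x80) == 0;
        }

        /* example: tx_max = 0x02, tx_seq = 0xfe -> credits = 0x04 -> window open */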
@@ -748,12 +615,14 @@ static bool data_ok(struct brcmf_bus *bus)
  * addresses on the 32-bit backplane bus.
  */
 static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
 {
+       u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
        *retryvar = 0;
        do {
                *regvar = brcmf_sdcard_reg_read(bus->sdiodev,
-                               bus->ci->buscorebase + reg_offset, sizeof(u32));
+                               bus->ci->c_inf[idx].base + reg_offset,
+                               sizeof(u32));
        } while (brcmf_sdcard_regfail(bus->sdiodev) &&
                 (++(*retryvar) <= retry_limit));
        if (*retryvar) {
@@ -766,12 +635,13 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
 }
 
 static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
 {
+       u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
        *retryvar = 0;
        do {
                brcmf_sdcard_reg_write(bus->sdiodev,
-                                      bus->ci->buscorebase + reg_offset,
+                                      bus->ci->c_inf[idx].base + reg_offset,
                                       sizeof(u32), regval);
        } while (brcmf_sdcard_regfail(bus->sdiodev) &&
                 (++(*retryvar) <= retry_limit));
@@ -790,14 +660,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
 /* Packet free applicable unconditionally for sdio and sdspi.
  * Conditional if bufpool was present for gspi bus.
  */
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
 {
        if (bus->usebufpool)
                brcmu_pkt_buf_free_skb(pkt);
 }
 
 /* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 {
        int err;
        u8 clkctl, clkreq, devctl;
@@ -812,10 +682,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
                clkreq =
                    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
 
-               if ((bus->ci->chip == BCM4329_CHIP_ID)
-                   && (bus->ci->chiprev == 0))
-                       clkreq |= SBSDIO_FORCE_ALP;
-
                brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
                                       SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
                if (err) {
@@ -823,14 +689,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
                        return -EBADE;
                }
 
-               if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
-                              && (bus->ci->buscorerev == 9))) {
-                       u32 dummy, retries;
-                       r_sdreg32(bus, &dummy,
-                                 offsetof(struct sdpcmd_regs, clockctlstatus),
-                                 &retries);
-               }
-
                /* Check current status */
                clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
                                               SBSDIO_FUNC1_CHIPCLKCSR, &err);
@@ -930,7 +788,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
 }
 
 /* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -943,7 +801,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
 }
 
 /* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
 {
 #ifdef BCMDBG
        uint oldstate = bus->clkstate;
@@ -999,7 +857,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
        return 0;
 }
 
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
 {
        uint retries = 0;
 
@@ -1034,11 +892,9 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
                        SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
 
                /* Isolate the bus */
-               if (bus->ci->chip != BCM4329_CHIP_ID) {
-                       brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-                               SBSDIO_DEVICE_CTL,
-                               SBSDIO_DEVCTL_PADS_ISO, NULL);
-               }
+               brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+                       SBSDIO_DEVICE_CTL,
+                       SBSDIO_DEVCTL_PADS_ISO, NULL);
 
                /* Change state */
                bus->sleeping = true;
@@ -1049,13 +905,6 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
                brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
                        SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
 
-               /* Force pad isolation off if possible
-                        (in case power never toggled) */
-               if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
-                   && (bus->ci->buscorerev >= 10))
-                       brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-                               SBSDIO_DEVICE_CTL, 0, NULL);
-
                /* Make sure the controller has the bus up */
                brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
 
@@ -1080,13 +929,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
        return 0;
 }
 
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
 {
        if (bus->sleeping)
                brcmf_sdbrcm_bussleep(bus, false);
 }
 
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
 {
        u32 intstatus = 0;
        u32 hmb_data;
@@ -1162,7 +1011,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
        return intstatus;
 }
 
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 {
        uint retries = 0;
        u16 lastrbc;
@@ -1219,16 +1068,61 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
 
        /* If we can't reach the device, signal failure */
        if (err || brcmf_sdcard_regfail(bus->sdiodev))
-               bus->drvr->busstate = BRCMF_BUS_DOWN;
+               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
 }
 
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+/* copy a buffer into a pkt buffer chain */
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
+{
+       uint n, ret = 0;
+       struct sk_buff *p;
+       u8 *buf;
+
+       buf = bus->dataptr;
+
+       /* copy the data */
+       skb_queue_walk(&bus->glom, p) {
+               n = min_t(uint, p->len, len);
+               memcpy(p->data, buf, n);
+               buf += n;
+               len -= n;
+               ret += n;
+               if (!len)
+                       break;
+       }
+
+       return ret;
+}
+
+/* return total length of buffer chain */
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+{
+       struct sk_buff *p;
+       uint total;
+
+       total = 0;
+       skb_queue_walk(&bus->glom, p)
+               total += p->len;
+       return total;
+}
+
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+{
+       struct sk_buff *cur, *next;
+
+       skb_queue_walk_safe(&bus->glom, cur, next) {
+               skb_unlink(cur, &bus->glom);
+               brcmu_pkt_buf_free_skb(cur);
+       }
+}
+
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
 {
        u16 dlen, totlen;
        u8 *dptr, num = 0;
 
        u16 sublen, check;
-       struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+       struct sk_buff *pfirst, *pnext;
 
        int errcode;
        u8 chan, seq, doff, sfdoff;
@@ -1240,11 +1134,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
        /* If packets, issue read(s) and send up packet chain */
        /* Return sequence numbers consumed? */
 
-       brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+       brcmf_dbg(TRACE, "start: glomd %p glom %p\n",
+                 bus->glomd, skb_peek(&bus->glom));
 
        /* If there's a descriptor, generate the packet chain */
        if (bus->glomd) {
-               pfirst = plast = pnext = NULL;
+               pfirst = pnext = NULL;
                dlen = (u16) (bus->glomd->len);
                dptr = bus->glomd->data;
                if (!dlen || (dlen & 1)) {
@@ -1287,12 +1182,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                                          num, sublen);
                                break;
                        }
-                       if (!pfirst) {
-                               pfirst = plast = pnext;
-                       } else {
-                               plast->next = pnext;
-                               plast = pnext;
-                       }
+                       skb_queue_tail(&bus->glom, pnext);
 
                        /* Adhere to start alignment requirements */
                        pkt_align(pnext, sublen, BRCMF_SDALIGN);
@@ -1308,12 +1198,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                                brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
                                          bus->nextlen, totlen, rxseq);
                        }
-                       bus->glom = pfirst;
                        pfirst = pnext = NULL;
                } else {
-                       if (pfirst)
-                               brcmu_pkt_buf_free_skb(pfirst);
-                       bus->glom = NULL;
+                       brcmf_sdbrcm_free_glom(bus);
                        num = 0;
                }
 
@@ -1325,37 +1212,33 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
 
        /* Ok -- either we just generated a packet chain,
                 or had one from before */
-       if (bus->glom) {
+       if (!skb_queue_empty(&bus->glom)) {
                if (BRCMF_GLOM_ON()) {
                        brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
-                       for (pnext = bus->glom; pnext; pnext = pnext->next) {
+                       skb_queue_walk(&bus->glom, pnext) {
                                brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
                                          pnext, (u8 *) (pnext->data),
                                          pnext->len, pnext->len);
                        }
                }
 
-               pfirst = bus->glom;
-               dlen = (u16) brcmu_pkttotlen(pfirst);
+               pfirst = skb_peek(&bus->glom);
+               dlen = (u16) brcmf_sdbrcm_glom_len(bus);
 
                /* Do an SDIO read for the superframe.  Configurable iovar to
                 * read directly into the chained packet, or allocate a large
                 * packet and copy into the chain.
                 */
                if (usechain) {
-                       errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+                       errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
                                        bus->sdiodev->sbwad,
-                                       SDIO_FUNC_2,
-                                       F2SYNC, (u8 *) pfirst->data, dlen,
-                                       pfirst);
+                                       SDIO_FUNC_2, F2SYNC, &bus->glom);
                } else if (bus->dataptr) {
                        errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
                                        bus->sdiodev->sbwad,
-                                       SDIO_FUNC_2,
-                                       F2SYNC, bus->dataptr, dlen,
-                                       NULL);
-                       sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
-                                               bus->dataptr);
+                                       SDIO_FUNC_2, F2SYNC,
+                                       bus->dataptr, dlen);
+                       sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
                        if (sublen != dlen) {
                                brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
                                          dlen, sublen);
@@ -1380,9 +1263,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                        } else {
                                bus->glomerr = 0;
                                brcmf_sdbrcm_rxfail(bus, true, false);
-                               brcmu_pkt_buf_free_skb(bus->glom);
                                bus->rxglomfail++;
-                               bus->glom = NULL;
+                               brcmf_sdbrcm_free_glom(bus);
                        }
                        return 0;
                }
@@ -1455,10 +1337,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                /* Remove superframe header, remember offset */
                skb_pull(pfirst, doff);
                sfdoff = doff;
+               num = 0;
 
                /* Validate all the subframe headers */
-               for (num = 0, pnext = pfirst; pnext && !errcode;
-                    num++, pnext = pnext->next) {
+               skb_queue_walk(&bus->glom, pnext) {
+                       /* leave the loop when an invalid subframe is found */
+                       if (errcode)
+                               break;
+
                        dptr = (u8 *) (pnext->data);
                        dlen = (u16) (pnext->len);
                        sublen = get_unaligned_le16(dptr);
@@ -1491,6 +1377,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                                          num, doff, sublen, SDPCM_HDRLEN);
                                errcode = -1;
                        }
+                       /* increase the subframe count */
+                       num++;
                }
 
                if (errcode) {
@@ -1503,23 +1391,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                        } else {
                                bus->glomerr = 0;
                                brcmf_sdbrcm_rxfail(bus, true, false);
-                               brcmu_pkt_buf_free_skb(bus->glom);
                                bus->rxglomfail++;
-                               bus->glom = NULL;
+                               brcmf_sdbrcm_free_glom(bus);
                        }
                        bus->nextlen = 0;
                        return 0;
                }
 
                /* Basic SD framing looks ok - process each packet (header) */
-               save_pfirst = pfirst;
-               bus->glom = NULL;
-               plast = NULL;
-
-               for (num = 0; pfirst; rxseq++, pfirst = pnext) {
-                       pnext = pfirst->next;
-                       pfirst->next = NULL;
 
+               skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
                        dptr = (u8 *) (pfirst->data);
                        sublen = get_unaligned_le16(dptr);
                        chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1539,6 +1420,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                                bus->rx_badseq++;
                                rxseq = seq;
                        }
+                       rxseq++;
+
 #ifdef BCMDBG
                        if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
                                printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1551,36 +1434,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                        skb_pull(pfirst, doff);
 
                        if (pfirst->len == 0) {
+                               skb_unlink(pfirst, &bus->glom);
                                brcmu_pkt_buf_free_skb(pfirst);
-                               if (plast)
-                                       plast->next = pnext;
-                               else
-                                       save_pfirst = pnext;
-
                                continue;
                        } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
                                                       pfirst) != 0) {
                                brcmf_dbg(ERROR, "rx protocol error\n");
                                bus->drvr->rx_errors++;
+                               skb_unlink(pfirst, &bus->glom);
                                brcmu_pkt_buf_free_skb(pfirst);
-                               if (plast)
-                                       plast->next = pnext;
-                               else
-                                       save_pfirst = pnext;
-
                                continue;
                        }
 
-                       /* this packet will go up, link back into
-                                chain and count it */
-                       pfirst->next = pnext;
-                       plast = pfirst;
-                       num++;
-
 #ifdef BCMDBG
                        if (BRCMF_GLOM_ON()) {
                                brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
-                                         num, pfirst, pfirst->data,
+                                         bus->glom.qlen, pfirst, pfirst->data,
                                          pfirst->len, pfirst->next,
                                          pfirst->prev);
                                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1589,19 +1458,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
                        }
 #endif                         /* BCMDBG */
                }
-               if (num) {
+               /* send any remaining packets up */
+               if (bus->glom.qlen) {
                        up(&bus->sdsem);
-                       brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+                       brcmf_rx_frame(bus->drvr, ifidx, &bus->glom);
                        down(&bus->sdsem);
                }
 
                bus->rxglomframes++;
-               bus->rxglompkts += num;
+               bus->rxglompkts += bus->glom.qlen;
        }
        return num;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
                                        bool *pending)
 {
        DECLARE_WAITQUEUE(wait, current);
@@ -1623,7 +1493,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
        return timeout;
 }
 
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
 {
        if (waitqueue_active(&bus->dcmd_resp_wait))
                wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1631,7 +1501,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
        return 0;
 }
 static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
 {
        uint rdlen, pad;
 
@@ -1689,8 +1559,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
        sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
                                bus->sdiodev->sbwad,
                                SDIO_FUNC_2,
-                               F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
-                               NULL);
+                               F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
        bus->f2rxdata++;
 
        /* Control frame failures need retransmission */
@@ -1721,7 +1590,7 @@ done:
 }
 
 /* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
 {
        if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
                *pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1734,7 +1603,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
 }
 
 static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
                         struct sk_buff **pkt, u8 **rxbuf)
 {
        int sdret;              /* Return code from calls */
@@ -1746,9 +1615,8 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
        pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
        *rxbuf = (u8 *) ((*pkt)->data);
        /* Read the entire frame */
-       sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-                                     SDIO_FUNC_2, F2SYNC,
-                                     *rxbuf, rdlen, *pkt);
+       sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+                                     SDIO_FUNC_2, F2SYNC, *pkt);
        bus->f2rxdata++;
 
        if (sdret < 0) {
@@ -1767,7 +1635,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
 
 /* Checks the header */
 static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
                  u8 rxseq, u16 nextlen, u16 *len)
 {
        u16 check;
@@ -1823,7 +1691,7 @@ fail:
 
 /* Return true if there may be more frames to read */
 static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
 {
        u16 len, check; /* Extracted hardware header fields */
        u8 chan, seq, doff;     /* Extracted software header fields */
@@ -1846,14 +1714,15 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
        *finished = false;
 
        for (rxseq = bus->rx_seq, rxleft = maxframes;
-            !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+            !bus->rxskip && rxleft &&
+            bus->drvr->bus_if->state != BRCMF_BUS_DOWN;
             rxseq++, rxleft--) {
 
                /* Handle glomming separately */
-               if (bus->glom || bus->glomd) {
+               if (bus->glomd || !skb_queue_empty(&bus->glom)) {
                        u8 cnt;
                        brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
-                                 bus->glomd, bus->glom);
+                                 bus->glomd, skb_peek(&bus->glom));
                        cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
                        brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
                        rxseq += cnt - 1;
@@ -1976,7 +1845,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
                /* Read frame header (hardware and software) */
                sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
                                              SDIO_FUNC_2, F2SYNC, bus->rxhdr,
-                                             BRCMF_FIRSTREAD, NULL);
+                                             BRCMF_FIRSTREAD);
                bus->f2rxhdrs++;
 
                if (sdret < 0) {
@@ -2125,9 +1994,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
                pkt_align(pkt, rdlen, BRCMF_SDALIGN);
 
                /* Read the remaining frame data */
-               sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
-                               SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
-                               rdlen, pkt);
+               sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+                                             SDIO_FUNC_2, F2SYNC, pkt);
                bus->f2rxdata++;
 
                if (sdret < 0) {
@@ -2194,7 +2062,7 @@ deliver:
 
                /* Unlock during rx call */
                up(&bus->sdsem);
-               brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+               brcmf_rx_packet(bus->drvr, ifidx, pkt);
                down(&bus->sdsem);
        }
        rxcount = maxframes - rxleft;
@@ -2214,16 +2082,8 @@ deliver:
        return rxcount;
 }
 
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
-                   u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
-       return brcmf_sdcard_send_buf
-               (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
 static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
 {
        up(&bus->sdsem);
        wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2233,7 +2093,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
 }
 
 static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
 {
        if (waitqueue_active(&bus->ctrl_wait))
                wake_up_interruptible(&bus->ctrl_wait);
@@ -2242,7 +2102,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
 
 /* Writes a HW/SW header into the packet and sends it. */
 /* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
                              uint chan, bool free_pkt)
 {
        int ret;
@@ -2331,9 +2191,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
        if (len & (ALIGNMENT - 1))
                        len = roundup(len, ALIGNMENT);
 
-       ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
-                                   SDIO_FUNC_2, F2SYNC, frame,
-                                   len, pkt);
+       ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+                                   SDIO_FUNC_2, F2SYNC, pkt);
        bus->f2txdata++;
 
        if (ret < 0) {
@@ -2380,7 +2239,7 @@ done:
        return ret;
 }
 
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 {
        struct sk_buff *pkt;
        u32 intstatus = 0;
@@ -2428,14 +2287,14 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
        }
 
        /* Deflow-control stack if needed */
-       if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
+       if (drvr->up && (drvr->bus_if->state == BRCMF_BUS_DATA) &&
            drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
                brcmf_txflowcontrol(drvr, 0, OFF);
 
        return cnt;
 }
 
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
 {
        u32 intstatus, newstatus = 0;
        uint retries = 0;
@@ -2463,7 +2322,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                                               SBSDIO_DEVICE_CTL, &err);
                if (err) {
                        brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
-                       bus->drvr->busstate = BRCMF_BUS_DOWN;
+                       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                }
 #endif                         /* BCMDBG */
 
@@ -2473,7 +2332,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                if (err) {
                        brcmf_dbg(ERROR, "error reading CSR: %d\n",
                                  err);
-                       bus->drvr->busstate = BRCMF_BUS_DOWN;
+                       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                }
 
                brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2486,7 +2345,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                        if (err) {
                                brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
                                          err);
-                               bus->drvr->busstate = BRCMF_BUS_DOWN;
+                               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                        }
                        devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
                        brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2494,7 +2353,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
                        if (err) {
                                brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
                                          err);
-                               bus->drvr->busstate = BRCMF_BUS_DOWN;
+                               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                        }
                        bus->clkstate = CLK_AVAIL;
                } else {
@@ -2596,9 +2455,9 @@ clkwait:
                (bus->clkstate == CLK_AVAIL)) {
                int ret, i;
 
-               ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+               ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
                        SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
-                       (u32) bus->ctrl_frame_len, NULL);
+                       (u32) bus->ctrl_frame_len);
 
                if (ret < 0) {
                        /* On failure, abort the command and
@@ -2650,11 +2509,11 @@ clkwait:
                 else await next interrupt */
        /* On failed register access, all bets are off:
                 no resched or interrupts */
-       if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+       if ((bus->drvr->bus_if->state == BRCMF_BUS_DOWN) ||
            brcmf_sdcard_regfail(bus->sdiodev)) {
                brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
                          brcmf_sdcard_regfail(bus->sdiodev));
-               bus->drvr->busstate = BRCMF_BUS_DOWN;
+               bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
                bus->intstatus = 0;
        } else if (bus->clkstate == CLK_PENDING) {
                brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2681,7 +2540,7 @@ clkwait:
 
 static int brcmf_sdbrcm_dpc_thread(void *data)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *) data;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
 
        allow_signal(SIGTERM);
        /* Run until signal received */
@@ -2691,12 +2550,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
                if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
                        /* Call bus dpc unless it indicated down
                        (then clean stop) */
-                       if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+                       if (bus->drvr->bus_if->state != BRCMF_BUS_DOWN) {
                                if (brcmf_sdbrcm_dpc(bus))
                                        complete(&bus->dpc_wait);
                        } else {
                                /* after stopping the bus, exit thread */
-                               brcmf_sdbrcm_bus_stop(bus);
+                               brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
                                bus->dpc_tsk = NULL;
                                break;
                        }
@@ -2706,10 +2565,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
        return 0;
 }
 
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
 {
        int ret = -EBADE;
        uint datalen, prec;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2757,7 +2619,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
 }
 
 static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
                 uint size)
 {
        int bcmerror = 0;
@@ -2818,7 +2680,7 @@ xfer_done:
 #ifdef BCMDBG
 #define CONSOLE_LINE_MAX       192
 
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
 {
        struct brcmf_console *c = &bus->console;
        u8 line[CONSOLE_LINE_MAX], ch;
@@ -2895,14 +2757,14 @@ break2:
 }
 #endif                         /* BCMDBG */
 
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
 {
        int i;
        int ret;
 
        bus->ctrl_frame_stat = false;
-       ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
-                                   SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+       ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+                                   SDIO_FUNC_2, F2SYNC, frame, len);
 
        if (ret < 0) {
                /* On failure, abort the command and terminate the frame */
@@ -2938,7 +2800,7 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
 }
 
 int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 {
        u8 *frame;
        u16 len;
@@ -2946,6 +2808,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
        uint retries = 0;
        u8 doff = 0;
        int ret = -1;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3053,11 +2918,14 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
 }
 
 int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
 {
        int timeleft;
        uint rxlen = 0;
        bool pending;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3090,7 +2958,7 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
        return rxlen ? (int)rxlen : -ETIMEDOUT;
 }
 
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
 {
        int bcmerror = 0;
 
@@ -3123,7 +2991,7 @@ err:
        return bcmerror;
 }
 
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
 {
        int bcmerror = 0;
        u32 varsize;
@@ -3210,135 +3078,11 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
        return bcmerror;
 }
 
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
-       u32 regdata;
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-               CORE_SB(corebase, sbtmstatelow), 4);
-       if (regdata & SBTML_RESET)
-               return;
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-               CORE_SB(corebase, sbtmstatelow), 4);
-       if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
-               /*
-                * set target reject and spin until busy is clear
-                * (preserve core-specific bits)
-                */
-               regdata = brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbtmstatelow), 4);
-               brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
-                                      4, regdata | SBTML_REJ);
-
-               regdata = brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbtmstatelow), 4);
-               udelay(1);
-               SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbtmstatehigh), 4) &
-                       SBTMH_BUSY), 100000);
-
-               regdata = brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbtmstatehigh), 4);
-               if (regdata & SBTMH_BUSY)
-                       brcmf_dbg(ERROR, "ARM core still busy\n");
-
-               regdata = brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbidlow), 4);
-               if (regdata & SBIDL_INIT) {
-                       regdata = brcmf_sdcard_reg_read(sdiodev,
-                               CORE_SB(corebase, sbimstate), 4) |
-                               SBIM_RJ;
-                       brcmf_sdcard_reg_write(sdiodev,
-                               CORE_SB(corebase, sbimstate), 4,
-                               regdata);
-                       regdata = brcmf_sdcard_reg_read(sdiodev,
-                               CORE_SB(corebase, sbimstate), 4);
-                       udelay(1);
-                       SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
-                               CORE_SB(corebase, sbimstate), 4) &
-                               SBIM_BY), 100000);
-               }
-
-               /* set reset and reject while enabling the clocks */
-               brcmf_sdcard_reg_write(sdiodev,
-                       CORE_SB(corebase, sbtmstatelow), 4,
-                       (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
-                       SBTML_REJ | SBTML_RESET));
-               regdata = brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbtmstatelow), 4);
-               udelay(10);
-
-               /* clear the initiator reject bit */
-               regdata = brcmf_sdcard_reg_read(sdiodev,
-                       CORE_SB(corebase, sbidlow), 4);
-               if (regdata & SBIDL_INIT) {
-                       regdata = brcmf_sdcard_reg_read(sdiodev,
-                               CORE_SB(corebase, sbimstate), 4) &
-                               ~SBIM_RJ;
-                       brcmf_sdcard_reg_write(sdiodev,
-                               CORE_SB(corebase, sbimstate), 4,
-                               regdata);
-               }
-       }
-
-       /* leave reset and reject asserted */
-       brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-               (SBTML_REJ | SBTML_RESET));
-       udelay(1);
-}
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
-       u32 regdata;
-
-       /*
-        * Must do the disable sequence first to work for
-        * arbitrary current core state.
-        */
-       brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
-
-       /*
-        * Now do the initialization sequence.
-        * set reset while enabling the clock and
-        * forcing them on throughout the core
-        */
-       brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-               ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
-               SBTML_RESET);
-       udelay(1);
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-                                       CORE_SB(corebase, sbtmstatehigh), 4);
-       if (regdata & SBTMH_SERR)
-               brcmf_sdcard_reg_write(sdiodev,
-                                      CORE_SB(corebase, sbtmstatehigh), 4, 0);
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-                                       CORE_SB(corebase, sbimstate), 4);
-       if (regdata & (SBIM_IBE | SBIM_TO))
-               brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
-                       regdata & ~(SBIM_IBE | SBIM_TO));
-
-       /* clear reset and allow it to propagate throughout the core */
-       brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-               (SICF_FGC << SBTML_SICF_SHIFT) |
-               (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
-       udelay(1);
-
-       /* leave clock enabled */
-       brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
-               (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
-       udelay(1);
-}
-
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
 {
        uint retries;
-       u32 regdata;
        int bcmerror = 0;
+       struct chip_info *ci = bus->ci;
 
        /* To enter download state, disable ARM and reset SOCRAM.
         * To exit download state, simply reset ARM (default is RAM boot).
@@ -3346,10 +3090,9 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
        if (enter) {
                bus->alp_only = true;
 
-               brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
-                                             bus->ci->armcorebase);
+               ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
 
-               brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+               ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
 
                /* Clear the top bit of memory */
                if (bus->ramsize) {
@@ -3358,11 +3101,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
                                         (u8 *)&zeros, 4);
                }
        } else {
-               regdata = brcmf_sdcard_reg_read(bus->sdiodev,
-                       CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
-               regdata &= (SBTML_RESET | SBTML_REJ_MASK |
-                       (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
-               if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+               if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
                        brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
                        bcmerror = -EBADE;
                        goto fail;
@@ -3377,18 +3116,18 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
                w_sdreg32(bus, 0xFFFFFFFF,
                          offsetof(struct sdpcmd_regs, intstatus), &retries);
 
-               brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+               ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
 
                /* Allow HT Clock now that the ARM is running. */
                bus->alp_only = false;
 
-               bus->drvr->busstate = BRCMF_BUS_LOAD;
+               bus->drvr->bus_if->state = BRCMF_BUS_LOAD;
        }
 fail:
        return bcmerror;
 }
 
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
 {
        if (bus->firmware->size < bus->fw_ptr + len)
                len = bus->firmware->size - bus->fw_ptr;
@@ -3398,10 +3137,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
        return len;
 }
 
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
 {
        int offset = 0;
        uint len;
@@ -3410,8 +3146,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
 
        brcmf_dbg(INFO, "Enter\n");
 
-       bus->fw_name = BCM4329_FW_NAME;
-       ret = request_firmware(&bus->firmware, bus->fw_name,
+       ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
                               &bus->sdiodev->func[2]->dev);
        if (ret) {
                brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3501,15 +3236,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
        return buf_len;
 }
 
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
 {
        uint len;
        char *memblock = NULL;
        char *bufp;
        int ret;
 
-       bus->nv_name = BCM4329_NV_NAME;
-       ret = request_firmware(&bus->firmware, bus->nv_name,
+       ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
                               &bus->sdiodev->func[2]->dev);
        if (ret) {
                brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3549,7 +3283,7 @@ err:
        return ret;
 }
 
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 {
        int bcmerror = -1;
 
@@ -3582,7 +3316,7 @@ err:
 }
 
 static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
 {
        bool ret;
 
@@ -3596,12 +3330,15 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
        return ret;
 }
 
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+void brcmf_sdbrcm_bus_stop(struct device *dev)
 {
        u32 local_hostintmask;
        u8 saveclk;
        uint retries;
        int err;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3630,7 +3367,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
        bus->hostintmask = 0;
 
        /* Change our idea of bus state */
-       bus->drvr->busstate = BRCMF_BUS_DOWN;
+       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
 
        /* Force clocks on backplane to be sure F2 interrupt propagates */
        saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
@@ -3661,11 +3398,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
        /* Clear any held glomming stuff */
        if (bus->glomd)
                brcmu_pkt_buf_free_skb(bus->glomd);
-
-       if (bus->glom)
-               brcmu_pkt_buf_free_skb(bus->glom);
-
-       bus->glom = bus->glomd = NULL;
+       brcmf_sdbrcm_free_glom(bus);
 
        /* Clear rx control and wake any waiters */
        bus->rxlen = 0;
@@ -3678,9 +3411,11 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
        up(&bus->sdsem);
 }
 
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+int brcmf_sdbrcm_bus_init(struct device *dev)
 {
-       struct brcmf_bus *bus = drvr->bus;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+       struct brcmf_sdio *bus = sdiodev->bus;
        unsigned long timeout;
        uint retries = 0;
        u8 ready, enable;
@@ -3690,7 +3425,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
        brcmf_dbg(TRACE, "Enter\n");
 
        /* try to download image and nvram to the dongle */
-       if (drvr->busstate == BRCMF_BUS_DOWN) {
+       if (bus_if->state == BRCMF_BUS_DOWN) {
                if (!(brcmf_sdbrcm_download_firmware(bus)))
                        return -1;
        }
@@ -3756,7 +3491,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
                                       SBSDIO_WATERMARK, 8, &err);
 
                /* Set bus state according to enable result */
-               drvr->busstate = BRCMF_BUS_DATA;
+               bus_if->state = BRCMF_BUS_DATA;
        }
 
        else {
@@ -3771,7 +3506,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
                               SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
 
        /* If we didn't come up, turn off backplane clock */
-       if (drvr->busstate != BRCMF_BUS_DATA)
+       if (bus_if->state != BRCMF_BUS_DATA)
                brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
 
 exit:
@@ -3782,7 +3517,7 @@ exit:
 
 void brcmf_sdbrcm_isr(void *arg)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3791,7 +3526,7 @@ void brcmf_sdbrcm_isr(void *arg)
                return;
        }
 
-       if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+       if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN) {
                brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
                return;
        }
@@ -3814,14 +3549,14 @@ void brcmf_sdbrcm_isr(void *arg)
                complete(&bus->dpc_wait);
 }
 
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 {
-       struct brcmf_bus *bus;
+#ifdef BCMDBG
+       struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif /* BCMDBG */
 
        brcmf_dbg(TIMER, "Enter\n");
 
-       bus = drvr->bus;
-
        /* Ignore the timer if simulating bus down */
        if (bus->sleeping)
                return false;
@@ -3865,7 +3600,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
        }
 #ifdef BCMDBG
        /* Poll for console output periodically */
-       if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+       if (bus_if->state == BRCMF_BUS_DATA &&
+           bus->console_interval != 0) {
                bus->console.count += BRCMF_WD_POLL_MS;
                if (bus->console.count >= bus->console_interval) {
                        bus->console.count -= bus->console_interval;
@@ -3903,7 +3639,7 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
        return false;
 }
 
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3915,7 +3651,7 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
        bus->databuf = NULL;
 }
 
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3950,276 +3686,14 @@ fail:
        return false;
 }
 
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
-       u8 strength;    /* Pad Drive Strength in mA */
-       u8 sel;         /* Chip-specific select value */
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
-       {
-       4, 0x2}, {
-       2, 0x3}, {
-       1, 0x0}, {
-       0, 0x0}
-       };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
-       {
-       12, 0x7}, {
-       10, 0x6}, {
-       8, 0x5}, {
-       6, 0x4}, {
-       4, 0x2}, {
-       2, 0x1}, {
-       0, 0x0}
-       };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
-       {
-       32, 0x7}, {
-       26, 0x6}, {
-       22, 0x5}, {
-       16, 0x4}, {
-       12, 0x3}, {
-       8, 0x2}, {
-       4, 0x1}, {
-       0, 0x0}
-       };
-
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
-
-static char *brcmf_chipname(uint chipid, char *buf, uint len)
-{
-       const char *fmt;
-
-       fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
-       snprintf(buf, len, fmt, chipid);
-       return buf;
-}
-
-static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
-                                                  u32 drivestrength) {
-       struct sdiod_drive_str *str_tab = NULL;
-       u32 str_mask = 0;
-       u32 str_shift = 0;
-       char chn[8];
-
-       if (!(bus->ci->cccaps & CC_CAP_PMU))
-               return;
-
-       switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
-       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
-               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
-               str_mask = 0x30000000;
-               str_shift = 28;
-               break;
-       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
-       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
-               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
-               str_mask = 0x00003800;
-               str_shift = 11;
-               break;
-       case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
-               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
-               str_mask = 0x00003800;
-               str_shift = 11;
-               break;
-       default:
-               brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
-                         brcmf_chipname(bus->ci->chip, chn, 8),
-                         bus->ci->chiprev, bus->ci->pmurev);
-               break;
-       }
-
-       if (str_tab != NULL) {
-               u32 drivestrength_sel = 0;
-               u32 cc_data_temp;
-               int i;
-
-               for (i = 0; str_tab[i].strength != 0; i++) {
-                       if (drivestrength >= str_tab[i].strength) {
-                               drivestrength_sel = str_tab[i].sel;
-                               break;
-                       }
-               }
-
-               brcmf_sdcard_reg_write(bus->sdiodev,
-                       CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
-                       4, 1);
-               cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
-                       CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
-               cc_data_temp &= ~str_mask;
-               drivestrength_sel <<= str_shift;
-               cc_data_temp |= drivestrength_sel;
-               brcmf_sdcard_reg_write(bus->sdiodev,
-                       CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
-                       4, cc_data_temp);
-
-               brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
-                         drivestrength, cc_data_temp);
-       }
-}
-
-static int
-brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
-                             struct chip_info *ci, u32 regs)
-{
-       u32 regdata;
-
-       /*
-        * Get CC core rev
-        * Chipid is assume to be at offset 0 from regs arg
-        * For different chiptypes or old sdio hosts w/o chipcommon,
-        * other ways of recognition should be added here.
-        */
-       ci->cccorebase = regs;
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-                               CORE_CC_REG(ci->cccorebase, chipid), 4);
-       ci->chip = regdata & CID_ID_MASK;
-       ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-
-       brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
-
-       /* Address of cores for new chips should be added here */
-       switch (ci->chip) {
-       case BCM4329_CHIP_ID:
-               ci->buscorebase = BCM4329_CORE_BUS_BASE;
-               ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
-               ci->armcorebase = BCM4329_CORE_ARM_BASE;
-               ci->ramsize = BCM4329_RAMSIZE;
-               break;
-       default:
-               brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
-               return -ENODEV;
-       }
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-               CORE_SB(ci->cccorebase, sbidhigh), 4);
-       ci->ccrev = SBCOREREV(regdata);
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-               CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
-       ci->pmurev = regdata & PCAP_REV_MASK;
-
-       regdata = brcmf_sdcard_reg_read(sdiodev,
-                                       CORE_SB(ci->buscorebase, sbidhigh), 4);
-       ci->buscorerev = SBCOREREV(regdata);
-       ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
-
-       brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
-                 ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
-
-       /* get chipcommon capabilites */
-       ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
-               CORE_CC_REG(ci->cccorebase, capabilities), 4);
-
-       return 0;
-}
-
-static int
-brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
-{
-       struct chip_info *ci;
-       int err;
-       u8 clkval, clkset;
-
-       brcmf_dbg(TRACE, "Enter\n");
-
-       /* alloc chip_info_t */
-       ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
-       if (NULL == ci)
-               return -ENOMEM;
-
-       /* bus/core/clk setup for register access */
-       /* Try forcing SDIO core to do ALPAvail request only */
-       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-       brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-                              SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
-       if (err) {
-               brcmf_dbg(ERROR, "error writing for HT off\n");
-               goto fail;
-       }
-
-       /* If register supported, wait for ALPAvail and then force ALP */
-       /* This may take up to 15 milliseconds */
-       clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-                       SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-       if ((clkval & ~SBSDIO_AVBITS) == clkset) {
-               SPINWAIT(((clkval =
-                               brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-                                               SBSDIO_FUNC1_CHIPCLKCSR,
-                                               NULL)),
-                               !SBSDIO_ALPAV(clkval)),
-                               PMU_MAX_TRANSITION_DLY);
-               if (!SBSDIO_ALPAV(clkval)) {
-                       brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
-                                 clkval);
-                       err = -EBUSY;
-                       goto fail;
-               }
-               clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
-                               SBSDIO_FORCE_ALP;
-               brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-                               SBSDIO_FUNC1_CHIPCLKCSR,
-                               clkset, &err);
-               udelay(65);
-       } else {
-               brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
-                         clkset, clkval);
-               err = -EACCES;
-               goto fail;
-       }
-
-       /* Also, disable the extra SDIO pull-ups */
-       brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-                              SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
-       err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
-       if (err)
-               goto fail;
-
-       /*
-        * Make sure any on-chip ARM is off (in case strapping is wrong),
-        * or downloaded code was already running.
-        */
-       brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
-
-       brcmf_sdcard_reg_write(bus->sdiodev,
-               CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
-       brcmf_sdcard_reg_write(bus->sdiodev,
-               CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
-
-       /* Disable F2 to clear any intermediate frame state on the dongle */
-       brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
-               SDIO_FUNC_ENABLE_1, NULL);
-
-       /* WAR: cmd52 backplane read so core HW will drop ALPReq */
-       clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
-                       0, NULL);
-
-       /* Done with backplane-dependent accesses, can drop clock... */
-       brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
-                              SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
-       bus->ci = ci;
-       return 0;
-fail:
-       bus->ci = NULL;
-       kfree(ci);
-       return err;
-}
-
 static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
 {
        u8 clkctl = 0;
        int err = 0;
        int reg_addr;
        u32 reg_val;
+       u8 idx;
 
        bus->alp_only = true;
 
@@ -4234,7 +3708,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
 #endif                         /* BCMDBG */
 
        /*
-        * Force PLL off until brcmf_sdbrcm_chip_attach()
+        * Force PLL off until brcmf_sdio_chip_attach()
         * programs PLL control regs
         */
 
@@ -4252,8 +3726,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
                goto fail;
        }
 
-       if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
-               brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+       if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+               brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n");
                goto fail;
        }
 
@@ -4262,11 +3736,10 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
                goto fail;
        }
 
-       brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+       brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci,
+                                         SDIO_DRIVE_STRENGTH);
 
-       /* Get info on the ARM and SOCRAM cores... */
-       brcmf_sdcard_reg_read(bus->sdiodev,
-                 CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+       /* Get info on the SOCRAM cores... */
        bus->ramsize = bus->ci->ramsize;
        if (!(bus->ramsize)) {
                brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
@@ -4274,7 +3747,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
        }
 
        /* Set core control so an SDIO reset does a backplane reset */
-       reg_addr = bus->ci->buscorebase +
+       idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+       reg_addr = bus->ci->c_inf[idx].base +
                   offsetof(struct sdpcmd_regs, corecontrol);
        reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
        brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
@@ -4298,7 +3772,7 @@ fail:
        return false;
 }
 
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4306,7 +3780,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
        brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
                               SDIO_FUNC_ENABLE_1, NULL);
 
-       bus->drvr->busstate = BRCMF_BUS_DOWN;
+       bus->drvr->bus_if->state = BRCMF_BUS_DOWN;
        bus->sleeping = false;
        bus->rxflow = false;
 
@@ -4333,7 +3807,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
 static int
 brcmf_sdbrcm_watchdog_thread(void *data)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *)data;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
        allow_signal(SIGTERM);
        /* Run until signal received */
@@ -4341,7 +3815,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
                if (kthread_should_stop())
                        break;
                if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
-                       brcmf_sdbrcm_bus_watchdog(bus->drvr);
+                       brcmf_sdbrcm_bus_watchdog(bus);
                        /* Count the tick for reference */
                        bus->drvr->tickcnt++;
                } else
@@ -4353,7 +3827,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
 static void
 brcmf_sdbrcm_watchdog(unsigned long data)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *)data;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
 
        if (bus->watchdog_tsk) {
                complete(&bus->watchdog_wait);
@@ -4364,23 +3838,14 @@ brcmf_sdbrcm_watchdog(unsigned long data)
        }
 }
 
-static void
-brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
-{
-       brcmf_dbg(TRACE, "Enter\n");
-
-       kfree(bus->ci);
-       bus->ci = NULL;
-}
-
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
        if (bus->ci) {
                brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
                brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
-               brcmf_sdbrcm_chip_detach(bus);
+               brcmf_sdio_chip_detach(&bus->ci);
                if (bus->vars && bus->varsz)
                        kfree(bus->vars);
                bus->vars = NULL;
@@ -4390,7 +3855,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
 }
 
 /* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
 {
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4412,21 +3877,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
        brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
-                        u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
 {
        int ret;
-       struct brcmf_bus *bus;
-
-       /* Init global variables at run-time, not as part of the declaration.
-        * This is required to support init/de-init of the driver.
-        * Initialization
-        * of globals as part of the declaration results in non-deterministic
-        * behavior since the value of the globals may be different on the
-        * first time that the driver is initialized vs subsequent
-        * initializations.
-        */
-       brcmf_c_init();
+       struct brcmf_sdio *bus;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4434,12 +3888,13 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
         * regsva == SI_ENUM_BASE*/
 
        /* Allocate private bus interface state */
-       bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+       bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
        if (!bus)
                goto fail;
 
        bus->sdiodev = sdiodev;
        sdiodev->bus = bus;
+       skb_queue_head_init(&bus->glom);
        bus->txbound = BRCMF_TXBOUND;
        bus->rxbound = BRCMF_RXBOUND;
        bus->txminmax = BRCMF_TXMINMAX;
@@ -4485,7 +3940,7 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
        }
 
        /* Attach to the brcmf/OS/network interface */
-       bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
+       bus->drvr = brcmf_attach(bus, SDPCM_RESERVE, bus->sdiodev->dev);
        if (!bus->drvr) {
                brcmf_dbg(ERROR, "brcmf_attach failed\n");
                goto fail;
@@ -4521,9 +3976,10 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
                        goto fail;
                }
        }
-       /* Ok, have the per-port tell the stack we're open for business */
-       if (brcmf_net_attach(bus->drvr, 0) != 0) {
-               brcmf_dbg(ERROR, "Net attach failed!!\n");
+
+       /* add interface and open for business */
+       if (brcmf_add_if((struct brcmf_info *)bus->drvr, 0, "wlan%d", NULL)) {
+               brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
                goto fail;
        }
 
@@ -4536,7 +3992,7 @@ fail:
 
 void brcmf_sdbrcm_disconnect(void *ptr)
 {
-       struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+       struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4546,18 +4002,9 @@ void brcmf_sdbrcm_disconnect(void *ptr)
        brcmf_dbg(TRACE, "Disconnected\n");
 }
 
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
-       return &bus->sdiodev->func[2]->dev;
-}
-
 void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
 {
-       /* don't start the wd until fw is loaded */
-       if (bus->drvr->busstate == BRCMF_BUS_DOWN)
-               return;
-
        /* Totally stop the timer */
        if (!wdtick && bus->wd_timer_valid == true) {
                del_timer_sync(&bus->timer);
@@ -4566,6 +4013,10 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
                return;
        }
 
+       /* don't start the wd until fw is loaded */
+       if (bus->drvr->bus_if->state == BRCMF_BUS_DOWN)
+               return;
+
        if (wdtick) {
                if (bus->save_ms != BRCMF_WD_POLL_MS) {
                        if (bus->wd_timer_valid == true)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
new file mode 100644
index 0000000..f6b1822
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ***** SDIO interface chip backplane handling functions ***** */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/card.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+
+#include <chipcommon.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <soc.h>
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+#include "sdio_chip.h"
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE          0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE       0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE          0x18002000
+#define BCM4329_RAMSIZE                        0x48000
+
+#define        SBCOREREV(sbidh) \
+       ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+         ((sbidh) & SSB_IDHIGH_RCLO))
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB                0
+#define SOCI_AI                1
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK           0xff000000
+#define CIB_REV_SHIFT          24
+
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+       u8 strength;    /* Pad Drive Strength in mA */
+       u8 sel;         /* Chip-specific select value */
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
+       {4, 0x2},
+       {2, 0x3},
+       {1, 0x0},
+       {0, 0x0}
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
+       {12, 0x7},
+       {10, 0x6},
+       {8, 0x5},
+       {6, 0x4},
+       {4, 0x2},
+       {2, 0x1},
+       {0, 0x0}
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
+       {32, 0x7},
+       {26, 0x6},
+       {22, 0x5},
+       {16, 0x4},
+       {12, 0x3},
+       {8, 0x2},
+       {4, 0x1},
+       {0, 0x0}
+};
+
+u8
+brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+{
+       u8 idx;
+
+       for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+               if (coreid == ci->c_inf[idx].id)
+                       return idx;
+
+       return BRCMF_MAX_CORENUM;
+}
+
+static u32
+brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+                     struct chip_info *ci, u16 coreid)
+{
+       u32 regdata;
+       u8 idx;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+       return SBCOREREV(regdata);
+}
+
+static u32
+brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+                     struct chip_info *ci, u16 coreid)
+{
+       u8 idx;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
+static bool
+brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+                      struct chip_info *ci, u16 coreid)
+{
+       u32 regdata;
+       u8 idx;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+       regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+                   SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+       return (SSB_TMSLOW_CLOCK == regdata);
+}
+
+static bool
+brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+                      struct chip_info *ci, u16 coreid)
+{
+       u32 regdata;
+       u8 idx;
+       bool ret;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                                       ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+       ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                                       ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+                                       4);
+       ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+       return ret;
+}
+
+static void
+brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+                         struct chip_info *ci, u16 coreid)
+{
+       u32 regdata;
+       u8 idx;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+       if (regdata & SSB_TMSLOW_RESET)
+               return;
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+       if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+               /*
+                * set target reject and spin until busy is clear
+                * (preserve core-specific bits)
+                */
+               regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+               brcmf_sdcard_reg_write(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+                               4, regdata | SSB_TMSLOW_REJECT);
+
+               regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+               udelay(1);
+               SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+                       SSB_TMSHIGH_BUSY), 100000);
+
+               regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+               if (regdata & SSB_TMSHIGH_BUSY)
+                       brcmf_dbg(ERROR, "core state still busy\n");
+
+               regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+               if (regdata & SSB_IDLOW_INITIATOR) {
+                       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
+                               SSB_IMSTATE_REJECT;
+                       brcmf_sdcard_reg_write(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+                               regdata);
+                       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+                       udelay(1);
+                       SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+                               SSB_IMSTATE_BUSY), 100000);
+               }
+
+               /* set reset and reject while enabling the clocks */
+               brcmf_sdcard_reg_write(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+                       (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+                       SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+               regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+               udelay(10);
+
+               /* clear the initiator reject bit */
+               regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+               if (regdata & SSB_IDLOW_INITIATOR) {
+                       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+                               ~SSB_IMSTATE_REJECT;
+                       brcmf_sdcard_reg_write(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+                               regdata);
+               }
+       }
+
+       /* leave reset and reject asserted */
+       brcmf_sdcard_reg_write(sdiodev,
+               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+               (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+       udelay(1);
+}
+
+static void
+brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+                         struct chip_info *ci, u16 coreid)
+{
+       u8 idx;
+       u32 regdata;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       /* if core is already in reset, just return */
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                                       ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+                                       4);
+       if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+               return;
+
+       brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+                              4, 0);
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                                       ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+       udelay(10);
+
+       brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+                              4, BCMA_RESET_CTL_RESET);
+       udelay(1);
+}
+
+static void
+brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+                       struct chip_info *ci, u16 coreid)
+{
+       u32 regdata;
+       u8 idx;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       /*
+        * Must do the disable sequence first to work for
+        * arbitrary current core state.
+        */
+       brcmf_sdio_sb_coredisable(sdiodev, ci, coreid);
+
+       /*
+        * Now do the initialization sequence.
+        * set reset while enabling the clock and
+        * forcing them on throughout the core
+        */
+       brcmf_sdcard_reg_write(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+                       SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+       udelay(1);
+
+       /* clear any serror */
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+       if (regdata & SSB_TMSHIGH_SERR)
+               brcmf_sdcard_reg_write(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+       if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+               brcmf_sdcard_reg_write(sdiodev,
+                       CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+                       regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+
+       /* clear reset and allow it to propagate throughout the core */
+       brcmf_sdcard_reg_write(sdiodev,
+               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+               SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+       udelay(1);
+
+       /* leave clock enabled */
+       brcmf_sdcard_reg_write(sdiodev,
+                              CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+                              4, SSB_TMSLOW_CLOCK);
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                               CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+       udelay(1);
+}
+
+static void
+brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+                       struct chip_info *ci, u16 coreid)
+{
+       u8 idx;
+       u32 regdata;
+
+       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+       /* must disable first to work for arbitrary current core state */
+       brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
+
+       /* now do initialization sequence */
+       brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+                              4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                                       ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+       brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+                              4, 0);
+       udelay(1);
+
+       brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+                              4, BCMA_IOCTL_CLK);
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                                       ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+       udelay(1);
+}
+
+static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+                                      struct chip_info *ci, u32 regs)
+{
+       u32 regdata;
+
+       /*
+        * Get CC core rev
+        * The chip id is assumed to be at offset 0 from the regs argument.
+        * For different chip types or old SDIO hosts without a chipcommon
+        * core, other ways of recognition should be added here.
+        */
+       ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+       ci->c_inf[0].base = regs;
+       regdata = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+       ci->chip = regdata & CID_ID_MASK;
+       ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+       ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+       brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+       /* Addresses of cores for new chips should be added here */
+       switch (ci->chip) {
+       case BCM4329_CHIP_ID:
+               ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+               ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+               ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+               ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+               ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+               ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+               ci->ramsize = BCM4329_RAMSIZE;
+               break;
+       default:
+               brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+               return -ENODEV;
+       }
+
+       switch (ci->socitype) {
+       case SOCI_SB:
+               ci->iscoreup = brcmf_sdio_sb_iscoreup;
+               ci->corerev = brcmf_sdio_sb_corerev;
+               ci->coredisable = brcmf_sdio_sb_coredisable;
+               ci->resetcore = brcmf_sdio_sb_resetcore;
+               break;
+       case SOCI_AI:
+               ci->iscoreup = brcmf_sdio_ai_iscoreup;
+               ci->corerev = brcmf_sdio_ai_corerev;
+               ci->coredisable = brcmf_sdio_ai_coredisable;
+               ci->resetcore = brcmf_sdio_ai_resetcore;
+               break;
+       default:
+               brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int
+brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+{
+       int err = 0;
+       u8 clkval, clkset;
+
+       /* Try forcing SDIO core to do ALPAvail request only */
+       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+       brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+                              SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+       if (err) {
+               brcmf_dbg(ERROR, "error writing for HT off\n");
+               return err;
+       }
+
+       /* If register supported, wait for ALPAvail and then force ALP */
+       /* This may take up to 15 milliseconds */
+       clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+                                      SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+       if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+               brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+                         clkset, clkval);
+               return -EACCES;
+       }
+
+       SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+                               SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+                       !SBSDIO_ALPAV(clkval)),
+                       PMU_MAX_TRANSITION_DLY);
+       if (!SBSDIO_ALPAV(clkval)) {
+               brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+                         clkval);
+               return -EBUSY;
+       }
+
+       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+       brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+                              SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+       udelay(65);
+
+       /* Also, disable the extra SDIO pull-ups */
+       brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+                              SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+       return 0;
+}
+
+static void
+brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+                            struct chip_info *ci)
+{
+       /* get chipcommon rev */
+       ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+
+       /* get chipcommon capabilities */
+       ci->c_inf[0].caps =
+               brcmf_sdcard_reg_read(sdiodev,
+               CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+
+       /* get pmu caps & rev */
+       if (ci->c_inf[0].caps & CC_CAP_PMU) {
+               ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+               ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+       }
+
+       ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+
+       brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+                 ci->c_inf[0].rev, ci->pmurev,
+                 ci->c_inf[1].rev, ci->c_inf[1].id);
+
+       /*
+        * Make sure any on-chip ARM is off (in case strapping is wrong
+        * or previously downloaded code was already running).
+        */
+       ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3);
+}
+
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+                          struct chip_info **ci_ptr, u32 regs)
+{
+       int ret;
+       struct chip_info *ci;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       /* allocate struct chip_info */
+       ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+       if (!ci)
+               return -ENOMEM;
+
+       ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+       if (ret != 0)
+               goto err;
+
+       ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+       if (ret != 0)
+               goto err;
+
+       brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+
+       brcmf_sdcard_reg_write(sdiodev,
+               CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
+       brcmf_sdcard_reg_write(sdiodev,
+               CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+
+       *ci_ptr = ci;
+       return 0;
+
+err:
+       kfree(ci);
+       return ret;
+}
+
+void
+brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+{
+       brcmf_dbg(TRACE, "Enter\n");
+
+       kfree(*ci_ptr);
+       *ci_ptr = NULL;
+}
+
+static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+{
+       const char *fmt;
+
+       fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+       snprintf(buf, len, fmt, chipid);
+       return buf;
+}
+
+void
+brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+                                 struct chip_info *ci, u32 drivestrength)
+{
+       struct sdiod_drive_str *str_tab = NULL;
+       u32 str_mask = 0;
+       u32 str_shift = 0;
+       char chn[8];
+
+       if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+               return;
+
+       switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
+               str_mask = 0x30000000;
+               str_shift = 28;
+               break;
+       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+       case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
+               str_mask = 0x00003800;
+               str_shift = 11;
+               break;
+       case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+               str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
+               str_mask = 0x00003800;
+               str_shift = 11;
+               break;
+       default:
+               brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+                         brcmf_sdio_chip_name(ci->chip, chn, 8),
+                         ci->chiprev, ci->pmurev);
+               break;
+       }
+
+       if (str_tab != NULL) {
+               u32 drivestrength_sel = 0;
+               u32 cc_data_temp;
+               int i;
+
+               for (i = 0; str_tab[i].strength != 0; i++) {
+                       if (drivestrength >= str_tab[i].strength) {
+                               drivestrength_sel = str_tab[i].sel;
+                               break;
+                       }
+               }
+
+               brcmf_sdcard_reg_write(sdiodev,
+                       CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+                       4, 1);
+               cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
+                       CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+               cc_data_temp &= ~str_mask;
+               drivestrength_sel <<= str_shift;
+               cc_data_temp |= drivestrength_sel;
+               brcmf_sdcard_reg_write(sdiodev,
+                       CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+                       4, cc_data_temp);
+
+               brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+                         drivestrength, cc_data_temp);
+       }
+}
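
The drive-strength setup at the end of this file boils down to a table scan: take the first
entry whose strength does not exceed the requested milliamp value, then shift its select code
into the chip-specific field of chipcontrol. A standalone sketch follows (not part of the
patch; the helper name drivestrength_bits and the reuse of the PMU rev 1 mask/shift are
illustrative only):

#include <stdio.h>
#include <stdint.h>

struct sdiod_drive_str {
        uint8_t strength;       /* pad drive strength in mA */
        uint8_t sel;            /* chip-specific select value */
};

/* PMU rev 1 table, mirroring sdiod_drive_strength_tab1 above */
static const struct sdiod_drive_str tab1[] = {
        {4, 0x2}, {2, 0x3}, {1, 0x0}, {0, 0x0}
};

/* Return the bits a caller would OR into (chipcontrol & ~str_mask). */
static uint32_t drivestrength_bits(uint32_t requested_ma,
                                   uint32_t str_mask, uint32_t str_shift)
{
        uint32_t sel = 0;
        int i;

        /* first entry whose strength fits under the request wins */
        for (i = 0; tab1[i].strength != 0; i++) {
                if (requested_ma >= tab1[i].strength) {
                        sel = tab1[i].sel;
                        break;
                }
        }
        return (sel << str_shift) & str_mask;
}

int main(void)
{
        /* PMU rev 1 keeps the field in bits 29:28 of chipcontrol */
        printf("4 mA -> 0x%08x\n",
               (unsigned int)drivestrength_bits(4, 0x30000000, 28));
        printf("2 mA -> 0x%08x\n",
               (unsigned int)drivestrength_bits(2, 0x30000000, 28));
        return 0;
}
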
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
new file mode 100644 (file)
index 0000000..ce974d7
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMFMAC_SDIO_CHIP_H_
+#define _BRCMFMAC_SDIO_CHIP_H_
+
+/*
+ * Core reg address translation.
+ * Each macro returns a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+               (base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+               (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+               (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP               0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT                        0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP               0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ           0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ            0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF     0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL               0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL                        0x80
+#define SBSDIO_AVBITS          (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)   ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)    (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+       (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+#define BRCMF_MAX_CORENUM      6
+
+struct chip_core_info {
+       u16 id;
+       u16 rev;
+       u32 base;
+       u32 wrapbase;
+       u32 caps;
+       u32 cib;
+};
+
+struct chip_info {
+       u32 chip;
+       u32 chiprev;
+       u32 socitype;
+       /* core info */
+       /* always put chipcommon core at 0, bus core at 1 */
+       struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+       u32 pmurev;
+       u32 pmucaps;
+       u32 ramsize;
+
+       bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+                        u16 coreid);
+       u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+                        u16 coreid);
+       void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+                       struct chip_info *ci, u16 coreid);
+       void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+                       struct chip_info *ci, u16 coreid);
+};
+
+struct sbconfig {
+       u32 PAD[2];
+       u32 sbipsflag;  /* initiator port ocp slave flag */
+       u32 PAD[3];
+       u32 sbtpsflag;  /* target port ocp slave flag */
+       u32 PAD[11];
+       u32 sbtmerrloga;        /* (sonics >= 2.3) */
+       u32 PAD;
+       u32 sbtmerrlog; /* (sonics >= 2.3) */
+       u32 PAD[3];
+       u32 sbadmatch3; /* address match3 */
+       u32 PAD;
+       u32 sbadmatch2; /* address match2 */
+       u32 PAD;
+       u32 sbadmatch1; /* address match1 */
+       u32 PAD[7];
+       u32 sbimstate;  /* initiator agent state */
+       u32 sbintvec;   /* interrupt mask */
+       u32 sbtmstatelow;       /* target state */
+       u32 sbtmstatehigh;      /* target state */
+       u32 sbbwa0;             /* bandwidth allocation table0 */
+       u32 PAD;
+       u32 sbimconfiglow;      /* initiator configuration */
+       u32 sbimconfighigh;     /* initiator configuration */
+       u32 sbadmatch0; /* address match0 */
+       u32 PAD;
+       u32 sbtmconfiglow;      /* target configuration */
+       u32 sbtmconfighigh;     /* target configuration */
+       u32 sbbconfig;  /* broadcast configuration */
+       u32 PAD;
+       u32 sbbstate;   /* broadcast state */
+       u32 PAD[3];
+       u32 sbactcnfg;  /* activate configuration */
+       u32 PAD[3];
+       u32 sbflagst;   /* current sbflags */
+       u32 PAD[3];
+       u32 sbidlow;            /* identification */
+       u32 sbidhigh;   /* identification */
+};
+
+extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+                                 struct chip_info **ci_ptr, u32 regs);
+extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+                                             struct chip_info *ci,
+                                             u32 drivestrength);
+extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+
+
+#endif         /* _BRCMFMAC_SDIO_CHIP_H_ */
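
The chip_info structure above hides the backplane flavour behind four function pointers that
brcmf_sdio_chip_recognition() fills in per socitype. A standalone sketch of that dispatch
pattern (not from the tree; every name below is invented, only the idea of picking the sb_*
or ai_* implementations once at attach time mirrors the patch):

#include <stdbool.h>
#include <stdio.h>

#define SOCI_SB 0
#define SOCI_AI 1

struct fake_chip {
        unsigned int socitype;
        bool (*iscoreup)(struct fake_chip *ci, unsigned int coreid);
};

static bool sb_iscoreup(struct fake_chip *ci, unsigned int coreid)
{
        (void)ci;
        printf("SB path: read sbtmstatelow of core 0x%x\n", coreid);
        return true;
}

static bool ai_iscoreup(struct fake_chip *ci, unsigned int coreid)
{
        (void)ci;
        printf("AI path: read BCMA_IOCTL/BCMA_RESET_CTL of core 0x%x\n", coreid);
        return true;
}

/* done once at attach time, like brcmf_sdio_chip_recognition() */
static void fake_attach(struct fake_chip *ci, unsigned int socitype)
{
        ci->socitype = socitype;
        ci->iscoreup = (socitype == SOCI_AI) ? ai_iscoreup : sb_iscoreup;
}

int main(void)
{
        struct fake_chip ci;

        fake_attach(&ci, SOCI_AI);
        ci.iscoreup(&ci, 0x82a);        /* callers never branch on socitype again */
        return 0;
}
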
index 726fa89..c4c2543 100644 (file)
@@ -132,9 +132,9 @@ struct brcmf_sdio_dev {
        atomic_t suspend;               /* suspend flag */
        wait_queue_head_t request_byte_wait;
        wait_queue_head_t request_word_wait;
-       wait_queue_head_t request_packet_wait;
+       wait_queue_head_t request_chain_wait;
        wait_queue_head_t request_buffer_wait;
-
+       struct device *dev;
 };
 
 /* Register/deregister device interrupt handler. */
@@ -182,11 +182,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
  * NOTE: Async operation is not currently supported.
  */
 extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt);
+extern int
 brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+                     uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                     uint flags, struct sk_buff *pkt);
 extern int
 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+                     uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                       uint flags, struct sk_buff_head *pktq);
 
 /* Flags bits */
 
@@ -237,16 +247,18 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
 /* read or write any buffer using cmd53 */
 extern int
 brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
-                          uint fix_inc, uint rw, uint fnc_num,
-                          u32 addr, uint regwidth,
-                          u32 buflen, u8 *buffer, struct sk_buff *pkt);
+                          uint fix_inc, uint rw, uint fnc_num, u32 addr,
+                          struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+                         uint write, uint func, uint addr,
+                         struct sk_buff_head *pktq);
 
 /* Watchdog timer interface for pm ops */
 extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
                                    bool enable);
 
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
-                               u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
 extern void brcmf_sdbrcm_disconnect(void *ptr);
 extern void brcmf_sdbrcm_isr(void *arg);
 #endif                         /* _BRCM_SDH_H_ */
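
The bus API in this header moves from flat (buf, nbytes) transfers to sk_buff and
sk_buff_head based ones, so a receive glom can be handed over as a single queue. A
hypothetical caller might look like the sketch below (illustration only, not in the patch;
the buffer count and size and the helper name example_recv_glom are invented, and it assumes
the driver's sdio_host.h together with linux/skbuff.h):

static int example_recv_glom(struct brcmf_sdio_dev *sdiodev, u32 addr,
                             uint fn, uint flags)
{
        struct sk_buff_head pktq;
        struct sk_buff *skb;
        int i, err;

        skb_queue_head_init(&pktq);

        /* post a handful of fixed-size buffers for the superframe */
        for (i = 0; i < 4; i++) {
                skb = dev_alloc_skb(512);
                if (!skb) {
                        err = -ENOMEM;
                        goto fail;
                }
                skb_put(skb, 512);
                skb_queue_tail(&pktq, skb);
        }

        /* one request services every buffer in the queue */
        err = brcmf_sdcard_recv_chain(sdiodev, addr, fn, flags, &pktq);
        if (err)
                goto fail;

        /* hand the filled skbs up the stack; omitted here */
        return 0;

fail:
        skb_queue_purge(&pktq);
        return err;
}
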
index 5eddabe..f23b0c3 100644 (file)
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
 
 static s32
 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
-                        enum nl80211_tx_power_setting type, s32 dbm)
+                           enum nl80211_tx_power_setting type, s32 mbm)
 {
 
        struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
        u16 txpwrmw;
        s32 err = 0;
        s32 disable = 0;
+       s32 dbm = MBM_TO_DBM(mbm);
 
        WL_TRACE("Enter\n");
        if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
        case NL80211_TX_POWER_AUTOMATIC:
                break;
        case NL80211_TX_POWER_LIMITED:
-               if (dbm < 0) {
-                       WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
-                       err = -EINVAL;
-                       goto done;
-               }
-               break;
        case NL80211_TX_POWER_FIXED:
                if (dbm < 0) {
                        WL_ERR("TX_POWER_FIXED - dbm is negative\n");
@@ -1997,7 +1992,7 @@ done:
 }
 
 static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
-                                  struct brcmf_bss_info *bi)
+                                  struct brcmf_bss_info_le *bi)
 {
        struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
        struct ieee80211_channel *notify_channel;
@@ -2049,18 +2044,27 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
                notify_timestamp, notify_capability, notify_interval, notify_ie,
                notify_ielen, notify_signal, GFP_KERNEL);
 
-       if (!bss) {
-               WL_ERR("cfg80211_inform_bss_frame error\n");
-               return -EINVAL;
-       }
+       if (!bss)
+               return -ENOMEM;
+
+       cfg80211_put_bss(bss);
 
        return err;
 }
 
+static struct brcmf_bss_info_le *
+next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
+{
+       if (bss == NULL)
+               return list->bss_info_le;
+       return (struct brcmf_bss_info_le *)((unsigned long)bss +
+                                           le32_to_cpu(bss->length));
+}
+
 static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
 {
        struct brcmf_scan_results *bss_list;
-       struct brcmf_bss_info *bi = NULL;       /* must be initialized */
+       struct brcmf_bss_info_le *bi = NULL;    /* must be initialized */
        s32 err = 0;
        int i;
 
@@ -2072,7 +2076,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
        }
        WL_SCAN("scanned AP count (%d)\n", bss_list->count);
        for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
-               bi = next_bss(bss_list, bi);
+               bi = next_bss_le(bss_list, bi);
                err = brcmf_inform_single_bss(cfg_priv, bi);
                if (err)
                        break;
@@ -2085,8 +2089,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
 {
        struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
        struct ieee80211_channel *notify_channel;
-       struct brcmf_bss_info *bi = NULL;
+       struct brcmf_bss_info_le *bi = NULL;
        struct ieee80211_supported_band *band;
+       struct cfg80211_bss *bss;
        u8 *buf = NULL;
        s32 err = 0;
        u16 channel;
@@ -2114,7 +2119,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
                goto CleanUp;
        }
 
-       bi = (struct brcmf_bss_info *)(buf + 4);
+       bi = (struct brcmf_bss_info_le *)(buf + 4);
 
        channel = bi->ctl_ch ? bi->ctl_ch :
                                CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
@@ -2140,10 +2145,17 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
        WL_CONN("signal: %d\n", notify_signal);
        WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
 
-       cfg80211_inform_bss(wiphy, notify_channel, bssid,
+       bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
                notify_timestamp, notify_capability, notify_interval,
                notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
 
+       if (!bss) {
+               err = -ENOMEM;
+               goto CleanUp;
+       }
+
+       cfg80211_put_bss(bss);
+
 CleanUp:
 
        kfree(buf);
@@ -2188,7 +2200,7 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
 
 static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
 {
-       struct brcmf_bss_info *bi;
+       struct brcmf_bss_info_le *bi;
        struct brcmf_ssid *ssid;
        struct brcmf_tlv *tim;
        u16 beacon_interval;
@@ -2211,7 +2223,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
                goto update_bss_info_out;
        }
 
-       bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+       bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
        err = brcmf_inform_single_bss(cfg_priv, bi);
        if (err)
                goto update_bss_info_out;
index 62dc461..a613b49 100644 (file)
@@ -352,15 +352,6 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
        return &cfg->conn_info;
 }
 
-static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
-                                          struct brcmf_bss_info *bss)
-{
-       return bss = bss ?
-               (struct brcmf_bss_info *)((unsigned long)bss +
-                                      le32_to_cpu(bss->length)) :
-               list->bss_info;
-}
-
 extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
                                                        struct device *busdev,
                                                        void *data);
index 025fa0e..39e3054 100644 (file)
@@ -16,6 +16,8 @@
  * File contents: support functions for PCI/PCIe
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/pci.h>
 
 #define PCI_FORCEHT(si)        (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
 
 #ifdef BCMDBG
-#define        SI_MSG(args)    printk args
+#define        SI_MSG(fmt, ...)        pr_debug(fmt, ##__VA_ARGS__)
 #else
-#define        SI_MSG(args)
+#define        SI_MSG(fmt, ...)        no_printk(fmt, ##__VA_ARGS__)
 #endif                         /* BCMDBG */
 
 #define        GOODCOREADDR(x, b) \
@@ -1073,7 +1075,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
 
        /* scan for cores */
        if (socitype == SOCI_AI) {
-               SI_MSG(("Found chip type AI (0x%08x)\n", w));
+               SI_MSG("Found chip type AI (0x%08x)\n", w);
                /* pass chipc address instead of original core base */
                ai_scan(&sii->pub, cc);
        } else {
@@ -1129,7 +1131,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
                 * set chipControl register bit 15
                 */
                if (sih->chiprev == 0) {
-                       SI_MSG(("Applying 43224A0 WARs\n"));
+                       SI_MSG("Applying 43224A0 WARs\n");
                        ai_corereg(sih, SI_CC_IDX,
                                   offsetof(struct chipcregs, chipcontrol),
                                   CCTRL43224_GPIO_TOGGLE,
@@ -1138,7 +1140,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
                                           CCTRL_43224A0_12MA_LED_DRIVE);
                }
                if (sih->chiprev >= 1) {
-                       SI_MSG(("Applying 43224B0+ WARs\n"));
+                       SI_MSG("Applying 43224B0+ WARs\n");
                        si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
                                           CCTRL_43224B0_12MA_LED_DRIVE);
                }
@@ -1149,7 +1151,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
                 * enable 12 mA drive strength for 4313 and
                 * set chipControl register bit 1
                 */
-               SI_MSG(("Applying 4313 WARs\n"));
+               SI_MSG("Applying 4313 WARs\n");
                si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
                                   CCTRL_4313_12MA_LED_DRIVE);
        }
index 106a742..b51d1e4 100644 (file)
 /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
 #define SI_PCIE_DMA_H32                0x80000000
 
-/* core codes */
-#define        NODEV_CORE_ID           0x700   /* Invalid coreid */
-#define        CC_CORE_ID              0x800   /* chipcommon core */
-#define        ILINE20_CORE_ID         0x801   /* iline20 core */
-#define        SRAM_CORE_ID            0x802   /* sram core */
-#define        SDRAM_CORE_ID           0x803   /* sdram core */
-#define        PCI_CORE_ID             0x804   /* pci core */
-#define        MIPS_CORE_ID            0x805   /* mips core */
-#define        ENET_CORE_ID            0x806   /* enet mac core */
-#define        CODEC_CORE_ID           0x807   /* v90 codec core */
-#define        USB_CORE_ID             0x808   /* usb 1.1 host/device core */
-#define        ADSL_CORE_ID            0x809   /* ADSL core */
-#define        ILINE100_CORE_ID        0x80a   /* iline100 core */
-#define        IPSEC_CORE_ID           0x80b   /* ipsec core */
-#define        UTOPIA_CORE_ID          0x80c   /* utopia core */
-#define        PCMCIA_CORE_ID          0x80d   /* pcmcia core */
-#define        SOCRAM_CORE_ID          0x80e   /* internal memory core */
-#define        MEMC_CORE_ID            0x80f   /* memc sdram core */
-#define        OFDM_CORE_ID            0x810   /* OFDM phy core */
-#define        EXTIF_CORE_ID           0x811   /* external interface core */
-#define        D11_CORE_ID             0x812   /* 802.11 MAC core */
-#define        APHY_CORE_ID            0x813   /* 802.11a phy core */
-#define        BPHY_CORE_ID            0x814   /* 802.11b phy core */
-#define        GPHY_CORE_ID            0x815   /* 802.11g phy core */
-#define        MIPS33_CORE_ID          0x816   /* mips3302 core */
-#define        USB11H_CORE_ID          0x817   /* usb 1.1 host core */
-#define        USB11D_CORE_ID          0x818   /* usb 1.1 device core */
-#define        USB20H_CORE_ID          0x819   /* usb 2.0 host core */
-#define        USB20D_CORE_ID          0x81a   /* usb 2.0 device core */
-#define        SDIOH_CORE_ID           0x81b   /* sdio host core */
-#define        ROBO_CORE_ID            0x81c   /* roboswitch core */
-#define        ATA100_CORE_ID          0x81d   /* parallel ATA core */
-#define        SATAXOR_CORE_ID         0x81e   /* serial ATA & XOR DMA core */
-#define        GIGETH_CORE_ID          0x81f   /* gigabit ethernet core */
-#define        PCIE_CORE_ID            0x820   /* pci express core */
-#define        NPHY_CORE_ID            0x821   /* 802.11n 2x2 phy core */
-#define        SRAMC_CORE_ID           0x822   /* SRAM controller core */
-#define        MINIMAC_CORE_ID         0x823   /* MINI MAC/phy core */
-#define        ARM11_CORE_ID           0x824   /* ARM 1176 core */
-#define        ARM7S_CORE_ID           0x825   /* ARM7tdmi-s core */
-#define        LPPHY_CORE_ID           0x826   /* 802.11a/b/g phy core */
-#define        PMU_CORE_ID             0x827   /* PMU core */
-#define        SSNPHY_CORE_ID          0x828   /* 802.11n single-stream phy core */
-#define        SDIOD_CORE_ID           0x829   /* SDIO device core */
-#define        ARMCM3_CORE_ID          0x82a   /* ARM Cortex M3 core */
-#define        HTPHY_CORE_ID           0x82b   /* 802.11n 4x4 phy core */
-#define        MIPS74K_CORE_ID         0x82c   /* mips 74k core */
-#define        GMAC_CORE_ID            0x82d   /* Gigabit MAC core */
-#define        DMEMC_CORE_ID           0x82e   /* DDR1/2 memory controller core */
-#define        PCIERC_CORE_ID          0x82f   /* PCIE Root Complex core */
-#define        OCP_CORE_ID             0x830   /* OCP2OCP bridge core */
-#define        SC_CORE_ID              0x831   /* shared common core */
-#define        AHB_CORE_ID             0x832   /* OCP2AHB bridge core */
-#define        SPIH_CORE_ID            0x833   /* SPI host core */
-#define        I2S_CORE_ID             0x834   /* I2S core */
-#define        DMEMS_CORE_ID           0x835   /* SDR/DDR1 memory controller core */
-#define        DEF_SHIM_COMP           0x837   /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID     0x367   /* OOB router core ID */
-#define        DEF_AI_COMP             0xfff   /* Default component, in ai chips it
-                                        * maps all unused address ranges
-                                        */
-
 /* chipcommon being the first core: */
 #define        SI_CC_IDX               0
 
 /* SOC Interconnect types (aka chip types) */
 #define        SOCI_AI                 1
 
-/* Common core control flags */
-#define        SICF_BIST_EN            0x8000
-#define        SICF_PME_EN             0x4000
-#define        SICF_CORE_BITS          0x3ffc
-#define        SICF_FGC                0x0002
-#define        SICF_CLOCK_EN           0x0001
-
-/* Common core status flags */
-#define        SISF_BIST_DONE          0x8000
-#define        SISF_BIST_ERROR         0x4000
-#define        SISF_GATED_CLK          0x2000
-#define        SISF_DMA64              0x1000
-#define        SISF_CORE_BITS          0x0fff
-
 /* A register that is common to all cores to
  * communicate w/PMU regarding clock control.
  */
index 7f27dbd..43f7a72 100644 (file)
@@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
                len = roundup(len, 4);
                ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
 
-               dma_len += (u16) brcmu_pkttotlen(p);
+               dma_len += (u16) p->len;
 
                BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
                        " seg_cnt %d null delim %d\n",
@@ -741,9 +741,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
                if (p) {
                        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
                            ((u8) (p->priority) == tid)) {
-
-                               plen = brcmu_pkttotlen(p) +
-                                      AMPDU_MAX_MPDU_OVERHEAD;
+                               plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
                                plen = max(scb_ampdu->min_len, plen);
 
                                if ((plen + ampdu_len) > max_ampdu_bytes) {
index 89ad1b7..55e9f45 100644 (file)
@@ -1153,121 +1153,6 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
                              &txpwr);
 }
 
-#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
-{
-       int i;
-       char buf[80];
-       char fraction[4][4] = { "   ", ".25", ".5 ", ".75" };
-
-       sprintf(buf, "CCK                ");
-       for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "20 MHz OFDM SISO   ");
-       for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "20 MHz OFDM CDD    ");
-       for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "40 MHz OFDM SISO   ");
-       for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->ofdm_40_siso[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "40 MHz OFDM CDD    ");
-       for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->ofdm_40_cdd[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "20 MHz MCS0-7 SISO ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_20_siso[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "20 MHz MCS0-7 CDD  ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_20_cdd[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "20 MHz MCS0-7 STBC ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_20_stbc[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "20 MHz MCS8-15 SDM ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_20_mimo[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "40 MHz MCS0-7 SISO ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_40_siso[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "40 MHz MCS0-7 CDD  ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_40_cdd[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "40 MHz MCS0-7 STBC ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_40_stbc[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       printk(KERN_DEBUG "%s\n", buf);
-
-       sprintf(buf, "40 MHz MCS8-15 SDM ");
-       for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
-               sprintf(buf[strlen(buf)], " %2d%s",
-                       txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
-                       fraction[txpwr->mcs_40_mimo[i] %
-                                                       BRCMS_TXPWR_DB_FACTOR]);
-       }
-       printk(KERN_DEBUG "%s\n", buf);
-
-       printk(KERN_DEBUG "MCS32               %2d%s\n",
-              txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
-              fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
-}
-#endif                         /* POWER_DBG */
-
 void
 brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
                       struct txpwr_limits *txpwr)
@@ -1478,9 +1363,6 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
                        txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
        }
 
-#ifdef POWER_DBG
-       wlc_phy_txpower_limits_dump(txpwr);
-#endif
        return;
 }
 
index 6ebec8f..b55b1f6 100644 (file)
  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
-#include <linux/skbuff.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
 
@@ -22,6 +24,7 @@
 #include <aiutils.h>
 #include "types.h"
 #include "dma.h"
+#include "soc.h"
 
 /*
  * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
 
 /* debug/trace */
 #ifdef BCMDBG
-#define        DMA_ERROR(args) \
-       do { \
-               if (!(*di->msg_level & 1)) \
-                       ; \
-               else \
-                       printk args; \
-       } while (0)
-#define        DMA_TRACE(args) \
-       do { \
-               if (!(*di->msg_level & 2)) \
-                       ; \
-               else \
-                       printk args; \
-       } while (0)
+#define        DMA_ERROR(fmt, ...)                                     \
+do {                                                           \
+       if (*di->msg_level & 1)                                 \
+               pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);  \
+} while (0)
+#define        DMA_TRACE(fmt, ...)                                     \
+do {                                                           \
+       if (*di->msg_level & 2)                                 \
+               pr_debug("%s: " fmt, __func__, ##__VA_ARGS__);  \
+} while (0)
 #else
-#define        DMA_ERROR(args)
-#define        DMA_TRACE(args)
+#define        DMA_ERROR(fmt, ...)                     \
+       no_printk(fmt, ##__VA_ARGS__)
+#define        DMA_TRACE(fmt, ...)                     \
+       no_printk(fmt, ##__VA_ARGS__)
 #endif                         /* BCMDBG */
 
-#define        DMA_NONE(args)
+#define        DMA_NONE(fmt, ...)                      \
+       no_printk(fmt, ##__VA_ARGS__)
 
 #define        MAXNAMEL        8       /* 8 char names */
 
@@ -361,7 +363,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
        uint dmactrlflags;
 
        if (di == NULL) {
-               DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
+               DMA_ERROR("NULL dma handle\n");
                return 0;
        }
 
@@ -412,13 +414,13 @@ static bool _dma_isaddrext(struct dma_info *di)
        /* not all tx or rx channel are available */
        if (di->d64txregs != NULL) {
                if (!_dma64_addrext(di->d64txregs))
-                       DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
-                                  "AE set\n", di->name));
+                       DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
+                                 di->name);
                return true;
        } else if (di->d64rxregs != NULL) {
                if (!_dma64_addrext(di->d64rxregs))
-                       DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
-                                  "AE set\n", di->name));
+                       DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
+                                 di->name);
                return true;
        }
 
@@ -519,8 +521,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
                va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
                        &alloced, &di->txdpaorig);
                if (va == NULL) {
-                       DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
-                                  " failed\n", di->name));
+                       DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+                                 di->name);
                        return false;
                }
                align = (1 << align_bits);
@@ -533,8 +535,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
                va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
                        &alloced, &di->rxdpaorig);
                if (va == NULL) {
-                       DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
-                                  " failed\n", di->name));
+                       DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+                                 di->name);
                        return false;
                }
                align = (1 << align_bits);
@@ -583,11 +585,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
         */
        _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
 
-       DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
-                  "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
-                  "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
-                  di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
-                  rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+       DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
+                 name, "DMA64",
+                 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+                 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
 
        /* make a private copy of our callers name */
        strncpy(di->name, name, MAXNAMEL);
@@ -645,8 +646,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
                di->dmadesc_align = 4;  /* 16 byte alignment */
        }
 
-       DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
-                 di->aligndesc_4k, di->dmadesc_align));
+       DMA_NONE("DMA descriptor align_needed %d, align %d\n",
+                di->aligndesc_4k, di->dmadesc_align);
 
        /* allocate tx packet pointer vector */
        if (ntxd) {
@@ -684,21 +685,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
 
        if ((di->ddoffsetlow != 0) && !di->addrext) {
                if (di->txdpa > SI_PCI_DMA_SZ) {
-                       DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
-                                  "supported\n", di->name, (u32)di->txdpa));
+                       DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
+                                 di->name, (u32)di->txdpa);
                        goto fail;
                }
                if (di->rxdpa > SI_PCI_DMA_SZ) {
-                       DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
-                                  "supported\n", di->name, (u32)di->rxdpa));
+                       DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
+                                 di->name, (u32)di->rxdpa);
                        goto fail;
                }
        }
 
-       DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
-                  "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
-                  di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
-                  di->addrext));
+       DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+                 di->ddoffsetlow, di->ddoffsethigh,
+                 di->dataoffsetlow, di->dataoffsethigh,
+                 di->addrext);
 
        return (struct dma_pub *) di;
 
@@ -744,7 +745,7 @@ void dma_detach(struct dma_pub *pub)
 {
        struct dma_info *di = (struct dma_info *)pub;
 
-       DMA_TRACE(("%s: dma_detach\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        /* free dma descriptor rings */
        if (di->txd64)
@@ -812,7 +813,7 @@ static void _dma_rxenable(struct dma_info *di)
        uint dmactrlflags = di->dma.dmactrlflags;
        u32 control;
 
-       DMA_TRACE(("%s: dma_rxenable\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        control =
            (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
@@ -832,7 +833,7 @@ void dma_rxinit(struct dma_pub *pub)
 {
        struct dma_info *di = (struct dma_info *)pub;
 
-       DMA_TRACE(("%s: dma_rxinit\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        if (di->nrxd == 0)
                return;
@@ -901,7 +902,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
 
 /*
  * !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number of packets in the next frame, or 0 if there are no more
  *   if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
  *   supported with pkts chain
  *   otherwise, it's treated as giant pkt and will be tossed.
@@ -909,38 +910,40 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
  *   buffer data. After it reaches the max size of buffer, the data continues
  *   in next DMA descriptor buffer WITHOUT DMA header
  */
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
 {
        struct dma_info *di = (struct dma_info *)pub;
-       struct sk_buff *p, *head, *tail;
+       struct sk_buff_head dma_frames;
+       struct sk_buff *p, *next;
        uint len;
        uint pkt_len;
        int resid = 0;
+       int pktcnt = 1;
 
+       skb_queue_head_init(&dma_frames);
  next_frame:
-       head = _dma_getnextrxp(di, false);
-       if (head == NULL)
-               return NULL;
+       p = _dma_getnextrxp(di, false);
+       if (p == NULL)
+               return 0;
 
-       len = le16_to_cpu(*(__le16 *) (head->data));
-       DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
-       dma_spin_for_len(len, head);
+       len = le16_to_cpu(*(__le16 *) (p->data));
+       DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+       dma_spin_for_len(len, p);
 
        /* set actual length */
        pkt_len = min((di->rxoffset + len), di->rxbufsize);
-       __skb_trim(head, pkt_len);
+       __skb_trim(p, pkt_len);
+       skb_queue_tail(&dma_frames, p);
        resid = len - (di->rxbufsize - di->rxoffset);
 
        /* check for single or multi-buffer rx */
        if (resid > 0) {
-               tail = head;
                while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
-                       tail->next = p;
                        pkt_len = min_t(uint, resid, di->rxbufsize);
                        __skb_trim(p, pkt_len);
-
-                       tail = p;
+                       skb_queue_tail(&dma_frames, p);
                        resid -= di->rxbufsize;
+                       pktcnt++;
                }
 
 #ifdef BCMDBG
@@ -951,26 +954,31 @@ struct sk_buff *dma_rx(struct dma_pub *pub)
                                  D64_RS0_CD_MASK) -
                                 di->rcvptrbase) & D64_RS0_CD_MASK,
                                struct dma64desc);
-                       DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
-                                  di->rxin, di->rxout, cur));
+                       DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
+                                 di->rxin, di->rxout, cur);
                }
 #endif                         /* BCMDBG */
 
                if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
-                       DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
-                                  di->name, len));
-                       brcmu_pkt_buf_free_skb(head);
+                       DMA_ERROR("%s: bad frame length (%d)\n",
+                                 di->name, len);
+                       skb_queue_walk_safe(&dma_frames, p, next) {
+                               skb_unlink(p, &dma_frames);
+                               brcmu_pkt_buf_free_skb(p);
+                       }
                        di->dma.rxgiants++;
+                       pktcnt = 1;
                        goto next_frame;
                }
        }
 
-       return head;
+       skb_queue_splice_tail(&dma_frames, skb_list);
+       return pktcnt;
 }
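With this rewrite dma_rx() no longer hands back a chained sk_buff: it appends the buffers that make up one received frame to a caller-supplied sk_buff_head and returns the number of buffers queued, or 0 when the ring is empty. A hedged sketch of a consumer loop; process_frame() is a placeholder, not a brcmsmac function:

    struct sk_buff_head frames;
    struct sk_buff *skb;
    int n;

    skb_queue_head_init(&frames);
    /* each successful call appends the buffers of exactly one frame */
    while ((n = dma_rx(pub, &frames)) != 0) {
            while ((skb = __skb_dequeue(&frames)) != NULL)
                    process_frame(skb);     /* placeholder consumer */
    }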
 
 static bool dma64_rxidle(struct dma_info *di)
 {
-       DMA_TRACE(("%s: dma_rxidle\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        if (di->nrxd == 0)
                return true;
@@ -1010,7 +1018,7 @@ bool dma_rxfill(struct dma_pub *pub)
 
        n = di->nrxpost - nrxdactive(di, rxin, rxout);
 
-       DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+       DMA_TRACE("%s: post %d\n", di->name, n);
 
        if (di->rxbufsize > BCMEXTRAHDROOM)
                extra_offset = di->rxextrahdrroom;
@@ -1023,11 +1031,9 @@ bool dma_rxfill(struct dma_pub *pub)
                p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
 
                if (p == NULL) {
-                       DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
-                                  di->name));
+                       DMA_ERROR("%s: out of rxbufs\n", di->name);
                        if (i == 0 && dma64_rxidle(di)) {
-                               DMA_ERROR(("%s: rxfill64: ring is empty !\n",
-                                          di->name));
+                               DMA_ERROR("%s: ring is empty !\n", di->name);
                                ring_empty = true;
                        }
                        di->dma.rxnobuf++;
@@ -1072,7 +1078,7 @@ void dma_rxreclaim(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        struct sk_buff *p;
 
-       DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        while ((p = _dma_getnextrxp(di, true)))
                brcmu_pkt_buf_free_skb(p);
@@ -1103,7 +1109,7 @@ void dma_txinit(struct dma_pub *pub)
        struct dma_info *di = (struct dma_info *)pub;
        u32 control = D64_XC_XE;
 
-       DMA_TRACE(("%s: dma_txinit\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        if (di->ntxd == 0)
                return;
@@ -1135,7 +1141,7 @@ void dma_txsuspend(struct dma_pub *pub)
 {
        struct dma_info *di = (struct dma_info *)pub;
 
-       DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        if (di->ntxd == 0)
                return;
@@ -1147,7 +1153,7 @@ void dma_txresume(struct dma_pub *pub)
 {
        struct dma_info *di = (struct dma_info *)pub;
 
-       DMA_TRACE(("%s: dma_txresume\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        if (di->ntxd == 0)
                return;
@@ -1169,11 +1175,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
        struct dma_info *di = (struct dma_info *)pub;
        struct sk_buff *p;
 
-       DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
-                  (range == DMA_RANGE_ALL) ? "all" :
-                  ((range ==
-                    DMA_RANGE_TRANSMITTED) ? "transmitted" :
-                   "transferred")));
+       DMA_TRACE("%s: %s\n",
+                 di->name,
+                 range == DMA_RANGE_ALL ? "all" :
+                 range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+                 "transferred");
 
        if (di->txin == di->txout)
                return;
@@ -1233,65 +1239,51 @@ bool dma_rxreset(struct dma_pub *pub)
  *   the error(toss frames) could be fatal and cause many subsequent hard
  *   to debug problems
  */
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
 {
        struct dma_info *di = (struct dma_info *)pub;
-       struct sk_buff *p, *next;
        unsigned char *data;
        uint len;
        u16 txout;
        u32 flags = 0;
        dma_addr_t pa;
 
-       DMA_TRACE(("%s: dma_txfast\n", di->name));
+       DMA_TRACE("%s:\n", di->name);
 
        txout = di->txout;
 
        /*
-        * Walk the chain of packet buffers
-        * allocating and initializing transmit descriptor entries.
+        * obtain and initialize transmit descriptor entry.
         */
-       for (p = p0; p; p = next) {
-               data = p->data;
-               len = p->len;
-               next = p->next;
+       data = p->data;
+       len = p->len;
 
-               /* return nonzero if out of tx descriptors */
-               if (nexttxd(di, txout) == di->txin)
-                       goto outoftxd;
-
-               if (len == 0)
-                       continue;
+       /* no point in transmitting a zero-length packet */
+       if (len == 0)
+               return 0;
 
-               /* get physical address of buffer start */
-               pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+       /* return nonzero if out of tx descriptors */
+       if (nexttxd(di, txout) == di->txin)
+               goto outoftxd;
 
-               flags = 0;
-               if (p == p0)
-                       flags |= D64_CTRL1_SOF;
+       /* get physical address of buffer start */
+       pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
 
-               /* With a DMA segment list, Descriptor table is filled
-                * using the segment list instead of looping over
-                * buffers in multi-chain DMA. Therefore, EOF for SGLIST
-                * is when end of segment list is reached.
-                */
-               if (next == NULL)
-                       flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
-               if (txout == (di->ntxd - 1))
-                       flags |= D64_CTRL1_EOT;
-
-               dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+       /* With a DMA segment list, Descriptor table is filled
+        * using the segment list instead of looping over
+        * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+        * is when end of segment list is reached.
+        */
+       flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+       if (txout == (di->ntxd - 1))
+               flags |= D64_CTRL1_EOT;
 
-               txout = nexttxd(di, txout);
-       }
+       dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
 
-       /* if last txd eof not set, fix it */
-       if (!(flags & D64_CTRL1_EOF))
-               di->txd64[prevtxd(di, txout)].ctrl1 =
-                    cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+       txout = nexttxd(di, txout);
 
        /* save the packet */
-       di->txp[prevtxd(di, txout)] = p0;
+       di->txp[prevtxd(di, txout)] = p;
 
        /* bump the tx descriptor index */
        di->txout = txout;
@@ -1307,8 +1299,8 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
        return 0;
 
  outoftxd:
-       DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
-       brcmu_pkt_buf_free_skb(p0);
+       DMA_ERROR("%s: out of txds !!!\n", di->name);
+       brcmu_pkt_buf_free_skb(p);
        di->dma.txavail = 0;
        di->dma.txnobuf++;
        return -1;
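dma_txfast() now consumes exactly one skb per call and always marks its descriptor SOF|IOC|EOF (plus EOT on the last ring entry) instead of walking a p->next chain; on descriptor exhaustion it still frees the skb and returns -1. A hedged sketch of the resulting caller pattern; txq is a placeholder queue, not a driver field:

    struct sk_buff *skb;

    while ((skb = skb_dequeue(&txq)) != NULL) {
            /* nonzero return: out of tx descriptors, skb already freed */
            if (dma_txfast(pub, skb, true))
                    break;
    }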
@@ -1331,11 +1323,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
        u16 active_desc;
        struct sk_buff *txp;
 
-       DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
-                  (range == DMA_RANGE_ALL) ? "all" :
-                  ((range ==
-                    DMA_RANGE_TRANSMITTED) ? "transmitted" :
-                   "transferred")));
+       DMA_TRACE("%s: %s\n",
+                 di->name,
+                 range == DMA_RANGE_ALL ? "all" :
+                 range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+                 "transferred");
 
        if (di->ntxd == 0)
                return NULL;
@@ -1395,8 +1387,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
        return txp;
 
  bogus:
-       DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
-                 "force %d\n", start, end, di->txout, forceall));
+       DMA_NONE("bogus curr: start %d end %d txout %d\n",
+                start, end, di->txout);
        return NULL;
 }
 
index ebc5bc5..d317c7c 100644 (file)
@@ -18,6 +18,7 @@
 #define        _BRCM_DMA_H_
 
 #include <linux/delay.h>
+#include <linux/skbuff.h>
 #include "types.h"             /* forward structure declarations */
 
 /* map/unmap direction */
@@ -80,7 +81,7 @@ extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
                            uint nrxpost, uint rxoffset, uint *msg_level);
 
 void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
 bool dma_rxfill(struct dma_pub *pub);
 bool dma_rxreset(struct dma_pub *pub);
 bool dma_txreset(struct dma_pub *pub);
index 0d8a9cd..76376eb 100644 (file)
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
        FIF_ALLMULTI | \
        FIF_FCSFAIL | \
-       FIF_PLCPFAIL | \
        FIF_CONTROL | \
        FIF_OTHER_BSS | \
-       FIF_BCN_PRBRESP_PROMISC)
+       FIF_BCN_PRBRESP_PROMISC | \
+       FIF_PSPOLL)
 
 #define CHAN2GHZ(channel, freqency, chflags)  { \
        .band = IEEE80211_BAND_2GHZ, \
@@ -216,8 +216,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
        .ht_cap = {
                   /* from include/linux/ieee80211.h */
                   .cap = IEEE80211_HT_CAP_GRN_FLD |
-                  IEEE80211_HT_CAP_SGI_20 |
-                  IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
+                         IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40,
                   .ht_supported = true,
                   .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
                   .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -238,8 +237,7 @@ static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
                        BRCMS_LEGACY_5G_RATE_OFFSET,
        .ht_cap = {
                   .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
-                         IEEE80211_HT_CAP_SGI_40 |
-                         IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
+                         IEEE80211_HT_CAP_SGI_40,
                   .ht_supported = true,
                   .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
                   .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -287,6 +285,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
 {
        struct brcms_info *wl = hw->priv;
        bool blocked;
+       int err;
 
        ieee80211_wake_queues(hw);
        spin_lock_bh(&wl->lock);
@@ -295,57 +294,69 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        if (!blocked)
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 
-       return 0;
+       spin_lock_bh(&wl->lock);
+       /* avoid acknowledging frames before a non-monitor device is added */
+       wl->mute_tx = true;
+
+       if (!wl->pub->up)
+               err = brcms_up(wl);
+       else
+               err = -ENODEV;
+       spin_unlock_bh(&wl->lock);
+
+       if (err != 0)
+               wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
+                         err);
+       return err;
 }
 
 static void brcms_ops_stop(struct ieee80211_hw *hw)
 {
+       struct brcms_info *wl = hw->priv;
+       int status;
+
        ieee80211_stop_queues(hw);
+
+       if (wl->wlc == NULL)
+               return;
+
+       spin_lock_bh(&wl->lock);
+       status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
+                                  wl->wlc->hw->deviceid);
+       spin_unlock_bh(&wl->lock);
+       if (!status) {
+               wiphy_err(wl->wiphy,
+                         "wl: brcms_ops_stop: chipmatch failed\n");
+               return;
+       }
+
+       /* put driver in down state */
+       spin_lock_bh(&wl->lock);
+       brcms_down(wl);
+       spin_unlock_bh(&wl->lock);
 }
 
 static int
 brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-       struct brcms_info *wl;
-       int err;
+       struct brcms_info *wl = hw->priv;
 
        /* Just STA for now */
-       if (vif->type != NL80211_IFTYPE_AP &&
-           vif->type != NL80211_IFTYPE_MESH_POINT &&
-           vif->type != NL80211_IFTYPE_STATION &&
-           vif->type != NL80211_IFTYPE_WDS &&
-           vif->type != NL80211_IFTYPE_ADHOC) {
+       if (vif->type != NL80211_IFTYPE_STATION) {
                wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
                          " STA for now\n", __func__, vif->type);
                return -EOPNOTSUPP;
        }
 
-       wl = hw->priv;
-       spin_lock_bh(&wl->lock);
-       if (!wl->pub->up)
-               err = brcms_up(wl);
-       else
-               err = -ENODEV;
-       spin_unlock_bh(&wl->lock);
-
-       if (err != 0)
-               wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
-                         err);
+       wl->mute_tx = false;
+       brcms_c_mute(wl->wlc, false);
 
-       return err;
+       return 0;
 }
 
 static void
 brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-       struct brcms_info *wl;
-
-       wl = hw->priv;
-
-       /* put driver in down state */
-       spin_lock_bh(&wl->lock);
-       brcms_down(wl);
-       spin_unlock_bh(&wl->lock);
 }
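Taken together, the mac80211_if.c hunks above move bring-up and tear-down out of the interface add/remove callbacks: the device now comes up in ops_start with transmit muted, is un-muted once a station interface is added, and is brought down in ops_stop. A sketch of the resulting callback ordering, inferred from these hunks:

    /*
     * brcms_ops_start()            -> wl->mute_tx = true;  brcms_up(wl);
     * brcms_ops_add_interface()    -> wl->mute_tx = false; brcms_c_mute(wl->wlc, false);
     * brcms_ops_remove_interface() -> no-op (tear-down deferred to stop)
     * brcms_ops_stop()             -> chipmatch check, then brcms_down(wl);
     */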
 
 static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -362,7 +373,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
                                                   conf->listen_interval);
        }
        if (changed & IEEE80211_CONF_CHANGE_MONITOR)
-               wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+               wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
                          __func__, conf->flags & IEEE80211_CONF_MONITOR ?
                          "true" : "false");
        if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -539,29 +550,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
 
        changed_flags &= MAC_FILTERS;
        *total_flags &= MAC_FILTERS;
+
        if (changed_flags & FIF_PROMISC_IN_BSS)
-               wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+               wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
        if (changed_flags & FIF_ALLMULTI)
-               wiphy_err(wiphy, "FIF_ALLMULTI\n");
+               wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
        if (changed_flags & FIF_FCSFAIL)
-               wiphy_err(wiphy, "FIF_FCSFAIL\n");
-       if (changed_flags & FIF_PLCPFAIL)
-               wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+               wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
        if (changed_flags & FIF_CONTROL)
-               wiphy_err(wiphy, "FIF_CONTROL\n");
+               wiphy_dbg(wiphy, "FIF_CONTROL\n");
        if (changed_flags & FIF_OTHER_BSS)
-               wiphy_err(wiphy, "FIF_OTHER_BSS\n");
-       if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
-               spin_lock_bh(&wl->lock);
-               if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
-                       wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
-                       brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
-               } else {
-                       brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
-                       wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
-               }
-               spin_unlock_bh(&wl->lock);
-       }
+               wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+       if (changed_flags & FIF_PSPOLL)
+               wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+       if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+               wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+       spin_lock_bh(&wl->lock);
+       brcms_c_mac_promisc(wl->wlc, *total_flags);
+       spin_unlock_bh(&wl->lock);
        return;
 }
 
@@ -609,13 +616,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        wl->pub->global_ampdu->scb = scb;
        wl->pub->global_ampdu->max_pdu = 16;
 
-       sta->ht_cap.ht_supported = true;
-       sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-       sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
-       sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
-           IEEE80211_HT_CAP_SGI_20 |
-           IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
-
        /*
         * minstrel_ht initiates addBA on our behalf by calling
         * ieee80211_start_tx_ba_session()
@@ -877,37 +877,18 @@ static void brcms_free(struct brcms_info *wl)
 }
 
 /*
-* called from both kernel as from this kernel module.
+* called both from the kernel and from this kernel module (error flow on attach)
 * precondition: perimeter lock is not acquired.
 */
 static void brcms_remove(struct pci_dev *pdev)
 {
-       struct brcms_info *wl;
-       struct ieee80211_hw *hw;
-       int status;
-
-       hw = pci_get_drvdata(pdev);
-       wl = hw->priv;
-       if (!wl) {
-               pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
-               return;
-       }
+       struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+       struct brcms_info *wl = hw->priv;
 
-       spin_lock_bh(&wl->lock);
-       status = brcms_c_chipmatch(pdev->vendor, pdev->device);
-       spin_unlock_bh(&wl->lock);
-       if (!status) {
-               wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
-                                    "failed\n");
-               return;
-       }
        if (wl->wlc) {
                wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
                ieee80211_unregister_hw(hw);
-               spin_lock_bh(&wl->lock);
-               brcms_down(wl);
-               spin_unlock_bh(&wl->lock);
        }
        pci_disable_device(pdev);
 
@@ -1081,9 +1062,6 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
 
        wl->pub->ieee_hw = hw;
 
-       /* disable mpc */
-       brcms_c_set_radio_mpc(wl->wlc, false);
-
        /* register our interrupt handler */
        if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
                wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
@@ -1319,8 +1297,7 @@ void brcms_init(struct brcms_info *wl)
 {
        BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
        brcms_reset(wl);
-
-       brcms_c_init(wl->wlc);
+       brcms_c_init(wl->wlc, wl->mute_tx);
 }
 
 /*
@@ -1337,6 +1314,14 @@ uint brcms_reset(struct brcms_info *wl)
        return 0;
 }
 
+void brcms_fatal_error(struct brcms_info *wl)
+{
+       wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+                 wl->wlc->pub->unit);
+       brcms_reset(wl);
+       ieee80211_restart_hw(wl->pub->ieee_hw);
+}
+
 /*
  * These are interrupt on/off entry points. Disable interrupts
  * during interrupt state transition.
@@ -1561,11 +1546,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
                        if (le32_to_cpu(hdr->idx) == idx) {
                                pdata = wl->fw.fw_bin[i]->data +
                                        le32_to_cpu(hdr->offset);
-                               *pbuf = kmalloc(len, GFP_ATOMIC);
+                               *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
                                if (*pbuf == NULL)
                                        goto fail;
 
-                               memcpy(*pbuf, pdata, len);
                                return 0;
                        }
                }
index 177f0e4..6242f18 100644 (file)
@@ -80,6 +80,7 @@ struct brcms_info {
        struct brcms_firmware fw;
        struct wiphy *wiphy;
        struct brcms_ucode ucode;
+       bool mute_tx;
 };
 
 /* misc callbacks */
@@ -104,5 +105,6 @@ extern bool brcms_del_timer(struct brcms_timer *timer);
 extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
+extern void brcms_fatal_error(struct brcms_info *wl);
 
 #endif                         /* _BRCM_MAC80211_IF_H_ */
index 510e9bb..87f8f5d 100644 (file)
 #include "mac80211_if.h"
 #include "ucode_loader.h"
 #include "main.h"
+#include "soc.h"
 
 /*
  * Indication for txflowcontrol that all priority bits in
  * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
  */
-#define ALLPRIO                -1
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN       ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+#define ALLPRIO                                -1
 
 /* watchdog timer, in unit of ms */
-#define        TIMER_INTERVAL_WATCHDOG 1000
+#define TIMER_INTERVAL_WATCHDOG                1000
 /* radio monitor timer, in unit of ms */
-#define        TIMER_INTERVAL_RADIOCHK 800
-
-/* Max MPC timeout, in unit of watchdog */
-#ifndef BRCMS_MPC_MAX_DELAYCNT
-#define        BRCMS_MPC_MAX_DELAYCNT  10
-#endif
+#define TIMER_INTERVAL_RADIOCHK                800
 
-/* Min MPC timeout, in unit of watchdog */
-#define        BRCMS_MPC_MIN_DELAYCNT  1
-#define        BRCMS_MPC_THRESHOLD     3       /* MPC count threshold level */
-
-/* beacon interval, in unit of 1024TU */
-#define        BEACON_INTERVAL_DEFAULT 100
-/* DTIM interval, in unit of beacon interval */
-#define        DTIM_INTERVAL_DEFAULT   3
-
-/* Scale down delays to accommodate QT slow speed */
 /* beacon interval, in unit of 1024TU */
-#define        BEACON_INTERVAL_DEF_QT  20
-/* DTIM interval, in unit of beacon interval */
-#define        DTIM_INTERVAL_DEF_QT    1
-
-#define        TBTT_ALIGN_LEEWAY_US    100     /* min leeway before first TBTT in us */
+#define BEACON_INTERVAL_DEFAULT                100
 
 /* n-mode support capability */
 /* 2x2 includes both 1x1 & 2x2 devices
 #define WL_11N_3x3                     3
 #define WL_11N_4x4                     4
 
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N          0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX  0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX  0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX   0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX   0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF       0x00000080
-
-#define EDCF_ACI_MASK                0x60
-#define EDCF_ACI_SHIFT               5
-#define EDCF_ECWMIN_MASK             0x0f
-#define EDCF_ECWMAX_SHIFT            4
-#define EDCF_AIFSN_MASK              0x0f
-#define EDCF_AIFSN_MAX               15
-#define EDCF_ECWMAX_MASK             0xf0
-
-#define EDCF_AC_BE_TXOP_STA          0x0000
-#define EDCF_AC_BK_TXOP_STA          0x0000
-#define EDCF_AC_VO_ACI_STA           0x62
-#define EDCF_AC_VO_ECW_STA           0x32
-#define EDCF_AC_VI_ACI_STA           0x42
-#define EDCF_AC_VI_ECW_STA           0x43
-#define EDCF_AC_BK_ECW_STA           0xA4
-#define EDCF_AC_VI_TXOP_STA          0x005e
-#define EDCF_AC_VO_TXOP_STA          0x002f
-#define EDCF_AC_BE_ACI_STA           0x03
-#define EDCF_AC_BE_ECW_STA           0xA4
-#define EDCF_AC_BK_ACI_STA           0x27
-#define EDCF_AC_VO_TXOP_AP           0x002f
-
-#define EDCF_TXOP2USEC(txop)         ((txop) << 5)
-#define EDCF_ECW2CW(exp)             ((1 << (exp)) - 1)
-
-#define APHY_SYMBOL_TIME       4
-#define APHY_PREAMBLE_TIME     16
-#define APHY_SIGNAL_TIME       4
-#define APHY_SIFS_TIME         16
-#define APHY_SERVICE_NBITS     16
-#define APHY_TAIL_NBITS                6
-#define BPHY_SIFS_TIME         10
-#define BPHY_PLCP_SHORT_TIME   96
-
-#define PREN_PREAMBLE          24
-#define PREN_MM_EXT            12
-#define PREN_PREAMBLE_EXT      4
+#define EDCF_ACI_MASK                  0x60
+#define EDCF_ACI_SHIFT                 5
+#define EDCF_ECWMIN_MASK               0x0f
+#define EDCF_ECWMAX_SHIFT              4
+#define EDCF_AIFSN_MASK                        0x0f
+#define EDCF_AIFSN_MAX                 15
+#define EDCF_ECWMAX_MASK               0xf0
+
+#define EDCF_AC_BE_TXOP_STA            0x0000
+#define EDCF_AC_BK_TXOP_STA            0x0000
+#define EDCF_AC_VO_ACI_STA             0x62
+#define EDCF_AC_VO_ECW_STA             0x32
+#define EDCF_AC_VI_ACI_STA             0x42
+#define EDCF_AC_VI_ECW_STA             0x43
+#define EDCF_AC_BK_ECW_STA             0xA4
+#define EDCF_AC_VI_TXOP_STA            0x005e
+#define EDCF_AC_VO_TXOP_STA            0x002f
+#define EDCF_AC_BE_ACI_STA             0x03
+#define EDCF_AC_BE_ECW_STA             0xA4
+#define EDCF_AC_BK_ACI_STA             0x27
+#define EDCF_AC_VO_TXOP_AP             0x002f
+
+#define EDCF_TXOP2USEC(txop)           ((txop) << 5)
+#define EDCF_ECW2CW(exp)               ((1 << (exp)) - 1)
+
+#define APHY_SYMBOL_TIME               4
+#define APHY_PREAMBLE_TIME             16
+#define APHY_SIGNAL_TIME               4
+#define APHY_SIFS_TIME                 16
+#define APHY_SERVICE_NBITS             16
+#define APHY_TAIL_NBITS                        6
+#define BPHY_SIFS_TIME                 10
+#define BPHY_PLCP_SHORT_TIME           96
+
+#define PREN_PREAMBLE                  24
+#define PREN_MM_EXT                    12
+#define PREN_PREAMBLE_EXT              4
 
 #define DOT11_MAC_HDR_LEN              24
-#define        DOT11_ACK_LEN           10
-#define DOT11_BA_LEN           4
+#define DOT11_ACK_LEN                  10
+#define DOT11_BA_LEN                   4
 #define DOT11_OFDM_SIGNAL_EXTENSION    6
 #define DOT11_MIN_FRAG_LEN             256
-#define        DOT11_RTS_LEN           16
-#define        DOT11_CTS_LEN           10
+#define DOT11_RTS_LEN                  16
+#define DOT11_CTS_LEN                  10
 #define DOT11_BA_BITMAP_LEN            128
 #define DOT11_MIN_BEACON_PERIOD                1
 #define DOT11_MAX_BEACON_PERIOD                0xFFFF
-#define        DOT11_MAXNUMFRAGS       16
+#define DOT11_MAXNUMFRAGS              16
 #define DOT11_MAX_FRAG_LEN             2346
 
-#define BPHY_PLCP_TIME         192
-#define RIFS_11N_TIME          2
-
-#define WME_VER                        1
-#define WME_SUBTYPE_PARAM_IE   1
-#define WME_TYPE               2
-#define WME_OUI                        "\x00\x50\xf2"
+#define BPHY_PLCP_TIME                 192
+#define RIFS_11N_TIME                  2
 
-#define AC_BE                  0
-#define AC_BK                  1
-#define AC_VI                  2
-#define AC_VO                  3
-
-#define        BCN_TMPL_LEN            512     /* length of the BCN template area */
+/* length of the BCN template area */
+#define BCN_TMPL_LEN                   512
 
 /* brcms_bss_info flag bit values */
-#define BRCMS_BSS_HT           0x0020  /* BSS is HT (MIMO) capable */
-
-/* Flags used in brcms_c_txq_info.stopped */
-/* per prio flow control bits */
-#define TXQ_STOP_FOR_PRIOFC_MASK       0x000000FF
-/* stop txq enqueue for packet drain */
-#define TXQ_STOP_FOR_PKT_DRAIN         0x00000100
-/* stop txq enqueue for ampdu flow control */
-#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL  0x00000200
-
-#define        BRCMS_HWRXOFF           38      /* chip rx buffer offset */
-
-/* Find basic rate for a given rate */
-static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
-{
-       if (is_mcs_rate(rspec))
-               return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
-                      .leg_ofdm];
-       return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
-}
+#define BRCMS_BSS_HT                   0x0020  /* BSS is HT (MIMO) capable */
 
-static u16 frametype(u32 rspec, u8 mimoframe)
-{
-       if (is_mcs_rate(rspec))
-               return mimoframe;
-       return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
-}
+/* chip rx buffer offset */
+#define BRCMS_HWRXOFF                  38
 
 /* rfdisable delay timer 500 ms, runs of ALP clock */
-#define RFDISABLE_DEFAULT      10000000
+#define RFDISABLE_DEFAULT              10000000
 
 #define BRCMS_TEMPSENSE_PERIOD         10      /* 10 second timeout */
 
@@ -194,87 +129,83 @@ static u16 frametype(u32 rspec, u8 mimoframe)
  * These constants are used ONLY by wlc_prio2prec_map.  Do not use them
  * elsewhere.
  */
-#define        _BRCMS_PREC_NONE                0       /* None = - */
-#define        _BRCMS_PREC_BK          2       /* BK - Background */
-#define        _BRCMS_PREC_BE          4       /* BE - Best-effort */
-#define        _BRCMS_PREC_EE          6       /* EE - Excellent-effort */
-#define        _BRCMS_PREC_CL          8       /* CL - Controlled Load */
-#define        _BRCMS_PREC_VI          10      /* Vi - Video */
-#define        _BRCMS_PREC_VO          12      /* Vo - Voice */
-#define        _BRCMS_PREC_NC          14      /* NC - Network Control */
-
-/* The BSS is generating beacons in HW */
-#define BRCMS_BSSCFG_HW_BCN    0x20
-
-#define        SYNTHPU_DLY_APHY_US     3700    /* a phy synthpu_dly time in us */
-#define        SYNTHPU_DLY_BPHY_US     1050    /* b/g phy synthpu_dly time in us */
-#define        SYNTHPU_DLY_NPHY_US     2048    /* n phy REV3 synthpu_dly time in us */
-#define        SYNTHPU_DLY_LPPHY_US    300     /* lpphy synthpu_dly time in us */
-
-#define        SYNTHPU_DLY_PHY_US_QT   100     /* QT synthpu_dly time in us */
-
-#define        ANTCNT                  10      /* vanilla M_MAX_ANTCNT value */
+#define _BRCMS_PREC_NONE               0       /* None = - */
+#define _BRCMS_PREC_BK                 2       /* BK - Background */
+#define _BRCMS_PREC_BE                 4       /* BE - Best-effort */
+#define _BRCMS_PREC_EE                 6       /* EE - Excellent-effort */
+#define _BRCMS_PREC_CL                 8       /* CL - Controlled Load */
+#define _BRCMS_PREC_VI                 10      /* Vi - Video */
+#define _BRCMS_PREC_VO                 12      /* Vo - Voice */
+#define _BRCMS_PREC_NC                 14      /* NC - Network Control */
+
+/* synthpu_dly times in us */
+#define SYNTHPU_DLY_APHY_US            3700
+#define SYNTHPU_DLY_BPHY_US            1050
+#define SYNTHPU_DLY_NPHY_US            2048
+#define SYNTHPU_DLY_LPPHY_US           300
+
+#define ANTCNT                         10      /* vanilla M_MAX_ANTCNT val */
 
 /* Per-AC retry limit register definitions; uses defs.h bitfield macros */
-#define EDCF_SHORT_S            0
-#define EDCF_SFB_S              4
-#define EDCF_LONG_S             8
-#define EDCF_LFB_S              12
-#define EDCF_SHORT_M            BITFIELD_MASK(4)
-#define EDCF_SFB_M              BITFIELD_MASK(4)
-#define EDCF_LONG_M             BITFIELD_MASK(4)
-#define EDCF_LFB_M              BITFIELD_MASK(4)
+#define EDCF_SHORT_S                   0
+#define EDCF_SFB_S                     4
+#define EDCF_LONG_S                    8
+#define EDCF_LFB_S                     12
+#define EDCF_SHORT_M                   BITFIELD_MASK(4)
+#define EDCF_SFB_M                     BITFIELD_MASK(4)
+#define EDCF_LONG_M                    BITFIELD_MASK(4)
+#define EDCF_LFB_M                     BITFIELD_MASK(4)
 
-#define        RETRY_SHORT_DEF                 7       /* Default Short retry Limit */
-#define        RETRY_SHORT_MAX                 255     /* Maximum Short retry Limit */
-#define        RETRY_LONG_DEF                  4       /* Default Long retry count */
-#define        RETRY_SHORT_FB                  3 /* Short count for fallback rate */
-#define        RETRY_LONG_FB                   2 /* Long count for fallback rate */
+#define RETRY_SHORT_DEF                        7       /* Default Short retry Limit */
+#define RETRY_SHORT_MAX                        255     /* Maximum Short retry Limit */
+#define RETRY_LONG_DEF                 4       /* Default Long retry count */
+#define RETRY_SHORT_FB                 3       /* Short count for fb rate */
+#define RETRY_LONG_FB                  2       /* Long count for fb rate */
 
-#define        APHY_CWMIN              15
-#define PHY_CWMAX              1023
+#define APHY_CWMIN                     15
+#define PHY_CWMAX                      1023
 
-#define EDCF_AIFSN_MIN               1
+#define EDCF_AIFSN_MIN                 1
 
-#define FRAGNUM_MASK           0xF
+#define FRAGNUM_MASK                   0xF
 
-#define APHY_SLOT_TIME         9
-#define BPHY_SLOT_TIME         20
+#define APHY_SLOT_TIME                 9
+#define BPHY_SLOT_TIME                 20
 
-#define        WL_SPURAVOID_OFF        0
-#define        WL_SPURAVOID_ON1        1
-#define        WL_SPURAVOID_ON2        2
+#define WL_SPURAVOID_OFF               0
+#define WL_SPURAVOID_ON1               1
+#define WL_SPURAVOID_ON2               2
 
 /* invalid core flags, use the saved coreflags */
-#define BRCMS_USE_COREFLAGS    0xffffffff
+#define BRCMS_USE_COREFLAGS            0xffffffff
 
 /* values for PLCPHdr_override */
-#define BRCMS_PLCP_AUTO        -1
-#define BRCMS_PLCP_SHORT       0
-#define BRCMS_PLCP_LONG        1
+#define BRCMS_PLCP_AUTO                        -1
+#define BRCMS_PLCP_SHORT               0
+#define BRCMS_PLCP_LONG                        1
 
 /* values for g_protection_override and n_protection_override */
 #define BRCMS_PROTECTION_AUTO          -1
 #define BRCMS_PROTECTION_OFF           0
 #define BRCMS_PROTECTION_ON            1
 #define BRCMS_PROTECTION_MMHDR_ONLY    2
-#define BRCMS_PROTECTION_CTS_ONLY              3
+#define BRCMS_PROTECTION_CTS_ONLY      3
 
 /* values for g_protection_control and n_protection_control */
-#define BRCMS_PROTECTION_CTL_OFF               0
+#define BRCMS_PROTECTION_CTL_OFF       0
 #define BRCMS_PROTECTION_CTL_LOCAL     1
 #define BRCMS_PROTECTION_CTL_OVERLAP   2
 
 /* values for n_protection */
 #define BRCMS_N_PROTECTION_OFF         0
 #define BRCMS_N_PROTECTION_OPTIONAL    1
-#define BRCMS_N_PROTECTION_20IN40              2
+#define BRCMS_N_PROTECTION_20IN40      2
 #define BRCMS_N_PROTECTION_MIXEDMODE   3
 
 /* values for band specific 40MHz capabilities */
-#define BRCMS_N_BW_20ALL                       0
-#define BRCMS_N_BW_40ALL                       1
-#define BRCMS_N_BW_20IN2G_40IN5G               2
+#define BRCMS_N_BW_20ALL               0
+#define BRCMS_N_BW_40ALL               1
+#define BRCMS_N_BW_20IN2G_40IN5G       2
 
 /* bitflags for SGI support (sgi_rx iovar) */
 #define BRCMS_N_SGI_20                 0x01
@@ -282,48 +213,42 @@ static u16 frametype(u32 rspec, u8 mimoframe)
 
 /* defines used by the nrate iovar */
 /* MSC in use,indicates b0-6 holds an mcs */
-#define NRATE_MCS_INUSE        0x00000080
+#define NRATE_MCS_INUSE                        0x00000080
 /* rate/mcs value */
-#define NRATE_RATE_MASK 0x0000007f
+#define NRATE_RATE_MASK                        0x0000007f
 /* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_MASK 0x0000ff00
+#define NRATE_STF_MASK                 0x0000ff00
 /* stf mode shift */
-#define NRATE_STF_SHIFT        8
-/* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE 0x80000000
+#define NRATE_STF_SHIFT                        8
 /* bit indicate to override mcs only */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
-#define NRATE_SGI_MASK  0x00800000     /* sgi mode */
-#define NRATE_SGI_SHIFT 23     /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000   /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22    /* ldpc shift */
+#define NRATE_OVERRIDE_MCS_ONLY                0x40000000
+#define NRATE_SGI_MASK                 0x00800000      /* sgi mode */
+#define NRATE_SGI_SHIFT                        23              /* sgi mode */
+#define NRATE_LDPC_CODING              0x00400000      /* adv coding in use */
+#define NRATE_LDPC_SHIFT               22              /* ldpc shift */
 
-#define NRATE_STF_SISO 0       /* stf mode SISO */
-#define NRATE_STF_CDD  1       /* stf mode CDD */
-#define NRATE_STF_STBC 2       /* stf mode STBC */
-#define NRATE_STF_SDM  3       /* stf mode SDM */
+#define NRATE_STF_SISO                 0               /* stf mode SISO */
+#define NRATE_STF_CDD                  1               /* stf mode CDD */
+#define NRATE_STF_STBC                 2               /* stf mode STBC */
+#define NRATE_STF_SDM                  3               /* stf mode SDM */
 
-#define MAX_DMA_SEGS 4
+#define MAX_DMA_SEGS                   4
 
 /* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD           256
+#define NTXD                           256
 /* Max # of entries in Rx FIFO based on 4kb page size */
-#define NRXD           256
+#define NRXD                           256
 
 /* try to keep this # rbufs posted to the chip */
-#define        NRXBUFPOST      32
+#define NRXBUFPOST                     32
 
 /* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT                50
-
-/* bounded rx loops */
-#define RXBND          8 /* max # frames to process in brcms_c_recv() */
-#define TXSBND         8 /* max # tx status to process in wlc_txstatus() */
+#define BRCMS_DATAHIWAT                        50
 
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN       ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+/* max # frames to process in brcms_c_recv() */
+#define RXBND                          8
+/* max # tx status to process in wlc_txstatus() */
+#define TXSBND                         8
 
 /* brcmu_format_flags() bit description structure */
 struct brcms_c_bit_desc {
@@ -375,10 +300,22 @@ uint brcm_msg_level =
 #endif                         /* BCMDBG */
 
 /* TX FIFO number to WME/802.1E Access Category */
-static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
+static const u8 wme_fifo2ac[] = {
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BE,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VO,
+       IEEE80211_AC_BE,
+       IEEE80211_AC_BE
+};
 
-/* WME/802.1E Access Category to TX FIFO number */
-static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
+/* ieee80211 Access Category to TX FIFO number */
+static const u8 wme_ac2fifo[] = {
+       TX_AC_VO_FIFO,
+       TX_AC_VI_FIFO,
+       TX_AC_BE_FIFO,
+       TX_AC_BK_FIFO
+};
 
 /* 802.1D Priority to precedence queue mapping */
 const u8 wlc_prio2prec_map[] = {
@@ -405,13 +342,6 @@ static const u16 xmtfifo_sz[][NFIFO] = {
        {9, 58, 22, 14, 14, 5},
 };
 
-static const u8 acbitmap2maxprio[] = {
-       PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
-       PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
-       PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
-       PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
-};
-
 #ifdef BCMDBG
 static const char * const fifo_names[] = {
        "AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
@@ -424,6 +354,22 @@ static const char fifo_names[6][0];
 static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
 #endif
 
+/* Find basic rate for a given rate */
+static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
+{
+       if (is_mcs_rate(rspec))
+               return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
+                      .leg_ofdm];
+       return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
+}
+
+static u16 frametype(u32 rspec, u8 mimoframe)
+{
+       if (is_mcs_rate(rspec))
+               return mimoframe;
+       return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
+}
+
 /* currently the best mechanism for determining SIFS is the band in use */
 static u16 get_sifs(struct brcms_band *band)
 {
@@ -470,20 +416,6 @@ static int brcms_chspec_bw(u16 chanspec)
        return BRCMS_10_MHZ;
 }
 
-/*
- * return true if Minimum Power Consumption should
- * be entered, false otherwise
- */
-static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
-{
-       return false;
-}
-
-static bool brcms_c_ismpc(struct brcms_c_info *wlc)
-{
-       return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
-}
-
 static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
 {
        if (cfg == NULL)
@@ -669,9 +601,8 @@ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
  * calculate frame duration of a given rate and length, return
  * time in usec unit
  */
-uint
-brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
-                       u8 preamble_type, uint mac_len)
+static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
+                                   u8 preamble_type, uint mac_len)
 {
        uint nsyms, dur = 0, Ndps, kNdps;
        uint rate = rspec2rate(ratespec);
@@ -969,7 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
                    lfbl,       /* Long Frame Rate Fallback Limit */
                    fbl;
 
-               if (queue < AC_COUNT) {
+               if (queue < IEEE80211_NUM_ACS) {
                        sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
                                      EDCF_SFB);
                        lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
@@ -1018,14 +949,12 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
                        tx_info->flags |= IEEE80211_TX_STAT_ACK;
        }
 
-       totlen = brcmu_pkttotlen(p);
+       totlen = p->len;
        free_pdu = true;
 
        brcms_c_txfifo_complete(wlc, queue, 1);
 
        if (lastframe) {
-               p->next = NULL;
-               p->prev = NULL;
                /* remove PLCP & Broadcom tx descriptor header */
                skb_pull(p, D11_PHY_HDR_LEN);
                skb_pull(p, D11_TXH_LEN);
@@ -2352,13 +2281,6 @@ void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
        wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
 }
 
-static void brcms_c_fatal_error(struct brcms_c_info *wlc)
-{
-       wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
-                 wlc->pub->unit);
-       brcms_init(wlc->wl);
-}
-
 static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
 {
        bool fatal = false;
@@ -2414,7 +2336,7 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
                }
 
                if (fatal) {
-                       brcms_c_fatal_error(wlc_hw->wlc);       /* big hammer */
+                       brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
                        break;
                } else
                        W_REG(&regs->intctrlregs[idx].intstatus,
@@ -2479,6 +2401,7 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
        W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
 }
 
+/* assumes that the d11 MAC is enabled */
 static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
                                    uint tx_fifo)
 {
@@ -2535,11 +2458,12 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
        }
 }
 
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
+/* precondition: requires the mac core to be enabled */
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
 {
        static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 
-       if (on) {
+       if (mute_tx) {
                /* suspend tx fifos */
                brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
                brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
@@ -2561,14 +2485,20 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
                                       wlc_hw->etheraddr);
        }
 
-       wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
+       wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
 
-       if (on)
+       if (mute_tx)
                brcms_c_ucode_mute_override_set(wlc_hw);
        else
                brcms_c_ucode_mute_override_clear(wlc_hw);
 }
 
+void
+brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
+{
+       brcms_b_mute(wlc->hw, mute_tx);
+}
+
 /*
  * Read and clear macintmask and macintstatus and intstatus registers.
  * This routine should be called with interrupts off
@@ -3132,7 +3062,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
                return false;
 
        /* disallow PS when one of these meets when not scanning */
-       if (wlc->monitor)
+       if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
                return false;
 
        if (cfg->associated) {
@@ -3437,8 +3367,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
 }
 
 void
-static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
-                         bool mute) {
+static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
        u32 macintmask;
        bool fastclk;
        struct brcms_c_info *wlc = wlc_hw->wlc;
@@ -3463,10 +3392,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
        /* core-specific initialization */
        brcms_b_coreinit(wlc);
 
-       /* suspend the tx fifos and mute the phy for preism cac time */
-       if (mute)
-               brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
-
        /* band-specific inits */
        brcms_b_bsinit(wlc, chanspec);
 
@@ -3656,42 +3581,32 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
        brcms_c_set_phy_chanspec(wlc, chanspec);
 }
 
-static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
-{
-       if (wlc->bcnmisc_monitor)
-               brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
-       else
-               brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
-}
-
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
-       wlc->bcnmisc_monitor = promisc;
-       brcms_c_mac_bcn_promisc(wlc);
-}
-
-/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+/*
+ * Set or clear filtering related maccontrol bits based on
+ * specified filter flags
+ */
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
 {
        u32 promisc_bits = 0;
 
-       /*
-        * promiscuous mode just sets MCTL_PROMISC
-        * Note: APs get all BSS traffic without the need to set
-        * the MCTL_PROMISC bit since all BSS data traffic is
-        * directed at the AP
-        */
-       if (wlc->pub->promisc)
+       wlc->filter_flags = filter_flags;
+
+       if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
                promisc_bits |= MCTL_PROMISC;
 
-       /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
-        * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
-        * handled in brcms_c_mac_bcn_promisc()
-        */
-       if (wlc->monitor)
-               promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
+       if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
+               promisc_bits |= MCTL_BCNS_PROMISC;
+
+       if (filter_flags & FIF_FCSFAIL)
+               promisc_bits |= MCTL_KEEPBADFCS;
 
-       brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+       if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+               promisc_bits |= MCTL_KEEPCONTROL;
+
+       brcms_b_mctrl(wlc->hw,
+               MCTL_PROMISC | MCTL_BCNS_PROMISC |
+               MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+               promisc_bits);
 }
 
 /*
@@ -3721,10 +3636,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
        } else {
                /* disable an active IBSS if we are not on the home channel */
        }
-
-       /* update the various promisc bits */
-       brcms_c_mac_bcn_promisc(wlc);
-       brcms_c_mac_promisc(wlc);
 }
 
 static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3979,7 +3890,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
 
 void
 brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
-                     bool mute, struct txpwr_limits *txpwr)
+                     bool mute_tx, struct txpwr_limits *txpwr)
 {
        uint bandunit;
 
@@ -4005,7 +3916,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
                }
        }
 
-       wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
+       wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx);
 
        if (!wlc_hw->up) {
                if (wlc_hw->clk)
@@ -4017,7 +3928,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
                wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
 
                /* Update muting of the channel */
-               brcms_b_mute(wlc_hw, mute, 0);
+               brcms_b_mute(wlc_hw, mute_tx);
        }
 }
 
@@ -4205,7 +4116,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
            EDCF_TXOP2USEC(acp_shm.txop);
        acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
 
-       if (aci == AC_VI && acp_shm.txop == 0
+       if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
            && acp_shm.aifs < EDCF_AIFSN_MAX)
                acp_shm.aifs++;
 
@@ -4242,7 +4153,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
        }
 }
 
-void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
+static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
 {
        u16 aci;
        int i_ac;
@@ -4255,7 +4166,7 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
        }; /* ucode needs these parameters during its initialization */
        const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
 
-       for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
+       for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
                /* find out which ac this set of params applies to */
                aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
 
@@ -4277,17 +4188,6 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
        }
 }
 
-/* maintain LED behavior in down state */
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
-{
-       /*
-        * maintain LEDs while in down state, turn on sbclk if
-        * not available yet. Turn on sbclk if necessary
-        */
-       brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP);
-       brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP);
-}
-
 static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
 {
        /* Don't start the timer if HWRADIO feature is disabled */
@@ -4299,28 +4199,6 @@ static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
        brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
 }
 
-static void brcms_c_radio_disable(struct brcms_c_info *wlc)
-{
-       if (!wlc->pub->up) {
-               brcms_c_down_led_upd(wlc);
-               return;
-       }
-
-       brcms_c_radio_monitor_start(wlc);
-       brcms_down(wlc->wl);
-}
-
-static void brcms_c_radio_enable(struct brcms_c_info *wlc)
-{
-       if (wlc->pub->up)
-               return;
-
-       if (brcms_deviceremoved(wlc))
-               return;
-
-       brcms_up(wlc->wl);
-}
-
 static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
 {
        if (!wlc->radio_monitor)
@@ -4343,18 +4221,6 @@ static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
                mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
 }
 
-/*
- * centralized radio disable/enable function,
- * invoke radio enable/disable after updating hwradio status
- */
-static void brcms_c_radio_upd(struct brcms_c_info *wlc)
-{
-       if (wlc->pub->radio_disabled)
-               brcms_c_radio_disable(wlc);
-       else
-               brcms_c_radio_enable(wlc);
-}
-
 /* update hwradio status and return it */
 bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
 {
@@ -4376,12 +4242,7 @@ static void brcms_c_radio_timer(void *arg)
                return;
        }
 
-       /* cap mpc off count */
-       if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
-               wlc->mpc_offcnt++;
-
        brcms_c_radio_hwdisable_upd(wlc);
-       brcms_c_radio_upd(wlc);
 }
 
 /* common low-level watchdog code */
@@ -4407,60 +4268,6 @@ static void brcms_b_watchdog(void *arg)
        wlc_phy_watchdog(wlc_hw->band->pi);
 }
 
-static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
-{
-       bool mpc_radio, radio_state;
-
-       /*
-        * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
-        * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
-        * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
-        * the radio is going down.
-        */
-       if (!wlc->mpc) {
-               if (!wlc->pub->radio_disabled)
-                       return;
-               mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
-               brcms_c_radio_upd(wlc);
-               if (!wlc->pub->radio_disabled)
-                       brcms_c_radio_monitor_stop(wlc);
-               return;
-       }
-
-       /*
-        * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in
-        * wlc->pub->radio_disabled to go ON, always call radio_upd
-        * synchronously to go OFF, postpone radio_upd to later when
-        * context is safe(e.g. watchdog)
-        */
-       radio_state =
-           (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
-            ON);
-       mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
-
-       if (radio_state == ON && mpc_radio == OFF)
-               wlc->mpc_delay_off = wlc->mpc_dlycnt;
-       else if (radio_state == OFF && mpc_radio == ON) {
-               mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
-               brcms_c_radio_upd(wlc);
-               if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
-                       wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
-               else
-                       wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
-       }
-       /*
-        * Below logic is meant to capture the transition from mpc off
-        * to mpc on for reasons other than wlc->mpc_delay_off keeping
-        * the mpc off. In that case reset wlc->mpc_delay_off to
-        * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
-        */
-       if ((wlc->prev_non_delay_mpc == false) &&
-           (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off)
-               wlc->mpc_delay_off = wlc->mpc_dlycnt;
-
-       wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
-}
-
 /* common watchdog code */
 static void brcms_c_watchdog(void *arg)
 {
@@ -4481,21 +4288,7 @@ static void brcms_c_watchdog(void *arg)
        /* increment second count */
        wlc->pub->now++;
 
-       /* delay radio disable */
-       if (wlc->mpc_delay_off) {
-               if (--wlc->mpc_delay_off == 0) {
-                       mboolset(wlc->pub->radio_disabled,
-                                WL_RADIO_MPC_DISABLE);
-                       if (wlc->mpc && brcms_c_ismpc(wlc))
-                               wlc->mpc_offcnt = 0;
-               }
-       }
-
-       /* mpc sync */
-       brcms_c_radio_mpc_upd(wlc);
-       /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
        brcms_c_radio_hwdisable_upd(wlc);
-       brcms_c_radio_upd(wlc);
        /* if radio is disable, driver may be down, quit here */
        if (wlc->pub->radio_disabled)
                return;
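
The hunks above remove the driver's private MPC (minimum power consumption)
state machine; radio on/off is now driven from mac80211 rather than from the
watchdog. What survives is the radio_disabled bitmask, still updated through
the mbool helpers. Those helpers are, in essence, simple bit-flag macros along
these lines (a sketch, not copied from the tree; the rfkill condition is a
hypothetical stand-in):

        #define mboolset(mb, bit)       ((mb) |= (bit))
        #define mboolclr(mb, bit)       ((mb) &= ~(bit))
        #define mboolisset(mb, bit)     (((mb) & (bit)) != 0)

        /* e.g. tracking the hardware rfkill bit, as in
         * brcms_c_radio_hwdisable_upd() above */
        if (hw_rfkill_asserted)
                mboolset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
        else
                mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
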
@@ -4599,9 +4392,6 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
        /* WME QoS mode is Auto by default */
        wlc->pub->_ampdu = AMPDU_AGG_HOST;
        wlc->pub->bcmerror = 0;
-
-       /* initialize mpc delay */
-       wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
 }
 
 static uint brcms_c_attach_module(struct brcms_c_info *wlc)
@@ -5259,9 +5049,6 @@ static void brcms_c_ap_upd(struct brcms_c_info *wlc)
 {
        /* STA-BSS; short capable */
        wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
-
-       /* fixup mpc */
-       wlc->mpc = true;
 }
 
 /* Initialize just the hardware when coming out of POR or S3/S5 system states */
@@ -5376,7 +5163,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
        if (!wlc->clk)
                return;
 
-       for (ac = 0; ac < AC_COUNT; ac++)
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
                                  wlc->wme_retries[ac]);
 }
@@ -5575,7 +5362,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
        if (!wlc->pub->up)
                return callbacks;
 
-       /* in between, mpc could try to bring down again.. */
        wlc->going_down = true;
 
        callbacks += brcms_b_bmac_down_prep(wlc->hw);
@@ -5852,7 +5638,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
 
        brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
 
-       for (ac = 0; ac < AC_COUNT; ac++) {
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                wlc->wme_retries[ac] =  SFIELD(wlc->wme_retries[ac],
                                               EDCF_SHORT,  wlc->SRL);
                wlc->wme_retries[ac] =  SFIELD(wlc->wme_retries[ac],
@@ -6103,7 +5889,6 @@ void brcms_c_print_txdesc(struct d11txh *txh)
 
        u8 *rtsph = txh->RTSPhyHeader;
        struct ieee80211_rts rts = txh->rts_frame;
-       char hexbuf[256];
 
        /* add plcp header along with txh descriptor */
        printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
@@ -6124,17 +5909,16 @@ void brcms_c_print_txdesc(struct d11txh *txh)
        printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
        printk(KERN_DEBUG "\n");
 
-       brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
-       printk(KERN_DEBUG "SecIV:       %s\n", hexbuf);
-       brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
-       printk(KERN_DEBUG "RA:          %s\n", hexbuf);
+       print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
+       print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
+                            ra, sizeof(txh->TxFrameRA));
 
        printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
-       brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
-       printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+       print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
+                            rtspfb, sizeof(txh->RTSPLCPFallback));
        printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
-       brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
-       printk(KERN_DEBUG "PLCP: %s ", hexbuf);
+       print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
+                            fragpfb, sizeof(txh->FragPLCPFallback));
        printk(KERN_DEBUG "DUR: %04x", fragdfb);
        printk(KERN_DEBUG "\n");
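
Here and in the next hunk, brcmu_format_hex() plus a 256-byte stack buffer is
replaced with the kernel's print_hex_dump_bytes(), which logs a buffer at
KERN_DEBUG directly. Minimal usage sketch:

        #include <linux/printk.h>

        u8 iv[4] = { 0x01, 0x02, 0x03, 0x04 };

        /* prefix string, offset/address/none prefixing, buffer, length */
        print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(iv));
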
 
@@ -6149,18 +5933,18 @@ void brcms_c_print_txdesc(struct d11txh *txh)
        printk(KERN_DEBUG "MaxAggbyte_fb:  %04x\n", mabyte_f);
        printk(KERN_DEBUG "MinByte:     %04x\n", mmbyte);
 
-       brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
-       printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
-       brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
-       printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
+       print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
+                            rtsph, sizeof(txh->RTSPhyHeader));
+       print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
+                            (u8 *)&rts, sizeof(txh->rts_frame));
        printk(KERN_DEBUG "\n");
 }
 #endif                         /* defined(BCMDBG) */
 
 #if defined(BCMDBG)
-int
+static int
 brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
-                  int len)
+                    int len)
 {
        int i;
        char *p = buf;
@@ -6916,7 +6700,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
        qos = ieee80211_is_data_qos(h->frame_control);
 
        /* compute length of frame in bytes for use in PLCP computations */
-       len = brcmu_pkttotlen(p);
+       len = p->len;
        phylen = len + FCS_LEN;
 
        /* Get tx_info */
@@ -8253,12 +8037,6 @@ int brcms_c_get_tx_power(struct brcms_c_info *wlc)
        return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
 }
 
-void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc)
-{
-       wlc->mpc = mpc;
-       brcms_c_radio_mpc_upd(wlc);
-}
-
 /* Process received frames */
 /*
  * Return true if more frames need to be processed. false otherwise.
@@ -8293,14 +8071,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
        len = p->len;
 
        if (rxh->RxStatus1 & RXS_FCSERR) {
-               if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
-                       wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
-                                 " tossing\n");
-                       goto toss;
-               } else {
-                       wiphy_err(wlc->wiphy, "RCSERR!!!\n");
+               if (!(wlc->filter_flags & FIF_FCSFAIL))
                        goto toss;
-               }
        }
 
        /* check received pkt has at least frame control field */
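
FCS-errored frames are now tossed unless mac80211 asked for them via
FIF_FCSFAIL, instead of the driver inferring intent from scan state. The flags
reach wlc->filter_flags through the new brcms_c_mac_promisc() (declared later
in this series); a hedged sketch of how a configure_filter callback typically
feeds it -- everything except the mac80211 ops signature and FIF_* flags is
illustrative:

        static void brcms_ops_configure_filter(struct ieee80211_hw *hw,
                                               unsigned int changed_flags,
                                               unsigned int *total_flags,
                                               u64 multicast)
        {
                struct brcms_info *wl = hw->priv;

                /* keep only the filter flags this driver can honour */
                *total_flags &= FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_CONTROL |
                                FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;

                brcms_c_mac_promisc(wl->wlc, *total_flags);
        }
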
@@ -8328,21 +8100,17 @@ static bool
 brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
 {
        struct sk_buff *p;
-       struct sk_buff *head = NULL;
-       struct sk_buff *tail = NULL;
+       struct sk_buff *next = NULL;
+       struct sk_buff_head recv_frames;
+
        uint n = 0;
        uint bound_limit = bound ? RXBND : -1;
 
        BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
-       /* gather received frames */
-       while ((p = dma_rx(wlc_hw->di[fifo]))) {
+       skb_queue_head_init(&recv_frames);
 
-               if (!tail)
-                       head = tail = p;
-               else {
-                       tail->prev = p;
-                       tail = p;
-               }
+       /* gather received frames */
+       while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
 
                /* !give others some time to run! */
                if (++n >= bound_limit)
@@ -8353,12 +8121,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
        dma_rxfill(wlc_hw->di[fifo]);
 
        /* process each frame */
-       while ((p = head) != NULL) {
+       skb_queue_walk_safe(&recv_frames, p, next) {
                struct d11rxhdr_le *rxh_le;
                struct d11rxhdr *rxh;
-               head = head->prev;
-               p->prev = NULL;
 
+               skb_unlink(p, &recv_frames);
                rxh_le = (struct d11rxhdr_le *)p->data;
                rxh = (struct d11rxhdr *)p->data;
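
The hand-rolled prev-pointer chain is gone: dma_rx() now appends into a
standard struct sk_buff_head and the consumer walks it with the normal skb
queue helpers. The core pattern, roughly (in this series dma_rx() appears to
queue buffers, e.g. via skb_queue_tail(), and return false once the ring is
empty):

        struct sk_buff_head frames;
        struct sk_buff *skb, *tmp;

        skb_queue_head_init(&frames);

        /* producer: pull frames off the DMA ring into the queue */
        while (dma_rx(wlc_hw->di[fifo], &frames))
                ;

        /* consumer: the _safe walk allows unlinking as we go */
        skb_queue_walk_safe(&frames, skb, tmp) {
                skb_unlink(skb, &frames);
                /* ... convert the d11 rx header, hand the frame up ... */
        }
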
 
@@ -8448,8 +8215,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
                printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
                                        __func__, wlc_hw->sih->chip,
                                        wlc_hw->sih->chiprev);
-               /* big hammer */
-               brcms_init(wlc->wl);
+               brcms_fatal_error(wlc_hw->wlc->wl);
        }
 
        /* gptimer timeout */
@@ -8470,15 +8236,14 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
        return wlc->macintstatus != 0;
 
  fatal:
-       brcms_init(wlc->wl);
+       brcms_fatal_error(wlc_hw->wlc->wl);
        return wlc->macintstatus != 0;
 }
 
-void brcms_c_init(struct brcms_c_info *wlc)
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
 {
        struct d11regs __iomem *regs;
        u16 chanspec;
-       bool mute = false;
 
        BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
 
@@ -8494,7 +8259,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
        else
                chanspec = brcms_c_init_chanspec(wlc);
 
-       brcms_b_init(wlc->hw, chanspec, mute);
+       brcms_b_init(wlc->hw, chanspec);
 
        /* update beacon listen interval */
        brcms_c_bcn_li_upd(wlc);
@@ -8560,15 +8325,16 @@ void brcms_c_init(struct brcms_c_info *wlc)
        /* ..now really unleash hell (allow the MAC out of suspend) */
        brcms_c_enable_mac(wlc);
 
+       /* suspend the tx fifos and mute the phy for pre-ISM CAC time */
+       /* suspend the tx fifos and mute the phy for pre-ISM CAC time */
+       if (mute_tx)
+               brcms_b_mute(wlc->hw, true);
+
        /* clear tx flow control */
        brcms_c_txflowcontrol_reset(wlc);
 
        /* enable the RF Disable Delay timer */
        W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
 
-       /* initialize mpc delay */
-       wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
-
        /*
         * Initialize WME parameters; if they haven't been set by some other
         * mechanism (IOVar, etc) then read them from the hardware.
@@ -8577,7 +8343,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
                /* Uninitialized; read from HW */
                int ac;
 
-               for (ac = 0; ac < AC_COUNT; ac++)
+               for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        wlc->wme_retries[ac] =
                            brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
        }
@@ -8754,8 +8520,6 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
                brcms_c_ht_update_sgi_rx(wlc, 0);
        }
 
-       /* initialize radio_mpc_disable according to wlc->mpc */
-       brcms_c_radio_mpc_upd(wlc);
        brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
 
        if (perr)
index c0e0fcf..e2de97d 100644 (file)
@@ -44,8 +44,6 @@
 /* transmit buffer max headroom for protocol headers */
 #define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
 
-#define AC_COUNT               4
-
 /* Macros for doing definition and get/set of bitfields
  * Usage example, e.g. a three-bit field (bits 4-6):
  *    #define <NAME>_M BITFIELD_MASK(3)
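
AC_COUNT is dropped in favour of IEEE80211_NUM_ACS. The surviving comment above
documents this header's bitfield helpers; assuming the usual _M (mask) / _S
(shift) convention those macros pair with, a completed version of its usage
example might read (EXAMPLE is a hypothetical field name):

        /* a three-bit field occupying bits 4..6 of a register word */
        #define EXAMPLE_M       BITFIELD_MASK(3)
        #define EXAMPLE_S       4

        u16 reg = 0;
        reg = SFIELD(reg, EXAMPLE, 5);          /* set the field to 5 */
        u16 val = GFIELD(reg, EXAMPLE);         /* read it back: 5 */
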
@@ -427,11 +425,6 @@ struct brcms_txq_info {
  * bandinit_pending: track band init in auto band.
  * radio_monitor: radio timer is running.
  * going_down: down path intermediate variable.
- * mpc: enable minimum power consumption.
- * mpc_dlycnt: # of watchdog cnt before turn disable radio.
- * mpc_offcnt: # of watchdog cnt that radio is disabled.
- * mpc_delay_off: delay radio disable by # of watchdog cnt.
- * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc.
  * wdtimer: timer for watchdog routine.
  * radio_timer: timer for hw radio button monitor routine.
  * monitor: monitor (MPDU sniffing) mode.
@@ -441,7 +434,7 @@ struct brcms_txq_info {
  * bcn_li_dtim: beacon listen interval in # dtims.
  * WDarmed: watchdog timer is armed.
  * WDlast: last time wlc_watchdog() was called.
- * edcf_txop[AC_COUNT]: current txop for each ac.
+ * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
  * wme_retries: per-AC retry limits.
  * tx_prec_map: Precedence map based on HW FIFO space.
  * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
@@ -522,18 +515,11 @@ struct brcms_c_info {
        bool radio_monitor;
        bool going_down;
 
-       bool mpc;
-       u8 mpc_dlycnt;
-       u8 mpc_offcnt;
-       u8 mpc_delay_off;
-       u8 prev_non_delay_mpc;
-
        struct brcms_timer *wdtimer;
        struct brcms_timer *radio_timer;
 
        /* promiscuous */
-       bool monitor;
-       bool bcnmisc_monitor;
+       uint filter_flags;
 
        /* driver feature */
        bool _rifs;
@@ -546,9 +532,9 @@ struct brcms_c_info {
        u32 WDlast;
 
        /* WME */
-       u16 edcf_txop[AC_COUNT];
+       u16 edcf_txop[IEEE80211_NUM_ACS];
 
-       u16 wme_retries[AC_COUNT];
+       u16 wme_retries[IEEE80211_NUM_ACS];
        u16 tx_prec_map;
        u16 fifo2prec_map[NFIFO];
 
@@ -671,8 +657,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
 #endif
 
 extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
-                                          bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
 extern void brcms_c_send_q(struct brcms_c_info *wlc);
 extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
                            uint *fifo);
index a314925..008aab9 100644 (file)
@@ -109,10 +109,10 @@ static const struct chan_info_basic chan_info_all[] = {
        {204, 5020},
        {208, 5040},
        {212, 5060},
-       {216, 50800}
+       {216, 5080}
 };
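
The {216, 50800} entry was a typo: the neighbouring 4.9 GHz (Japan band)
entries follow freq = 4000 + 5 * channel (204 -> 5020, 208 -> 5040,
212 -> 5060), so channel 216 is 5080 MHz, matching the corrected value.
Illustrative check only; the driver keeps these values in chan_info_all[]:

        static inline u16 chan49_to_mhz(u8 channel)
        {
                return 4000 + 5 * channel;      /* 216 -> 5080 */
        }
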
 
-const u8 ofdm_rate_lookup[] = {
+static const u8 ofdm_rate_lookup[] = {
 
        BRCM_RATE_48M,
        BRCM_RATE_24M,
@@ -190,15 +190,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
                data = R_REG(&pi->regs->radioregdata);
        } else {
                W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-
-#ifdef __ARM_ARCH_4T__
-               __asm__(" .align 4 ");
-               __asm__(" nop ");
-               data = R_REG(&pi->regs->phy4wdatalo);
-#else
                data = R_REG(&pi->regs->phy4wdatalo);
-#endif
-
        }
        pi->phy_wreg = 0;
 
index bea8524..5f9478b 100644 (file)
@@ -774,11 +774,6 @@ struct brcms_phy {
        s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ];
        u8 nphy_noise_index;
 
-       u8 nphy_txpid2g[PHY_CORE_NUM_2];
-       u8 nphy_txpid5g[PHY_CORE_NUM_2];
-       u8 nphy_txpid5gl[PHY_CORE_NUM_2];
-       u8 nphy_txpid5gh[PHY_CORE_NUM_2];
-
        bool nphy_gain_boost;
        bool nphy_elna_gain_config;
        u16 old_bphy_test;
index cd19c2f..ec9b566 100644 (file)
@@ -29,6 +29,7 @@
 #include "phy_radio.h"
 #include "phyreg_n.h"
 #include "phytbl_n.h"
+#include "soc.h"
 
 #define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name)        \
        read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
@@ -14417,12 +14418,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
                switch (band_num) {
                case 0:
 
-                       pi->nphy_txpid2g[PHY_CORE_0] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID2GA0);
-                       pi->nphy_txpid2g[PHY_CORE_1] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID2GA1);
                        pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
                                (s8) wlapi_getintvar(shim,
                                                     BRCMS_SROM_MAXP2GA0);
@@ -14486,12 +14481,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
                        break;
                case 1:
 
-                       pi->nphy_txpid5g[PHY_CORE_0] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID5GA0);
-                       pi->nphy_txpid5g[PHY_CORE_1] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID5GA1);
                        pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
                                (s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
                        pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
@@ -14551,12 +14540,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
                        break;
                case 2:
 
-                       pi->nphy_txpid5gl[0] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID5GLA0);
-                       pi->nphy_txpid5gl[1] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID5GLA1);
                        pi->nphy_pwrctrl_info[0].max_pwr_5gl =
                                (s8) wlapi_getintvar(shim,
                                                     BRCMS_SROM_MAXP5GLA0);
@@ -14615,12 +14598,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
                        break;
                case 3:
 
-                       pi->nphy_txpid5gh[0] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID5GHA0);
-                       pi->nphy_txpid5gh[1] =
-                               (u8) wlapi_getintvar(shim,
-                                                    BRCMS_SROM_TXPID5GHA1);
                        pi->nphy_pwrctrl_info[0].max_pwr_5gh =
                                (s8) wlapi_getintvar(shim,
                                                     BRCMS_SROM_MAXP5GHA0);
@@ -27994,20 +27971,11 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
                chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
                switch (chan_freq_range) {
                case WL_CHAN_FREQ_RANGE_2G:
-                       txpi[0] = pi->nphy_txpid2g[0];
-                       txpi[1] = pi->nphy_txpid2g[1];
-                       break;
                case WL_CHAN_FREQ_RANGE_5GL:
-                       txpi[0] = pi->nphy_txpid5gl[0];
-                       txpi[1] = pi->nphy_txpid5gl[1];
-                       break;
                case WL_CHAN_FREQ_RANGE_5GM:
-                       txpi[0] = pi->nphy_txpid5g[0];
-                       txpi[1] = pi->nphy_txpid5g[1];
-                       break;
                case WL_CHAN_FREQ_RANGE_5GH:
-                       txpi[0] = pi->nphy_txpid5gh[0];
-                       txpi[1] = pi->nphy_txpid5gh[1];
+                       txpi[0] = 0;
+                       txpi[1] = 0;
                        break;
                default:
                        txpi[0] = txpi[1] = 91;
index 3b36e3a..12ba575 100644 (file)
@@ -23,6 +23,7 @@
 #include "pub.h"
 #include "aiutils.h"
 #include "pmu.h"
+#include "soc.h"
 
 /*
  * external LPO crystal frequency
index 37bb2dc..21ccf3a 100644 (file)
@@ -170,22 +170,6 @@ enum brcms_srom_id {
        BRCMS_SROM_TSSIPOS2G,
        BRCMS_SROM_TSSIPOS5G,
        BRCMS_SROM_TXCHAIN,
-       BRCMS_SROM_TXPID2GA0,
-       BRCMS_SROM_TXPID2GA1,
-       BRCMS_SROM_TXPID2GA2,
-       BRCMS_SROM_TXPID2GA3,
-       BRCMS_SROM_TXPID5GA0,
-       BRCMS_SROM_TXPID5GA1,
-       BRCMS_SROM_TXPID5GA2,
-       BRCMS_SROM_TXPID5GA3,
-       BRCMS_SROM_TXPID5GHA0,
-       BRCMS_SROM_TXPID5GHA1,
-       BRCMS_SROM_TXPID5GHA2,
-       BRCMS_SROM_TXPID5GHA3,
-       BRCMS_SROM_TXPID5GLA0,
-       BRCMS_SROM_TXPID5GLA1,
-       BRCMS_SROM_TXPID5GLA2,
-       BRCMS_SROM_TXPID5GLA3,
        /*
         * per-path identifiers (see srom.c)
         */
@@ -225,10 +209,6 @@ enum brcms_srom_id {
        BRCMS_SROM_PA2GW2A1,
        BRCMS_SROM_PA2GW2A2,
        BRCMS_SROM_PA2GW2A3,
-       BRCMS_SROM_PA2GW3A0,
-       BRCMS_SROM_PA2GW3A1,
-       BRCMS_SROM_PA2GW3A2,
-       BRCMS_SROM_PA2GW3A3,
        BRCMS_SROM_PA5GHW0A0,
        BRCMS_SROM_PA5GHW0A1,
        BRCMS_SROM_PA5GHW0A2,
@@ -241,10 +221,6 @@ enum brcms_srom_id {
        BRCMS_SROM_PA5GHW2A1,
        BRCMS_SROM_PA5GHW2A2,
        BRCMS_SROM_PA5GHW2A3,
-       BRCMS_SROM_PA5GHW3A0,
-       BRCMS_SROM_PA5GHW3A1,
-       BRCMS_SROM_PA5GHW3A2,
-       BRCMS_SROM_PA5GHW3A3,
        BRCMS_SROM_PA5GLW0A0,
        BRCMS_SROM_PA5GLW0A1,
        BRCMS_SROM_PA5GLW0A2,
@@ -257,10 +233,6 @@ enum brcms_srom_id {
        BRCMS_SROM_PA5GLW2A1,
        BRCMS_SROM_PA5GLW2A2,
        BRCMS_SROM_PA5GLW2A3,
-       BRCMS_SROM_PA5GLW3A0,
-       BRCMS_SROM_PA5GLW3A1,
-       BRCMS_SROM_PA5GLW3A2,
-       BRCMS_SROM_PA5GLW3A3,
        BRCMS_SROM_PA5GW0A0,
        BRCMS_SROM_PA5GW0A1,
        BRCMS_SROM_PA5GW0A2,
@@ -273,14 +245,9 @@ enum brcms_srom_id {
        BRCMS_SROM_PA5GW2A1,
        BRCMS_SROM_PA5GW2A2,
        BRCMS_SROM_PA5GW2A3,
-       BRCMS_SROM_PA5GW3A0,
-       BRCMS_SROM_PA5GW3A1,
-       BRCMS_SROM_PA5GW3A2,
-       BRCMS_SROM_PA5GW3A3,
 };
 
 #define        BRCMS_NUMRATES  16      /* max # of rates in a rateset */
-#define        D11_PHY_HDR_LEN 6       /* Phy header length - 6 bytes */
 
 /* phy types */
 #define        PHY_TYPE_A      0       /* Phy type A */
@@ -414,7 +381,6 @@ struct brcms_pub {
        uint _nbands;           /* # bands supported */
        uint now;               /* # elapsed seconds */
 
-       bool promisc;           /* promiscuous destination address */
        bool delayed_down;      /* down delayed */
        bool associated;        /* true:part of [I]BSS, false: not */
        /* (union of stas_associated, aps_associated) */
@@ -572,7 +538,7 @@ extern int brcms_c_up(struct brcms_c_info *wlc);
 extern uint brcms_c_down(struct brcms_c_info *wlc);
 
 extern bool brcms_c_chipmatch(u16 vendor, u16 device);
-extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
 extern void brcms_c_reset(struct brcms_c_info *wlc);
 
 extern void brcms_c_intrson(struct brcms_c_info *wlc);
@@ -628,7 +594,7 @@ extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
                                        u8 interval);
 extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
 
 #endif                         /* _BRCM_PUB_H_ */
index e7b9dc2..980d578 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "types.h"
 #include "d11.h"
+#include "phy_hal.h"
 
 extern const u8 rate_info[];
 extern const struct brcms_c_rateset cck_ofdm_mimo_rates;
@@ -198,11 +199,9 @@ static inline u8 cck_rspec(u8 cck)
 
 /* Convert encoded rate value in plcp header to numerical rates in 500 KHz
  * increments */
-extern const u8 ofdm_rate_lookup[];
-
 static inline u8 ofdm_phy2mac_rate(u8 rlpt)
 {
-       return ofdm_rate_lookup[rlpt & 0x7];
+       return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7];
 }
 
 static inline u8 cck_phy2mac_rate(u8 signal)
index 99f7910..b6987ea 100644 (file)
@@ -28,6 +28,7 @@
 #include "aiutils.h"
 #include "otp.h"
 #include "srom.h"
+#include "soc.h"
 
 /*
  * SROM CRC8 polynomial value:
@@ -62,9 +63,6 @@
 #define        SROM_MACHI_ET1          42
 #define        SROM_MACMID_ET1         43
 #define        SROM_MACLO_ET1          44
-#define        SROM3_MACHI             37
-#define        SROM3_MACMID            38
-#define        SROM3_MACLO             39
 
 #define        SROM_BXARSSI2G          40
 #define        SROM_BXARSSI5G          41
 
 #define        SROM_BFL                57
 #define        SROM_BFL2               28
-#define        SROM3_BFL2              61
 
 #define        SROM_AG10               58
 
 
 #define        SROM_OPO                60
 
-#define        SROM3_LEDDC             62
-
 #define        SROM_CRCREV             63
 
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
 #define        SROM4_WORDS             220
 
-#define        SROM4_SIGN              32
-#define        SROM4_SIGNATURE         0x5372
-
-#define        SROM4_BREV              33
-
-#define        SROM4_BFL0              34
-#define        SROM4_BFL1              35
-#define        SROM4_BFL2              36
-#define        SROM4_BFL3              37
-#define        SROM5_BFL0              37
-#define        SROM5_BFL1              38
-#define        SROM5_BFL2              39
-#define        SROM5_BFL3              40
-
-#define        SROM4_MACHI             38
-#define        SROM4_MACMID            39
-#define        SROM4_MACLO             40
-#define        SROM5_MACHI             41
-#define        SROM5_MACMID            42
-#define        SROM5_MACLO             43
-
-#define        SROM4_CCODE             41
-#define        SROM4_REGREV            42
-#define        SROM5_CCODE             34
-#define        SROM5_REGREV            35
-
-#define        SROM4_LEDBH10           43
-#define        SROM4_LEDBH32           44
-#define        SROM5_LEDBH10           59
-#define        SROM5_LEDBH32           60
-
-#define        SROM4_LEDDC             45
-#define        SROM5_LEDDC             45
-
-#define        SROM4_AA                46
-
-#define        SROM4_AG10              47
-#define        SROM4_AG32              48
-
-#define        SROM4_TXPID2G           49
-#define        SROM4_TXPID5G           51
-#define        SROM4_TXPID5GL          53
-#define        SROM4_TXPID5GH          55
-
-#define SROM4_TXRXC            61
 #define SROM4_TXCHAIN_MASK     0x000f
-#define SROM4_TXCHAIN_SHIFT    0
 #define SROM4_RXCHAIN_MASK     0x00f0
-#define SROM4_RXCHAIN_SHIFT    4
 #define SROM4_SWITCH_MASK      0xff00
-#define SROM4_SWITCH_SHIFT     8
 
 /* Per-path fields */
 #define        MAX_PATH_SROM           4
-#define        SROM4_PATH0             64
-#define        SROM4_PATH1             87
-#define        SROM4_PATH2             110
-#define        SROM4_PATH3             133
-
-#define        SROM4_2G_ITT_MAXP       0
-#define        SROM4_2G_PA             1
-#define        SROM4_5G_ITT_MAXP       5
-#define        SROM4_5GLH_MAXP         6
-#define        SROM4_5G_PA             7
-#define        SROM4_5GL_PA            11
-#define        SROM4_5GH_PA            15
-
-/* All the miriad power offsets */
-#define        SROM4_2G_CCKPO          156
-#define        SROM4_2G_OFDMPO         157
-#define        SROM4_5G_OFDMPO         159
-#define        SROM4_5GL_OFDMPO        161
-#define        SROM4_5GH_OFDMPO        163
-#define        SROM4_2G_MCSPO          165
-#define        SROM4_5G_MCSPO          173
-#define        SROM4_5GL_MCSPO         181
-#define        SROM4_5GH_MCSPO         189
-#define        SROM4_CDDPO             197
-#define        SROM4_STBCPO            198
-#define        SROM4_BW40PO            199
-#define        SROM4_BWDUPPO           200
 
 #define        SROM4_CRCREV            219
 
@@ -424,103 +338,32 @@ struct brcms_varbuf {
 static const struct brcms_sromvar pci_sromvars[] = {
        {BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
         0xffff},
-       {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV,
-        SROM_BR_MASK},
-       {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
        {BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
-        0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
-        0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0,
-        0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0,
-        0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff},
        {BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
         0xffff},
        {BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2,
-        0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff},
-       {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2,
-        0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff},
        {BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
         0xffff},
        {BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
        {BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
-       {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
-       {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff},
-       {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff},
-       {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff},
        {BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
-       {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
-       {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00},
-       {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff},
-       {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff},
        {BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
-       {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
-       {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
-       {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
-       {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
-       {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
-       {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
-       {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
-       {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
-       {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
-       {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
-       {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
-       {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
        {BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
        {BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
        {BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
        {BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
-       {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
-       {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
-       {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
-       {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff},
-       {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
        {BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
        {BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
        {BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
        {BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
        {BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
-       {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff},
        {BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
-       {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
-       {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff},
        {BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
-       {BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
-       {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00},
        {BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
-       {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff},
-       {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00},
-       {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff},
-       {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00},
-       {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff},
-       {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00},
        {BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
        {BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
        {BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
        {BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
-       {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
-       {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
-       {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
-       {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
-       {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
-       {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
-       {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
-       {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
-       {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
-       {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00},
-       {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
-       {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
-       {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
        {BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
        {BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
        {BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
@@ -534,40 +377,20 @@ static const struct brcms_sromvar pci_sromvars[] = {
        {BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
        {BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
        {BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
-       {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
-       {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
-       {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
-       {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
        {BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
        {BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
        {BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
        {BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
-       {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
-       {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
-       {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
-       {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
        {BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
        {BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
        {BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
        {BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
-       {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff},
-       {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00},
-       {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
-       {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00},
        {BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
        {BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
        {BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
        {BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
-       {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
-       {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
        {BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
        {BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
-       {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
-        SROM4_TXCHAIN_MASK},
-       {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
-        SROM4_RXCHAIN_MASK},
-       {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
-        SROM4_SWITCH_MASK},
        {BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
         SROM4_TXCHAIN_MASK},
        {BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
@@ -594,43 +417,11 @@ static const struct brcms_sromvar pci_sromvars[] = {
         SROM8_FEM_ANTSWLUT_MASK},
        {BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
        {BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
-       {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
-       {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
-       {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
-       {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
-       {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
-       {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
-       {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
-       {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
-       {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
-       {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
-       {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
-       {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
-       {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
-       {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
-       {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
-       {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
-
-       {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
-       {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
-       {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+
        {BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
        {BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
-       {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
-       {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
-       {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
-       {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0,
-        0xffff},
-       {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1,
-        0xffff},
        {BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
         0xffff},
-       {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC,
-        0xffff},
-       {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC,
-        0xffff},
-       {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC,
-        0xffff},
        {BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
         0x01ff},
        {BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
@@ -650,16 +441,7 @@ static const struct brcms_sromvar pci_sromvars[] = {
        {BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
         0x00ff},
 
-       {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
        {BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
-       {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
-       {BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
-       {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
-       {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
-       {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
        {BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
        {BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
        {BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
@@ -668,38 +450,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
        {BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
        {BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
        {BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
-       {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
-       {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
-       {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
-       {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
-       {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
-       {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
-       {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
-       {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
-       {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
-       {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
-       {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
-       {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
-       {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
-       {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
-       {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
-       {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
-       {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
-       {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
-       {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
-       {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
-       {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
-       {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
-       {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
-       {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
-       {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
-       {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
-       {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
-       {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
-       {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
-       {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
-       {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
-       {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
        {BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
        {BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
        {BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
@@ -732,10 +482,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
        {BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
        {BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
        {BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
-       {BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff},
-       {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff},
-       {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff},
-       {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
        {BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
        {BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
        {BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
@@ -811,34 +557,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
 };
 
 static const struct brcms_sromvar perpath_pci_sromvars[] = {
-       {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
-       {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
-       {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
-       {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
-       {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
-       {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
-       {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
-       {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
-       {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
-       {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
-       {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
-       {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
-       {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
-       {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
-       {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
-       {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1,
-        0xffff},
-       {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2,
-        0xffff},
-       {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3,
-        0xffff},
-       {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
-       {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1,
-        0xffff},
-       {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2,
-        0xffff},
-       {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3,
-        0xffff},
        {BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
        {BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
        {BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
@@ -868,24 +586,17 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
  * shared between devices. */
 static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
 
-static u16 __iomem *
+static u8 __iomem *
 srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
 {
        if (sih->ccrev < 32)
-               return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET);
+               return curmap + PCI_BAR0_SPROM_OFFSET;
        if (sih->cccaps & CC_CAP_SROM)
-               return (u16 __iomem *)
-                      (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP);
+               return curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP;
 
        return NULL;
 }
 
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
 static uint mask_shift(u16 mask)
 {
        uint i;
@@ -906,18 +617,16 @@ static uint mask_width(u16 mask)
        return 0;
 }
 
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
+static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
 {
-       size /= 2;
-       while (size--)
-               *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size));
+       while (nwords--)
+               *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
 }
 
-static inline void htol16_buf(u16 *buf, unsigned int size)
+static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
 {
-       size /= 2;
-       while (size--)
-               *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size));
+       while (nwords--)
+               *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
 }
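
The renamed helpers take a word count rather than a byte count, so callers no
longer pass "nwords * 2" and the units cannot be confused. Caller-side change,
as seen further down in this file:

        /* before: length in bytes, halved internally */
        htol16_buf(buf, nwords * 2);
        ltoh16_buf(buf, nwords * 2);

        /* after: length in 16-bit words */
        cpu_to_le16_buf(buf, nwords);
        le16_to_cpu_buf(buf, nwords);
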
 
 /*
@@ -929,11 +638,14 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
        struct brcms_srom_list_head *entry;
        enum brcms_srom_id id;
        u16 w;
-       u32 val;
+       u32 val = 0;
        const struct brcms_sromvar *srv;
        uint width;
        uint flags;
        u32 sr = (1 << sromrev);
+       uint p;
+       uint pb =  SROM8_PATH0;
+       const uint psz = SROM8_PATH1 - SROM8_PATH0;
 
        /* first store the srom revision */
        entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
@@ -1031,47 +743,34 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
                list_add(&entry->var_list, var_list);
        }
 
-       if (sromrev >= 4) {
-               /* Do per-path variables */
-               uint p, pb, psz;
-
-               if (sromrev >= 8) {
-                       pb = SROM8_PATH0;
-                       psz = SROM8_PATH1 - SROM8_PATH0;
-               } else {
-                       pb = SROM4_PATH0;
-                       psz = SROM4_PATH1 - SROM4_PATH0;
-               }
-
-               for (p = 0; p < MAX_PATH_SROM; p++) {
-                       for (srv = perpath_pci_sromvars;
-                            srv->varid != BRCMS_SROM_NULL; srv++) {
-                               if ((srv->revmask & sr) == 0)
-                                       continue;
+       for (p = 0; p < MAX_PATH_SROM; p++) {
+               for (srv = perpath_pci_sromvars;
+                    srv->varid != BRCMS_SROM_NULL; srv++) {
+                       if ((srv->revmask & sr) == 0)
+                               continue;
 
-                               if (srv->flags & SRFL_NOVAR)
-                                       continue;
+                       if (srv->flags & SRFL_NOVAR)
+                               continue;
 
-                               w = srom[pb + srv->off];
-                               val = (w & srv->mask) >> mask_shift(srv->mask);
-                               width = mask_width(srv->mask);
+                       w = srom[pb + srv->off];
+                       val = (w & srv->mask) >> mask_shift(srv->mask);
+                       width = mask_width(srv->mask);
 
-                               /* Cheating: no per-path var is more than
-                                * 1 word */
-                               if ((srv->flags & SRFL_NOFFS)
-                                   && ((int)val == (1 << width) - 1))
-                                       continue;
+                       /* Cheating: no per-path var is more than
+                        * 1 word */
+                       if ((srv->flags & SRFL_NOFFS)
+                           && ((int)val == (1 << width) - 1))
+                               continue;
 
-                               entry =
-                                   kzalloc(sizeof(struct brcms_srom_list_head),
-                                           GFP_KERNEL);
-                               entry->varid = srv->varid+p;
-                               entry->var_type = BRCMS_SROM_UNUMBER;
-                               entry->uval = val;
-                               list_add(&entry->var_list, var_list);
-                       }
-                       pb += psz;
+                       entry =
+                           kzalloc(sizeof(struct brcms_srom_list_head),
+                                   GFP_KERNEL);
+                       entry->varid = srv->varid+p;
+                       entry->var_type = BRCMS_SROM_UNUMBER;
+                       entry->uval = val;
+                       list_add(&entry->var_list, var_list);
                }
+               pb += psz;
        }
 }
 
@@ -1080,41 +779,38 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
  * Return 0 on success, nonzero on error.
  */
 static int
-sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff,
+sprom_read_pci(struct si_pub *sih, u8 __iomem *sprom, uint wordoff,
               u16 *buf, uint nwords, bool check_crc)
 {
        int err = 0;
        uint i;
+       u8 *bbuf = (u8 *)buf; /* byte buffer */
+       uint nbytes = nwords << 1;
 
-       /* read the sprom */
-       for (i = 0; i < nwords; i++)
-               buf[i] = R_REG(&sprom[wordoff + i]);
-
-       if (check_crc) {
+       /* read the sprom in bytes */
+       for (i = 0; i < nbytes; i++)
+               bbuf[i] = readb(sprom+i);
 
-               if (buf[0] == 0xffff)
-                       /*
-                        * The hardware thinks that an srom that starts with
-                        * 0xffff is blank, regardless of the rest of the
-                        * content, so declare it bad.
-                        */
-                       return -ENODATA;
-
-               /* fixup the endianness so crc8 will pass */
-               htol16_buf(buf, nwords * 2);
-               if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2,
-                        CRC8_INIT_VALUE) !=
-                        CRC8_GOOD_VALUE(brcms_srom_crc8_table))
-                       /* DBG only pci always read srom4 first, then srom8/9 */
-                       err = -EIO;
+       if (buf[0] == 0xffff)
+               /*
+                * The hardware thinks that an srom that starts with
+                * 0xffff is blank, regardless of the rest of the
+                * content, so declare it bad.
+                */
+               return -ENODATA;
 
+       if (check_crc &&
+           crc8(brcms_srom_crc8_table, bbuf, nbytes, CRC8_INIT_VALUE) !=
+                CRC8_GOOD_VALUE(brcms_srom_crc8_table))
+               err = -EIO;
+       else
                /* now correct the endianness of the byte array */
-               ltoh16_buf(buf, nwords * 2);
-       }
+               le16_to_cpu_buf(buf, nwords);
+
        return err;
 }
 
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
 {
        u8 *otp;
        uint sz = OTP_SZ_MAX / 2;       /* size in words */
@@ -1126,7 +822,8 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
 
        err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
 
-       memcpy(buf, otp, bufsz);
+       sz = min_t(uint, sz, nwords);
+       memcpy(buf, otp, sz * 2);
 
        kfree(otp);
 
@@ -1139,13 +836,13 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
                return -ENODATA;
 
        /* fixup the endianness so crc8 will pass */
-       htol16_buf(buf, bufsz);
-       if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2,
+       cpu_to_le16_buf(buf, sz);
+       if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
                 CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
                err = -EIO;
-
-       /* now correct the endianness of the byte array */
-       ltoh16_buf(buf, bufsz);
+       else
+               /* now correct the endianness of the byte array */
+               le16_to_cpu_buf(buf, sz);
 
        return err;
 }
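Both read paths now defer the little-endian fixup until after the CRC check. The byte-swap helpers referenced here, cpu_to_le16_buf() and le16_to_cpu_buf(), can be pictured roughly as follows (a sketch assuming only the standard byteorder macros, not the driver's exact definitions):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: convert an array of little-endian 16-bit words to CPU order
 * in place, and the reverse; both are no-ops on little-endian hosts.
 */
static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
{
	while (nwords--) {
		*buf = le16_to_cpu(*(__le16 *)buf);
		buf++;
	}
}

static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
{
	while (nwords--) {
		*(__le16 *)buf = cpu_to_le16(*buf);
		buf++;
	}
}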
@@ -1157,7 +854,7 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
 static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
 {
        u16 *srom;
-       u16 __iomem *sromwindow;
+       u8 __iomem *sromwindow;
        u8 sromrev = 0;
        u32 sr;
        int err = 0;
@@ -1173,29 +870,16 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
 
        crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
        if (ai_is_sprom_available(sih)) {
-               err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
-                                    true);
-
-               if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
-                   (((sih->buscoretype == PCIE_CORE_ID)
-                     && (sih->buscorerev >= 6))
-                    || ((sih->buscoretype == PCI_CORE_ID)
-                        && (sih->buscorerev >= 0xe)))) {
-                       /* sromrev >= 4, read more */
-                       err = sprom_read_pci(sih, sromwindow, 0, srom,
-                                            SROM4_WORDS, true);
-                       sromrev = srom[SROM4_CRCREV] & 0xff;
-               } else if (err == 0) {
-                       /* srom is good and is rev < 4 */
+               err = sprom_read_pci(sih, sromwindow, 0, srom,
+                                    SROM4_WORDS, true);
+
+               if (err == 0)
+                       /* srom read and passed crc */
                        /* top word of sprom contains version and crc8 */
-                       sromrev = srom[SROM_CRCREV] & 0xff;
-                       /* bcm4401 sroms misprogrammed */
-                       if (sromrev == 0x10)
-                               sromrev = 1;
-               }
+                       sromrev = srom[SROM4_CRCREV] & 0xff;
        } else {
                /* Use OTP if SPROM not available */
-               err = otp_read_pci(sih, srom, SROM_MAX);
+               err = otp_read_pci(sih, srom, SROM4_WORDS);
                if (err == 0)
                        /* OTP only contain SROM rev8/rev9 for now */
                        sromrev = srom[SROM4_CRCREV] & 0xff;
@@ -1208,10 +892,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
                sr = 1 << sromrev;
 
                /*
-                * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8,
-                * 9
+                * srom version check: Current valid versions: 8, 9
                 */
-               if ((sr & 0x33e) == 0) {
+               if ((sr & 0x300) == 0) {
                        err = -EINVAL;
                        goto errout;
                }
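With both the SPROM and OTP paths now parsed as rev 8/9 images, the sanity-check mask shrinks to match; spelled out:

/* sr = 1 << sromrev; the mask encodes the accepted revisions:
 *   0x300 = (1 << 8) | (1 << 9)   -> only SROM rev 8 and 9 pass
 *   0x33e = bits 1-5, 8 and 9     -> the set accepted before this change
 * e.g. sromrev == 4 gives sr = 0x010, and 0x010 & 0x300 == 0, so -EINVAL.
 */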
index 708c43f..c81df97 100644 (file)
@@ -26,9 +26,4 @@ extern void srom_free_vars(struct si_pub *sih);
 extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
                     uint byteoff, uint nbytes, u16 *buf, bool check_crc);
 
-/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
- *   and extract from it into name=value pairs
- */
-extern int srom_parsecis(u8 **pcis, uint ciscnt,
-                        char **vars, uint *count);
 #endif                         /* _BRCM_SROM_H_ */
index f27c489..b7537f7 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
+
 #include <brcmu_utils.h>
 
 MODULE_AUTHOR("Broadcom Corporation");
@@ -40,74 +41,20 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
 /* Free the driver packet. Free the tag if present */
 void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
 {
-       struct sk_buff *nskb;
-       int nest = 0;
-
-       /* perversion: we use skb->next to chain multi-skb packets */
-       while (skb) {
-               nskb = skb->next;
-               skb->next = NULL;
-
-               if (skb->destructor)
-                       /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
-                        * destructor exists
-                        */
-                       dev_kfree_skb_any(skb);
-               else
-                       /* can free immediately (even in_irq()) if destructor
-                        * does not exist
-                        */
-                       dev_kfree_skb(skb);
-
-               nest++;
-               skb = nskb;
-       }
+       WARN_ON(skb->next);
+       if (skb->destructor)
+               /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+                * destructor exists
+                */
+               dev_kfree_skb_any(skb);
+       else
+               /* can free immediately (even in_irq()) if destructor
+                * does not exist
+                */
+               dev_kfree_skb(skb);
 }
 EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
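/* Illustration (hypothetical caller, not driver code): the chain walk has
 * moved out of brcmu_pkt_buf_free_skb(), which now frees exactly one buffer
 * and warns if it is still linked; a caller that really holds a ->next
 * chain would have to walk it itself.  Assumes <linux/skbuff.h> and the
 * brcmu_utils.h declaration of brcmu_pkt_buf_free_skb().
 */
static void free_skb_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		brcmu_pkt_buf_free_skb(skb);
		skb = next;
	}
}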
 
-
-/* copy a buffer into a pkt buffer chain */
-uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
-               unsigned char *buf)
-{
-       uint n, ret = 0;
-
-       /* skip 'offset' bytes */
-       for (; p && offset; p = p->next) {
-               if (offset < (uint) (p->len))
-                       break;
-               offset -= p->len;
-       }
-
-       if (!p)
-               return 0;
-
-       /* copy the data */
-       for (; p && len; p = p->next) {
-               n = min((uint) (p->len) - offset, (uint) len);
-               memcpy(p->data + offset, buf, n);
-               buf += n;
-               len -= n;
-               ret += n;
-               offset = 0;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL(brcmu_pktfrombuf);
-
-/* return total length of buffer chain */
-uint brcmu_pkttotlen(struct sk_buff *p)
-{
-       uint total;
-
-       total = 0;
-       for (; p; p = p->next)
-               total += p->len;
-       return total;
-}
-EXPORT_SYMBOL(brcmu_pkttotlen);
-
 /*
  * osl multiple-precedence packet queue
  * hi_prec is always >= the number of the highest non-empty precedence
@@ -115,21 +62,13 @@ EXPORT_SYMBOL(brcmu_pkttotlen);
 struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
                                      struct sk_buff *p)
 {
-       struct pktq_prec *q;
+       struct sk_buff_head *q;
 
        if (pktq_full(pq) || pktq_pfull(pq, prec))
                return NULL;
 
-       q = &pq->q[prec];
-
-       if (q->head)
-               q->tail->prev = p;
-       else
-               q->head = p;
-
-       q->tail = p;
-       q->len++;
-
+       q = &pq->q[prec].skblist;
+       skb_queue_tail(q, p);
        pq->len++;
 
        if (pq->hi_prec < prec)
@@ -142,20 +81,13 @@ EXPORT_SYMBOL(brcmu_pktq_penq);
 struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
                                           struct sk_buff *p)
 {
-       struct pktq_prec *q;
+       struct sk_buff_head *q;
 
        if (pktq_full(pq) || pktq_pfull(pq, prec))
                return NULL;
 
-       q = &pq->q[prec];
-
-       if (q->head == NULL)
-               q->tail = p;
-
-       p->prev = q->head;
-       q->head = p;
-       q->len++;
-
+       q = &pq->q[prec].skblist;
+       skb_queue_head(q, p);
        pq->len++;
 
        if (pq->hi_prec < prec)
@@ -167,53 +99,30 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head);
 
 struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
 {
-       struct pktq_prec *q;
+       struct sk_buff_head *q;
        struct sk_buff *p;
 
-       q = &pq->q[prec];
-
-       p = q->head;
+       q = &pq->q[prec].skblist;
+       p = skb_dequeue(q);
        if (p == NULL)
                return NULL;
 
-       q->head = p->prev;
-       if (q->head == NULL)
-               q->tail = NULL;
-
-       q->len--;
-
        pq->len--;
-
-       p->prev = NULL;
-
        return p;
 }
 EXPORT_SYMBOL(brcmu_pktq_pdeq);
 
 struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
 {
-       struct pktq_prec *q;
-       struct sk_buff *p, *prev;
-
-       q = &pq->q[prec];
+       struct sk_buff_head *q;
+       struct sk_buff *p;
 
-       p = q->head;
+       q = &pq->q[prec].skblist;
+       p = skb_dequeue_tail(q);
        if (p == NULL)
                return NULL;
 
-       for (prev = NULL; p != q->tail; p = p->prev)
-               prev = p;
-
-       if (prev)
-               prev->prev = NULL;
-       else
-               q->head = NULL;
-
-       q->tail = prev;
-       q->len--;
-
        pq->len--;
-
        return p;
 }
 EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
@@ -222,31 +131,17 @@ void
 brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
                  bool (*fn)(struct sk_buff *, void *), void *arg)
 {
-       struct pktq_prec *q;
-       struct sk_buff *p, *prev = NULL;
+       struct sk_buff_head *q;
+       struct sk_buff *p, *next;
 
-       q = &pq->q[prec];
-       p = q->head;
-       while (p) {
+       q = &pq->q[prec].skblist;
+       skb_queue_walk_safe(q, p, next) {
                if (fn == NULL || (*fn) (p, arg)) {
-                       bool head = (p == q->head);
-                       if (head)
-                               q->head = p->prev;
-                       else
-                               prev->prev = p->prev;
-                       p->prev = NULL;
+                       skb_unlink(p, q);
                        brcmu_pkt_buf_free_skb(p);
-                       q->len--;
                        pq->len--;
-                       p = (head ? q->head : prev->prev);
-               } else {
-                       prev = p;
-                       p = p->prev;
                }
        }
-
-       if (q->head == NULL)
-               q->tail = NULL;
 }
 EXPORT_SYMBOL(brcmu_pktq_pflush);
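The flush path keeps the filter-callback semantics but leans on skb_queue_walk_safe(), which latches the next pointer before the loop body runs so the current entry can be unlinked and freed mid-walk; reduced to a stand-alone sketch (illustrative names only):

static void drop_all(struct sk_buff_head *q)
{
	struct sk_buff *p, *next;

	skb_queue_walk_safe(q, p, next) {
		skb_unlink(p, q);	/* safe: next was saved before the body ran */
		dev_kfree_skb(p);
	}
}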
 
@@ -271,8 +166,10 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
 
        pq->max = (u16) max_len;
 
-       for (prec = 0; prec < num_prec; prec++)
+       for (prec = 0; prec < num_prec; prec++) {
                pq->q[prec].max = pq->max;
+               skb_queue_head_init(&pq->q[prec].skblist);
+       }
 }
 EXPORT_SYMBOL(brcmu_pktq_init);
 
@@ -284,13 +181,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
                return NULL;
 
        for (prec = 0; prec < pq->hi_prec; prec++)
-               if (pq->q[prec].head)
+               if (!skb_queue_empty(&pq->q[prec].skblist))
                        break;
 
        if (prec_out)
                *prec_out = prec;
 
-       return pq->q[prec].tail;
+       return skb_peek_tail(&pq->q[prec].skblist);
 }
 EXPORT_SYMBOL(brcmu_pktq_peek_tail);
 
@@ -303,7 +200,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
 
        for (prec = 0; prec <= pq->hi_prec; prec++)
                if (prec_bmp & (1 << prec))
-                       len += pq->q[prec].len;
+                       len += pq->q[prec].skblist.qlen;
 
        return len;
 }
@@ -313,39 +210,32 @@ EXPORT_SYMBOL(brcmu_pktq_mlen);
 struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
                                      int *prec_out)
 {
-       struct pktq_prec *q;
+       struct sk_buff_head *q;
        struct sk_buff *p;
        int prec;
 
        if (pq->len == 0)
                return NULL;
 
-       while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+       while ((prec = pq->hi_prec) > 0 &&
+              skb_queue_empty(&pq->q[prec].skblist))
                pq->hi_prec--;
 
-       while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+       while ((prec_bmp & (1 << prec)) == 0 ||
+              skb_queue_empty(&pq->q[prec].skblist))
                if (prec-- == 0)
                        return NULL;
 
-       q = &pq->q[prec];
-
-       p = q->head;
+       q = &pq->q[prec].skblist;
+       p = skb_dequeue(q);
        if (p == NULL)
                return NULL;
 
-       q->head = p->prev;
-       if (q->head == NULL)
-               q->tail = NULL;
-
-       q->len--;
+       pq->len--;
 
        if (prec_out)
                *prec_out = prec;
 
-       pq->len--;
-
-       p->prev = NULL;
-
        return p;
 }
 EXPORT_SYMBOL(brcmu_pktq_mdeq);
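Taken together, the queue rework swaps the driver's hand-rolled head/tail/prev bookkeeping for the kernel's struct sk_buff_head, which carries its own qlen counter and lock; the core pattern, as a stand-alone sketch using nothing beyond the standard skbuff API:

#include <linux/skbuff.h>

static struct sk_buff *pktq_roundtrip(struct sk_buff *p)
{
	struct sk_buff_head q;

	skb_queue_head_init(&q);	/* empty queue, qlen = 0 */
	skb_queue_tail(&q, p);		/* enqueue at the tail   */
	return skb_dequeue(&q);		/* FIFO dequeue, or NULL */
}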
@@ -364,23 +254,3 @@ void brcmu_prpkt(const char *msg, struct sk_buff *p0)
 }
 EXPORT_SYMBOL(brcmu_prpkt);
 #endif                         /* defined(BCMDBG) */
-
-#if defined(BCMDBG)
-/*
- * print bytes formatted as hex to a string. return the resulting
- * string length
- */
-int brcmu_format_hex(char *str, const void *bytes, int len)
-{
-       int i;
-       char *p = str;
-       const u8 *src = (const u8 *)bytes;
-
-       for (i = 0; i < len; i++) {
-               p += snprintf(p, 3, "%02X", *src);
-               src++;
-       }
-       return (int)(p - str);
-}
-EXPORT_SYMBOL(brcmu_format_hex);
-#endif                         /* defined(BCMDBG) */
index 7d0f46e..ad249a0 100644 (file)
@@ -65,9 +65,7 @@
 #define ETHER_ADDR_STR_LEN     18
 
 struct pktq_prec {
-       struct sk_buff *head;   /* first packet to dequeue */
-       struct sk_buff *tail;   /* last packet to dequeue */
-       u16 len;                /* number of queued packets */
+       struct sk_buff_head skblist;
        u16 max;                /* maximum number of queued packets */
 };
 
@@ -88,32 +86,32 @@ struct pktq {
 
 static inline int pktq_plen(struct pktq *pq, int prec)
 {
-       return pq->q[prec].len;
+       return pq->q[prec].skblist.qlen;
 }
 
 static inline int pktq_pavail(struct pktq *pq, int prec)
 {
-       return pq->q[prec].max - pq->q[prec].len;
+       return pq->q[prec].max - pq->q[prec].skblist.qlen;
 }
 
 static inline bool pktq_pfull(struct pktq *pq, int prec)
 {
-       return pq->q[prec].len >= pq->q[prec].max;
+       return pq->q[prec].skblist.qlen >= pq->q[prec].max;
 }
 
 static inline bool pktq_pempty(struct pktq *pq, int prec)
 {
-       return pq->q[prec].len == 0;
+       return skb_queue_empty(&pq->q[prec].skblist);
 }
 
 static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
 {
-       return pq->q[prec].head;
+       return skb_peek(&pq->q[prec].skblist);
 }
 
 static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
 {
-       return pq->q[prec].tail;
+       return skb_peek_tail(&pq->q[prec].skblist);
 }
 
 extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
@@ -172,24 +170,16 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
                bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
-/* packet */
-extern uint brcmu_pktfrombuf(struct sk_buff *p,
-       uint offset, int len, unsigned char *buf);
-extern uint brcmu_pkttotlen(struct sk_buff *p);
-
 /* ip address */
 struct ipv4_addr;
 
+
+/* externs */
+/* format/print */
 #ifdef BCMDBG
 extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif                         /* BCMDBG */
 
-/* externs */
-/* format/print */
-#if defined(BCMDBG)
-extern int brcmu_format_hex(char *str, const void *bytes, int len);
-#endif
-
 #endif                         /* _BRCMU_UTILS_H_ */
index 1e5f310..f0d8c04 100644 (file)
@@ -62,7 +62,6 @@
 
 #define WL_RADIO_SW_DISABLE            (1<<0)
 #define WL_RADIO_HW_DISABLE            (1<<1)
-#define WL_RADIO_MPC_DISABLE           (1<<2)
 /* some countries don't support any channel */
 #define WL_RADIO_COUNTRY_DISABLE       (1<<3)
 
index 4fcb956..4e9b7e4 100644 (file)
@@ -77,8 +77,9 @@
 #define        DMEMS_CORE_ID           0x835   /* SDR/DDR1 memory controller core */
 #define        DEF_SHIM_COMP           0x837   /* SHIM component in ubus/6362 */
 #define OOB_ROUTER_CORE_ID     0x367   /* OOB router core ID */
-/* Default component, in ai chips it maps all unused address ranges */
-#define        DEF_AI_COMP             0xfff
+#define        DEF_AI_COMP             0xfff   /* Default component, in ai chips it
+                                        * maps all unused address ranges
+                                        */
 
 /* Common core control flags */
 #define        SICF_BIST_EN            0x8000
 #define        SICF_FGC                0x0002
 #define        SICF_CLOCK_EN           0x0001
 
+/* Common core status flags */
+#define        SISF_BIST_DONE          0x8000
+#define        SISF_BIST_ERROR         0x4000
+#define        SISF_GATED_CLK          0x2000
+#define        SISF_DMA64              0x1000
+#define        SISF_CORE_BITS          0x0fff
+
 #endif                         /* _BRCM_SOC_H */
index 5441ad1..89e9d3a 100644 (file)
@@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
                "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
                0xe6ec52ce, 0x08649af2, 0x4b74baa0),
        PCMCIA_DEVICE_PROD_ID123(
+               "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+               0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
+       PCMCIA_DEVICE_PROD_ID123(
                "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
                0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
        PCMCIA_DEVICE_PROD_ID123(
index 045a936..18054d9 100644 (file)
@@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
        iface = netdev_priv(dev);
        local = iface->local;
 
-       strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
-       snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+       strlcpy(info->driver, "hostap", sizeof(info->driver));
+       snprintf(info->fw_version, sizeof(info->fw_version),
                 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
                 (local->sta_fw_ver >> 8) & 0xff,
                 local->sta_fw_ver & 0xff);
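The strncpy()-to-strlcpy() conversions in these ethtool hooks matter because strncpy() does not NUL-terminate when the source fills the destination, which is why the old code reserved a byte with sizeof(...) - 1; strlcpy() always terminates and takes the full buffer size. A minimal sketch (hypothetical helper, standard headers assumed):

#include <linux/ethtool.h>
#include <linux/string.h>

static void example_fill_drvinfo(struct ethtool_drvinfo *info)
{
	/* always NUL-terminated, even if the source were too long */
	strlcpy(info->driver, "hostap", sizeof(info->driver));
}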
index 127e9c6..a0e5c21 100644 (file)
@@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
        struct ipw2100_priv *priv = libipw_priv(dev);
        char fw_ver[64], ucode_ver[64];
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
        ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
        ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
        snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
                 fw_ver, priv->eeprom_version, ucode_ver);
 
-       strcpy(info->bus_info, pci_name(priv->pci_dev));
+       strlcpy(info->bus_info, pci_name(priv->pci_dev),
+               sizeof(info->bus_info));
 }
 
 static u32 ipw2100_ethtool_get_link(struct net_device *dev)
index 99a710d..018a8de 100644 (file)
@@ -131,6 +131,14 @@ static struct ieee80211_rate ipw2200_rates[] = {
 #define ipw2200_bg_rates       (ipw2200_rates + 0)
 #define ipw2200_num_bg_rates   12
 
+/* Ugly macro to convert literal channel numbers into their MHz equivalents.
+ * There are certainly some conditions that will break this (like feeding it
+ * '30'), but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+       (((x) <= 14) ? \
+       (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+       ((x) + 1000) * 5)
+
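/* Worked examples of the macro above (plain arithmetic, illustration only):
 *   channel  1:  1 * 5 + 2407   = 2412 MHz
 *   channel 14:  special-cased  = 2484 MHz
 *   channel 36: (36 + 1000) * 5 = 5180 MHz
 */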
 #ifdef CONFIG_IPW2200_QOS
 static int qos_enable = 0;
 static int qos_burst_enable = 0;
@@ -10540,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
        char date[32];
        u32 len;
 
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 
        len = sizeof(vers);
        ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10550,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
 
        snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
                 vers, date);
-       strcpy(info->bus_info, pci_name(p->pci_dev));
+       strlcpy(info->bus_info, pci_name(p->pci_dev),
+               sizeof(info->bus_info));
        info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
 }
 
index 70f5586..3d5821e 100644 (file)
@@ -66,16 +66,8 @@ extern u32 libipw_debug_level;
 do { if (libipw_debug_level & (level)) \
   printk(KERN_DEBUG "libipw: %c %s " fmt, \
          in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
-       return (libipw_debug_level & level) && net_ratelimit();
-}
 #else
 #define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
-       return false;
-}
 #endif                         /* CONFIG_LIBIPW_DEBUG */
 
 /*
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644 (file)
index 0000000..5e1a19f
--- /dev/null
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+       int p = 0;
+
+       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+                      le32_to_cpu(il->_3945.stats.flag));
+       if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+               p += scnprintf(buf + p, bufsz - p,
+                              "\tStatistics have been cleared\n");
+       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+                      (le32_to_cpu(il->_3945.stats.flag) &
+                       UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+                      (le32_to_cpu(il->_3945.stats.flag) &
+                       UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+       return p;
+}
+
+ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz =
+           sizeof(struct iwl39_stats_rx_phy) * 40 +
+           sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+       ssize_t ret;
+       struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+       struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+       struct iwl39_stats_rx_non_phy *general, *accum_general;
+       struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics displayed here are based on the last
+        * stats notification from uCode and might not reflect
+        * the current uCode activity.
+        */
+       ofdm = &il->_3945.stats.rx.ofdm;
+       cck = &il->_3945.stats.rx.cck;
+       general = &il->_3945.stats.rx.general;
+       accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+       accum_cck = &il->_3945.accum_stats.rx.cck;
+       accum_general = &il->_3945.accum_stats.rx.general;
+       delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+       delta_cck = &il->_3945.delta_stats.rx.cck;
+       delta_general = &il->_3945.delta_stats.rx.general;
+       max_ofdm = &il->_3945.max_delta.rx.ofdm;
+       max_cck = &il->_3945.max_delta.rx.cck;
+       max_general = &il->_3945.max_delta.rx.general;
+
+       pos += il3945_stats_flag(il, buf, bufsz);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "%-32s     current"
+                     "acumulative       delta         max\n",
+                     "Statistics_Rx - OFDM:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "ina_cnt:",
+                     le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+                     delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "fina_cnt:",
+                     le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+                     delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+                     le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+                     delta_ofdm->plcp_err, max_ofdm->plcp_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "crc32_err:",
+                     le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+                     delta_ofdm->crc32_err, max_ofdm->crc32_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+                     le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+                     delta_ofdm->overrun_err, max_ofdm->overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "early_overrun_err:",
+                     le32_to_cpu(ofdm->early_overrun_err),
+                     accum_ofdm->early_overrun_err,
+                     delta_ofdm->early_overrun_err,
+                     max_ofdm->early_overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "crc32_good:",
+                     le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+                     delta_ofdm->crc32_good, max_ofdm->crc32_good);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+                     le32_to_cpu(ofdm->false_alarm_cnt),
+                     accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+                     max_ofdm->false_alarm_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "fina_sync_err_cnt:",
+                     le32_to_cpu(ofdm->fina_sync_err_cnt),
+                     accum_ofdm->fina_sync_err_cnt,
+                     delta_ofdm->fina_sync_err_cnt,
+                     max_ofdm->fina_sync_err_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sfd_timeout:",
+                     le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+                     delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "fina_timeout:",
+                     le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+                     delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "unresponded_rts:",
+                     le32_to_cpu(ofdm->unresponded_rts),
+                     accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+                     max_ofdm->unresponded_rts);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n",
+                     "rxe_frame_lmt_ovrun:",
+                     le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+                     accum_ofdm->rxe_frame_limit_overrun,
+                     delta_ofdm->rxe_frame_limit_overrun,
+                     max_ofdm->rxe_frame_limit_overrun);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sent_ack_cnt:",
+                     le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+                     delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sent_cts_cnt:",
+                     le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+                     delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "%-32s     current"
+                     "acumulative       delta         max\n",
+                     "Statistics_Rx - CCK:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "ina_cnt:",
+                     le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+                     delta_cck->ina_cnt, max_cck->ina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "fina_cnt:",
+                     le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+                     delta_cck->fina_cnt, max_cck->fina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+                     le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+                     delta_cck->plcp_err, max_cck->plcp_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "crc32_err:",
+                     le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+                     delta_cck->crc32_err, max_cck->crc32_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+                     le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+                     delta_cck->overrun_err, max_cck->overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "early_overrun_err:",
+                     le32_to_cpu(cck->early_overrun_err),
+                     accum_cck->early_overrun_err,
+                     delta_cck->early_overrun_err, max_cck->early_overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "crc32_good:",
+                     le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+                     delta_cck->crc32_good, max_cck->crc32_good);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+                     le32_to_cpu(cck->false_alarm_cnt),
+                     accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+                     max_cck->false_alarm_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "fina_sync_err_cnt:",
+                     le32_to_cpu(cck->fina_sync_err_cnt),
+                     accum_cck->fina_sync_err_cnt,
+                     delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sfd_timeout:",
+                     le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+                     delta_cck->sfd_timeout, max_cck->sfd_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "fina_timeout:",
+                     le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+                     delta_cck->fina_timeout, max_cck->fina_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "unresponded_rts:",
+                     le32_to_cpu(cck->unresponded_rts),
+                     accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+                     max_cck->unresponded_rts);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n",
+                     "rxe_frame_lmt_ovrun:",
+                     le32_to_cpu(cck->rxe_frame_limit_overrun),
+                     accum_cck->rxe_frame_limit_overrun,
+                     delta_cck->rxe_frame_limit_overrun,
+                     max_cck->rxe_frame_limit_overrun);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sent_ack_cnt:",
+                     le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+                     delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sent_cts_cnt:",
+                     le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+                     delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "%-32s     current"
+                     "acumulative       delta         max\n",
+                     "Statistics_Rx - GENERAL:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "bogus_cts:",
+                     le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+                     delta_general->bogus_cts, max_general->bogus_cts);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "bogus_ack:",
+                     le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+                     delta_general->bogus_ack, max_general->bogus_ack);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "non_bssid_frames:",
+                     le32_to_cpu(general->non_bssid_frames),
+                     accum_general->non_bssid_frames,
+                     delta_general->non_bssid_frames,
+                     max_general->non_bssid_frames);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "filtered_frames:",
+                     le32_to_cpu(general->filtered_frames),
+                     accum_general->filtered_frames,
+                     delta_general->filtered_frames,
+                     max_general->filtered_frames);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n",
+                     "non_channel_beacons:",
+                     le32_to_cpu(general->non_channel_beacons),
+                     accum_general->non_channel_beacons,
+                     delta_general->non_channel_beacons,
+                     max_general->non_channel_beacons);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
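/* All of these debugfs handlers follow the same shape: snapshot the
 * counters, scnprintf() them into a temporary kernel buffer, then let
 * simple_read_from_buffer() honour *ppos and the user buffer length.
 * Reduced to a stand-alone sketch (illustrative names; <linux/fs.h>,
 * <linux/slab.h> and <linux/uaccess.h> provide the helpers used):
 */
static ssize_t example_stats_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char *buf;
	int pos = 0, bufsz = 256;
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "example counter: %u\n", 42);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}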
+ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+       ssize_t ret;
+       struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics displayed here are based on the last
+        * stats notification from uCode and might not reflect
+        * the current uCode activity.
+        */
+       tx = &il->_3945.stats.tx;
+       accum_tx = &il->_3945.accum_stats.tx;
+       delta_tx = &il->_3945.delta_stats.tx;
+       max_tx = &il->_3945.max_delta.tx;
+       pos += il3945_stats_flag(il, buf, bufsz);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "%-32s     current"
+                     "acumulative       delta         max\n",
+                     "Statistics_Tx:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "preamble:",
+                     le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+                     delta_tx->preamble_cnt, max_tx->preamble_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "rx_detected_cnt:",
+                     le32_to_cpu(tx->rx_detected_cnt),
+                     accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+                     max_tx->rx_detected_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "bt_prio_defer_cnt:",
+                     le32_to_cpu(tx->bt_prio_defer_cnt),
+                     accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+                     max_tx->bt_prio_defer_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "bt_prio_kill_cnt:",
+                     le32_to_cpu(tx->bt_prio_kill_cnt),
+                     accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+                     max_tx->bt_prio_kill_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "few_bytes_cnt:",
+                     le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+                     delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "cts_timeout:",
+                     le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+                     delta_tx->cts_timeout, max_tx->cts_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "ack_timeout:",
+                     le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+                     delta_tx->ack_timeout, max_tx->ack_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "expected_ack_cnt:",
+                     le32_to_cpu(tx->expected_ack_cnt),
+                     accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+                     max_tx->expected_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "actual_ack_cnt:",
+                     le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+                     delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+       ssize_t ret;
+       struct iwl39_stats_general *general, *accum_general;
+       struct iwl39_stats_general *delta_general, *max_general;
+       struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+       struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The statistics displayed here are based on the last
+        * stats notification from uCode and might not reflect
+        * the current uCode activity.
+        */
+       general = &il->_3945.stats.general;
+       dbg = &il->_3945.stats.general.dbg;
+       div = &il->_3945.stats.general.div;
+       accum_general = &il->_3945.accum_stats.general;
+       delta_general = &il->_3945.delta_stats.general;
+       max_general = &il->_3945.max_delta.general;
+       accum_dbg = &il->_3945.accum_stats.general.dbg;
+       delta_dbg = &il->_3945.delta_stats.general.dbg;
+       max_dbg = &il->_3945.max_delta.general.dbg;
+       accum_div = &il->_3945.accum_stats.general.div;
+       delta_div = &il->_3945.delta_stats.general.div;
+       max_div = &il->_3945.max_delta.general.div;
+       pos += il3945_stats_flag(il, buf, bufsz);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "%-32s     current"
+                     "acumulative       delta         max\n",
+                     "Statistics_General:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "burst_check:",
+                     le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+                     delta_dbg->burst_check, max_dbg->burst_check);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "burst_count:",
+                     le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+                     delta_dbg->burst_count, max_dbg->burst_count);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "sleep_time:",
+                     le32_to_cpu(general->sleep_time),
+                     accum_general->sleep_time, delta_general->sleep_time,
+                     max_general->sleep_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "slots_out:",
+                     le32_to_cpu(general->slots_out), accum_general->slots_out,
+                     delta_general->slots_out, max_general->slots_out);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "slots_idle:",
+                     le32_to_cpu(general->slots_idle),
+                     accum_general->slots_idle, delta_general->slots_idle,
+                     max_general->slots_idle);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+                     le32_to_cpu(general->ttl_timestamp));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "tx_on_a:",
+                     le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+                     delta_div->tx_on_a, max_div->tx_on_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "tx_on_b:",
+                     le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+                     delta_div->tx_on_b, max_div->tx_on_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "exec_time:",
+                     le32_to_cpu(div->exec_time), accum_div->exec_time,
+                     delta_div->exec_time, max_div->exec_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "  %-30s %10u  %10u  %10u  %10u\n", "probe_time:",
+                     le32_to_cpu(div->probe_time), accum_div->probe_time,
+                     delta_div->probe_time, max_div->probe_time);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644 (file)
index 0000000..daef6b5
--- /dev/null
@@ -0,0 +1,3977 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME       "iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION        \
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION  IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+ /* module parameters */
+struct il_mod_params il3945_mod_params = {
+       .sw_crypto = 1,
+       .restart_fw = 1,
+       .disable_hw_scan = 1,
+       /* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39 is used to determine whether the AUX/MAIN antennas are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN      - Force MAIN antenna
+ * IL_ANTENNA_AUX       - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+       switch (il3945_mod_params.antenna) {
+       case IL_ANTENNA_DIVERSITY:
+               return 0;
+
+       case IL_ANTENNA_MAIN:
+               if (eeprom->antenna_switch_type)
+                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+       case IL_ANTENNA_AUX:
+               if (eeprom->antenna_switch_type)
+                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+       }
+
+       /* bad antenna selector value */
+       IL_ERR("Bad antenna selector value (0x%x)\n",
+              il3945_mod_params.antenna);
+
+       return 0;               /* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+                                struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       int ret;
+
+       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+       if (sta_id == il->ctx.bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->hw_key_idx = keyconf->keyidx;
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+       memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+       memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+       if ((il->stations[sta_id].sta.key.
+            key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+               il->stations[sta_id].sta.key.key_offset =
+                   il_get_free_ucode_key_idx(il);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+            "no space for a new key");
+
+       il->stations[sta_id].sta.key.key_flags = key_flags;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       D_INFO("hwcrypto: modify ucode station key info\n");
+
+       ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+                                struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+                               struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+       unsigned long flags;
+       struct il_addsta_cmd sta_cmd;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+       memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+       il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       D_INFO("hwcrypto: clear ucode station key info\n");
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+                      u8 sta_id)
+{
+       int ret = 0;
+
+       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+               break;
+       default:
+               IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+               ret = -EINVAL;
+       }
+
+       D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+             keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+       return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+       int ret = -EOPNOTSUPP;
+
+       return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+       if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+           key->cipher == WLAN_CIPHER_SUITE_WEP104)
+               return -EOPNOTSUPP;
+
+       IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+       return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+       struct list_head *element;
+
+       D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+       while (!list_empty(&il->free_frames)) {
+               element = il->free_frames.next;
+               list_del(element);
+               kfree(list_entry(element, struct il3945_frame, list));
+               il->frames_count--;
+       }
+
+       if (il->frames_count) {
+               IL_WARN("%d frames still in use.  Did we lose one?\n",
+                       il->frames_count);
+               il->frames_count = 0;
+       }
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+       struct il3945_frame *frame;
+       struct list_head *element;
+       if (list_empty(&il->free_frames)) {
+               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+               if (!frame) {
+                       IL_ERR("Could not allocate frame!\n");
+                       return NULL;
+               }
+
+               il->frames_count++;
+               return frame;
+       }
+
+       element = il->free_frames.next;
+       list_del(element);
+       return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+       memset(frame, 0, sizeof(*frame));
+       list_add(&frame->list, &il->free_frames);
+}
+
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+                        int left)
+{
+
+       if (!il_is_associated(il) || !il->beacon_skb)
+               return 0;
+
+       if (il->beacon_skb->len > left)
+               return 0;
+
+       memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+       return il->beacon_skb->len;
+}
+
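+/* Build a C_TX_BEACON command from the cached beacon at the lowest PLCP
+ * rate and send it synchronously to the uCode. */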
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+       struct il3945_frame *frame;
+       unsigned int frame_size;
+       int rc;
+       u8 rate;
+
+       frame = il3945_get_free_frame(il);
+
+       if (!frame) {
+               IL_ERR("Could not obtain free frame buffer for beacon command.\n");
+               return -ENOMEM;
+       }
+
+       rate = il_get_lowest_plcp(il, &il->ctx);
+
+       frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+       rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+       il3945_free_frame(il, frame);
+
+       return rc;
+}
+
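+/* Free the shared DMA area set up by the hardware-params code, if any. */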
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+       if (il->_3945.shared_virt)
+               dma_free_coherent(&il->pci_dev->dev,
+                                 sizeof(struct il3945_shared),
+                                 il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
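+/* Fill the Tx command's security fields from the station's hardware key:
+ * copy CCMP or WEP key material and set the matching sec_ctl bits; TKIP
+ * gets no hardware setup here. */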
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+                            struct il_device_cmd *cmd,
+                            struct sk_buff *skb_frag, int sta_id)
+{
+       struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+       struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+       tx_cmd->sec_ctl = 0;
+
+       switch (keyinfo->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+               memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+               D_TX("tx_cmd with AES hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_TKIP:
+               break;
+
+       case WLAN_CIPHER_SUITE_WEP104:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_WEP40:
+               tx_cmd->sec_ctl |=
+                   TX_CMD_SEC_WEP | (info->control.hw_key->
+                                     hw_key_idx & TX_CMD_SEC_MSK) <<
+                   TX_CMD_SEC_SHIFT;
+
+               memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+               D_TX("Configuring packet for WEP encryption with key %d\n",
+                    info->control.hw_key->hw_key_idx);
+               break;
+
+       default:
+               IL_ERR("Unknown encryption cipher 0x%x\n", keyinfo->cipher);
+               break;
+       }
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_hdr *hdr, u8 std_id)
+{
+       struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+       __le32 tx_flags = tx_cmd->tx_flags;
+       __le16 fc = hdr->frame_control;
+
+       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+               tx_flags |= TX_CMD_FLG_ACK_MSK;
+               if (ieee80211_is_mgmt(fc))
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (ieee80211_is_probe_resp(fc) &&
+                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+                       tx_flags |= TX_CMD_FLG_TSF_MSK;
+       } else {
+               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       tx_cmd->sta_id = std_id;
+       if (ieee80211_has_morefrags(fc))
+               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tx_cmd->tid_tspec = qc[0] & 0xf;
+               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+       } else {
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+               else
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+       } else {
+               tx_cmd->timeout.pm_frame_timeout = 0;
+       }
+
+       tx_cmd->driver_txop = 0;
+       tx_cmd->tx_flags = tx_flags;
+       tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct il3945_tx_cmd *tx_cmd;
+       struct il_tx_queue *txq = NULL;
+       struct il_queue *q = NULL;
+       struct il_device_cmd *out_cmd;
+       struct il_cmd_meta *out_meta;
+       dma_addr_t phys_addr;
+       dma_addr_t txcmd_phys;
+       int txq_id = skb_get_queue_mapping(skb);
+       u16 len, idx, hdr_len;
+       u8 id;
+       u8 unicast;
+       u8 sta_id;
+       u8 tid = 0;
+       __le16 fc;
+       u8 wait_write_ptr = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&il->lock, flags);
+       if (il_is_rfkill(il)) {
+               D_DROP("Dropping - RF KILL\n");
+               goto drop_unlock;
+       }
+
+       if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+           IL_INVALID_RATE) {
+               IL_ERR("ERROR: No TX rate available.\n");
+               goto drop_unlock;
+       }
+
+       unicast = !is_multicast_ether_addr(hdr->addr1);
+       id = 0;
+
+       fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (ieee80211_is_auth(fc))
+               D_TX("Sending AUTH frame\n");
+       else if (ieee80211_is_assoc_req(fc))
+               D_TX("Sending ASSOC frame\n");
+       else if (ieee80211_is_reassoc_req(fc))
+               D_TX("Sending REASSOC frame\n");
+#endif
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       hdr_len = ieee80211_hdrlen(fc);
+
+       /* Find idx into station table for destination station */
+       sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+       if (sta_id == IL_INVALID_STATION) {
+               D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+               goto drop;
+       }
+
+       D_RATE("station Id %d\n", sta_id);
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               if (unlikely(tid >= MAX_TID_COUNT))
+                       goto drop;
+       }
+
+       /* Descriptor for chosen Tx queue */
+       txq = &il->txq[txq_id];
+       q = &txq->q;
+
+       if ((il_queue_space(q) < q->high_mark))
+               goto drop;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+       /* Set up driver data for this TFD */
+       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+       txq->txb[q->write_ptr].skb = skb;
+       txq->txb[q->write_ptr].ctx = &il->ctx;
+
+       /* Init first empty entry in queue's array of Tx/cmd buffers */
+       out_cmd = txq->cmd[idx];
+       out_meta = &txq->meta[idx];
+       tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+       memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+       /*
+        * Set up the Tx-command (not MAC!) header.
+        * Store the chosen Tx queue and TFD idx within the sequence field;
+        * after Tx, uCode's Tx response will return this value so driver can
+        * locate the frame within the tx queue and do post-tx processing.
+        */
+       out_cmd->hdr.cmd = C_TX;
+       out_cmd->hdr.sequence =
+           cpu_to_le16((u16)
+                       (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+       if (info->control.hw_key)
+               il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+       /* TODO need this for burst mode later on */
+       il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+       /* set is_hcca to 0; it probably will never be implemented */
+       il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id, 0);
+
+       /* Total # bytes to be transmitted */
+       len = (u16) skb->len;
+       tx_cmd->len = cpu_to_le16(len);
+
+       il_dbg_log_tx_data_frame(il, len, hdr);
+       il_update_stats(il, true, fc, len);
+       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
+
+       D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+       D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+       il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+       il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+                         ieee80211_hdrlen(fc));
+
+       /*
+        * Use the first empty entry in this queue's command buffer array
+        * to contain the Tx command and MAC header concatenated together
+        * (payload data will be in another buffer).
+        * Size of this varies, due to varying MAC header length.
+        * If end is not dword aligned, we'll have 2 extra bytes at the end
+        * of the MAC header (device reads on dword boundaries).
+        * We'll tell device about this padding later.
+        */
+       len =
+           sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+           hdr_len;
+       len = (len + 3) & ~3;
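+       /* (len + 3) & ~3 rounds up to a dword boundary, e.g. 62 -> 64 */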
+
+       /* Physical address of this Tx command's header (not MAC header!),
+        * within command buffer array. */
+       txcmd_phys =
+           pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+       /* we do not map meta data ... so we can safely access address to
+        * provide to unmap command*/
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, len);
+
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
+                                                0);
+
+       /* Set up TFD's 2nd entry to point directly to remainder of skb,
+        * if any (802.11 null frames have no payload). */
+       len = skb->len - hdr_len;
+       if (len) {
+               phys_addr =
+                   pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+                                  PCI_DMA_TODEVICE);
+               il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+                                                        len, 0, U32_PAD(len));
+       }
+
+       /* Tell device the write idx *just past* this latest filled TFD */
+       q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+       il_txq_update_write_ptr(il, txq);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+               if (wait_write_ptr) {
+                       spin_lock_irqsave(&il->lock, flags);
+                       txq->need_update = 1;
+                       il_txq_update_write_ptr(il, txq);
+                       spin_unlock_irqrestore(&il->lock, flags);
+               }
+
+               il_stop_queue(il, txq);
+       }
+
+       return 0;
+
+drop_unlock:
+       spin_unlock_irqrestore(&il->lock, flags);
+drop:
+       return -1;
+}
+
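+/* Build and send a C_SPECTRUM_MEASUREMENT command for a single channel,
+ * converting the requested start time to beacon-relative units when
+ * associated, and translate the uCode response into an errno. */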
+static int
+il3945_get_measurement(struct il_priv *il,
+                      struct ieee80211_measurement_params *params, u8 type)
+{
+       struct il_spectrum_cmd spectrum;
+       struct il_rx_pkt *pkt;
+       struct il_host_cmd cmd = {
+               .id = C_SPECTRUM_MEASUREMENT,
+               .data = (void *)&spectrum,
+               .flags = CMD_WANT_SKB,
+       };
+       u32 add_time = le64_to_cpu(params->start_time);
+       int rc;
+       int spectrum_resp_status;
+       int duration = le16_to_cpu(params->duration);
+       struct il_rxon_context *ctx = &il->ctx;
+
+       if (il_is_associated(il))
+               add_time =
+                   il_usecs_to_beacons(il,
+                                       le64_to_cpu(params->start_time) -
+                                       il->_3945.last_tsf,
+                                       le16_to_cpu(ctx->timing.
+                                                   beacon_interval));
+
+       memset(&spectrum, 0, sizeof(spectrum));
+
+       spectrum.channel_count = cpu_to_le16(1);
+       spectrum.flags =
+           RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+       spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+       cmd.len = sizeof(spectrum);
+       spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+       if (il_is_associated(il))
+               spectrum.start_time =
+                   il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+                                      le16_to_cpu(ctx->timing.
+                                                  beacon_interval));
+       else
+               spectrum.start_time = 0;
+
+       spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+       spectrum.channels[0].channel = params->channel;
+       spectrum.channels[0].type = type;
+       if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+               spectrum.flags |=
+                   RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+                   RXON_FLG_TGG_PROTECT_MSK;
+
+       rc = il_send_cmd_sync(il, &cmd);
+       if (rc)
+               return rc;
+
+       pkt = (struct il_rx_pkt *)cmd.reply_page;
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from C_SPECTRUM_MEASUREMENT command\n");
+               rc = -EIO;
+       }
+
+       spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+       switch (spectrum_resp_status) {
+       case 0:         /* Command will be handled */
+               if (pkt->u.spectrum.id != 0xff) {
+                       D_INFO("Replaced existing measurement: %d\n",
+                              pkt->u.spectrum.id);
+                       il->measurement_status &= ~MEASUREMENT_READY;
+               }
+               il->measurement_status |= MEASUREMENT_ACTIVE;
+               rc = 0;
+               break;
+
+       case 1:         /* Command will not be handled */
+               rc = -EAGAIN;
+               break;
+       }
+
+       il_free_pages(il, cmd.reply_page);
+
+       return rc;
+}
+
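+/* Handle the uCode ALIVE notification: cache the response, then (after a
+ * short delay to let RF-kill settle) schedule the matching init or runtime
+ * alive_start work. */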
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_alive_resp *palive;
+       struct delayed_work *pwork;
+
+       palive = &pkt->u.alive_frame;
+
+       D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+              palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+               D_INFO("Initialization Alive received.\n");
+               memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+                      sizeof(struct il_alive_resp));
+               pwork = &il->init_alive_start;
+       } else {
+               D_INFO("Runtime Alive received.\n");
+               memcpy(&il->card_alive, &pkt->u.alive_frame,
+                      sizeof(struct il_alive_resp));
+               pwork = &il->alive_start;
+               il3945_disable_events(il);
+       }
+
+       /* We delay the ALIVE response by 5ms to
+        * give the HW RF Kill time to activate... */
+       if (palive->is_valid == UCODE_VALID_OK)
+               queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+       else
+               IL_WARN("uCode did not respond OK.\n");
+}
+
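+/* C_ADD_STA response: nothing to do here beyond logging the status. */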
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+       D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
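+/* Beacon status notification: record whether we are the IBSS manager. */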
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+       u8 rate = beacon->beacon_notify_hdr.rate;
+
+       D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+            le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+            beacon->beacon_notify_hdr.failure_frame,
+            le32_to_cpu(beacon->ibss_mgr_status),
+            le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+       il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+       unsigned long status = il->status;
+
+       IL_WARN("Card state received: HW:%s SW:%s\n",
+               (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+               (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+       _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       if (flags & HW_CARD_DISABLED)
+               set_bit(S_RF_KILL_HW, &il->status);
+       else
+               clear_bit(S_RF_KILL_HW, &il->status);
+
+       il_scan_cancel(il);
+
+       if ((test_bit(S_RF_KILL_HW, &status) !=
+            test_bit(S_RF_KILL_HW, &il->status)))
+               wiphy_rfkill_set_hw_state(il->hw->wiphy,
+                                         test_bit(S_RF_KILL_HW, &il->status));
+       else
+               wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+       il->handlers[N_ALIVE] = il3945_hdl_alive;
+       il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+       il->handlers[N_ERROR] = il_hdl_error;
+       il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+       il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+       il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+       il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+       il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+       /*
+        * The same handler is used for both the REPLY to a discrete
+        * stats request from the host as well as for the periodic
+        * stats notifications (after received beacons) from the uCode.
+        */
+       il->handlers[C_STATS] = il3945_hdl_c_stats;
+       il->handlers[N_STATS] = il3945_hdl_stats;
+
+       il_setup_rx_scan_handlers(il);
+       il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+       /* Set up hardware specific Rx handlers */
+       il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt.  The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in il->rxq->rx_free.  When
+ *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the il->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   il->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the il->rxq.  The driver 'processed' idx is updated.
+ * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
+ *   IDX is not incremented and il->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE idx.  If insufficient rx_free buffers
+ *                            are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx()         Detach il_rx_bufs from pool up to the
+ *                            READ IDX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls il3945_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+       return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+       struct il_rx_queue *rxq = &il->rxq;
+       struct list_head *element;
+       struct il_rx_buf *rxb;
+       unsigned long flags;
+       int write;
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       write = rxq->write & ~0x7;
+       while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+               /* Get next free Rx buffer, remove from free list */
+               element = rxq->rx_free.next;
+               rxb = list_entry(element, struct il_rx_buf, list);
+               list_del(element);
+
+               /* Point to Rx buffer via next RBD in circular buffer */
+               rxq->bd[rxq->write] =
+                   il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+               rxq->queue[rxq->write] = rxb;
+               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+               rxq->free_count--;
+       }
+       spin_unlock_irqrestore(&rxq->lock, flags);
+       /* If the pre-allocated buffer pool is dropping low, schedule to
+        * refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               queue_work(il->workqueue, &il->rx_replenish);
+
+       /* If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8. */
+       if (rxq->write_actual != (rxq->write & ~0x7) ||
+           abs(rxq->write - rxq->read) > 7) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               rxq->need_update = 1;
+               spin_unlock_irqrestore(&rxq->lock, flags);
+               il_rx_queue_update_write_ptr(il, rxq);
+       }
+}
+
+/**
+ * il3945_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free, a receive buffer page is allocated for the slot.
+ *
+ * Also restock the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
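+/* il3945_rx_allocate does the allocation half of the replenish: take each
+ * buffer off rx_used, attach a freshly mapped page, and move it to rx_free.
+ * The GFP priority decides whether the allocation may sleep. */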
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+       struct il_rx_queue *rxq = &il->rxq;
+       struct list_head *element;
+       struct il_rx_buf *rxb;
+       struct page *page;
+       unsigned long flags;
+       gfp_t gfp_mask = priority;
+
+       while (1) {
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (il->hw_params.rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
+               /* Alloc a new receive buffer */
+               page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               D_INFO("Failed to allocate Rx page.\n");
+                       if (rxq->free_count <= RX_LOW_WATERMARK &&
+                           net_ratelimit())
+                               IL_ERR("Failed to allocate Rx page with 0x%x. "
+                                      "Only %u free buffers remaining.\n",
+                                      priority, rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
+                       break;
+               }
+
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       __free_pages(page, il->hw_params.rx_page_order);
+                       return;
+               }
+               element = rxq->rx_used.next;
+               rxb = list_entry(element, struct il_rx_buf, list);
+               list_del(element);
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               rxb->page = page;
+               /* Get physical address of RB/SKB */
+               rxb->page_dma =
+                   pci_map_page(il->pci_dev, page, 0,
+                                PAGE_SIZE << il->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               list_add_tail(&rxb->list, &rxq->rx_free);
+               rxq->free_count++;
+               il->alloc_rxb_page++;
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+       }
+}
+
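+/* Return every buffer in the pool to rx_used (unmapping and freeing any
+ * attached pages) and reset the read/write pointers to an empty queue. */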
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+       unsigned long flags;
+       int i;
+       spin_lock_irqsave(&rxq->lock, flags);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+               /* In the reset function, these buffers may have been allocated
+                * to an SKB, so we need to unmap and free potential storage */
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+                                      PAGE_SIZE << il->hw_params.rx_page_order,
+                                      PCI_DMA_FROMDEVICE);
+                       __il_free_pages(il, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+       }
+
+       /* Set us so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
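+/* Process-context replenish: allocate fresh pages with GFP_KERNEL, then
+ * restock the Rx queue under il->lock. */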
+void
+il3945_rx_replenish(void *data)
+{
+       struct il_priv *il = data;
+       unsigned long flags;
+
+       il3945_rx_allocate(il, GFP_KERNEL);
+
+       spin_lock_irqsave(&il->lock, flags);
+       il3945_rx_queue_restock(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+}
+
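+/* Atomic-context replenish, used from the Rx handling path when the queue
+ * is running low. */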
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+       il3945_rx_allocate(il, GFP_ATOMIC);
+
+       il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the list of pool entries and, if the page is
+ * non-NULL, unmaps and frees it.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+       int i;
+       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+                                      PAGE_SIZE << il->hw_params.rx_page_order,
+                                      PCI_DMA_FROMDEVICE);
+                       __il_free_pages(il, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+       }
+
+       dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+       dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+                         rxq->rb_stts, rxq->rb_stts_dma);
+       rxq->bd = NULL;
+       rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/*      0   1   2   3   4   5   6   7   8   9 */
+       0, 0, 6, 10, 12, 14, 16, 17, 18, 19,    /* 00 - 09 */
+       20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+       26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+       29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+       32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+       34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+       36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+       37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+       38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+       39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ *   (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
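+/* For example, a 250:1 ratio maps to 20 + ratio2dB[25] = 20 + 28 = 48 dB. */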
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+       /* 1000:1 or higher just report as 60 dB */
+       if (sig_ratio >= 1000)
+               return 60;
+
+       /* 100:1 or higher, divide by 10 and use table,
+        *   add 20 dB to make up for divide by 10 */
+       if (sig_ratio >= 100)
+               return 20 + (int)ratio2dB[sig_ratio / 10];
+
+       /* We shouldn't see this */
+       if (sig_ratio < 1)
+               return 0;
+
+       /* Use table for ratios 1:1 - 99:1 */
+       return (int)ratio2dB[sig_ratio];
+}
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+       struct il_rx_buf *rxb;
+       struct il_rx_pkt *pkt;
+       struct il_rx_queue *rxq = &il->rxq;
+       u32 r, i;
+       int reclaim;
+       unsigned long flags;
+       u8 fill_rx = 0;
+       u32 count = 8;
+       int total_empty = 0;
+
+       /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+        * buffer that the driver may process (last buffer filled by ucode). */
+       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+       i = rxq->read;
+
+       /* calculate how many buffers need to be restocked after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
+
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+       /* Rx interrupt, but nothing sent from uCode */
+       if (i == r)
+               D_RX("r = %d, i = %d\n", r, i);
+
+       while (i != r) {
+               int len;
+
+               rxb = rxq->queue[i];
+
+               /* If an RXB doesn't have a Rx queue slot associated with it,
+                * then a bug has been introduced in the queue refilling
+                * routines -- catch it here */
+               BUG_ON(rxb == NULL);
+
+               rxq->queue[i] = NULL;
+
+               pci_unmap_page(il->pci_dev, rxb->page_dma,
+                              PAGE_SIZE << il->hw_params.rx_page_order,
+                              PCI_DMA_FROMDEVICE);
+               pkt = rxb_addr(rxb);
+
+               len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+               len += sizeof(u32);     /* account for status word */
+
+               /* Reclaim a command buffer only if this packet is a response
+                *   to a (driver-originated) command.
+                * If the packet (e.g. Rx frame) originated from uCode,
+                *   there is no command buffer to reclaim.
+                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+                *   but apparently a few don't get set; catch them here. */
+               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+                   pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+
+               /* Based on type of command response or notification,
+                *   handle those that need handling via function in
+                *   handlers table.  See il3945_setup_handlers() */
+               if (il->handlers[pkt->hdr.cmd]) {
+                       D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+                            il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+                       il->isr_stats.handlers[pkt->hdr.cmd]++;
+                       il->handlers[pkt->hdr.cmd] (il, rxb);
+               } else {
+                       /* No handling needed */
+                       D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+                            i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+               }
+
+               /*
+                * XXX: After here, we should always check rxb->page
+                * against NULL before touching it or its virtual
+                * memory (pkt). Because some handler might have
+                * already taken or freed the pages.
+                */
+
+               if (reclaim) {
+                       /* Invoke any callbacks, transfer the buffer to caller,
+                        * and fire off the (possibly) blocking il_send_cmd()
+                        * as we reclaim the driver command queue */
+                       if (rxb->page)
+                               il_tx_cmd_complete(il, rxb);
+                       else
+                               IL_WARN("Claim null rxb?\n");
+               }
+
+               /* Reuse the page if possible. For notification packets and
+                * SKBs that fail to Rx correctly, add them back into the
+                * rx_free list for reuse later. */
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (rxb->page != NULL) {
+                       rxb->page_dma =
+                           pci_map_page(il->pci_dev, rxb->page, 0,
+                                        PAGE_SIZE << il->hw_params.
+                                        rx_page_order, PCI_DMA_FROMDEVICE);
+                       list_add_tail(&rxb->list, &rxq->rx_free);
+                       rxq->free_count++;
+               } else
+                       list_add_tail(&rxb->list, &rxq->rx_used);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               i = (i + 1) & RX_QUEUE_MASK;
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               il3945_rx_replenish_now(il);
+                               count = 0;
+                       }
+               }
+       }
+
+       /* Backtrack one entry */
+       rxq->read = i;
+       if (fill_rx)
+               il3945_rx_replenish_now(il);
+       else
+               il3945_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+       /* wait to make sure we flush pending tasklet */
+       synchronize_irq(il->pci_dev->irq);
+       tasklet_kill(&il->irq_tasklet);
+}
+
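+/* Translate a uCode error descriptor number into a printable name. */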
+static const char *
+il3945_desc_lookup(int i)
+{
+       switch (i) {
+       case 1:
+               return "FAIL";
+       case 2:
+               return "BAD_PARAM";
+       case 3:
+               return "BAD_CHECKSUM";
+       case 4:
+               return "NMI_INTERRUPT";
+       case 5:
+               return "SYSASSERT";
+       case 6:
+               return "FATAL_ERROR";
+       }
+
+       return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
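+/* Dump the uCode error event table (desc/time/blink/ilink/data words) from
+ * device memory to the kernel log. */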
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+       u32 i;
+       u32 desc, time, count, base, data1;
+       u32 blink1, blink2, ilink1, ilink2;
+
+       base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+       if (!il3945_hw_valid_rtc_data_addr(base)) {
+               IL_ERR("Not valid error log pointer 0x%08X\n", base);
+               return;
+       }
+
+       count = il_read_targ_mem(il, base);
+
+       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+               IL_ERR("Start IWL Error Log Dump:\n");
+               IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+       }
+
+       IL_ERR("Desc       Time       asrtPC  blink2 "
+              "ilink1  nmiPC   Line\n");
+       for (i = ERROR_START_OFFSET;
+            i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+            i += ERROR_ELEM_SIZE) {
+               desc = il_read_targ_mem(il, base + i);
+               time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+               blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+               blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+               ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+               ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+               data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+               IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+                      il3945_desc_lookup(desc), desc, time, blink1, blink2,
+                      ilink1, ilink2, data1);
+       }
+}
+
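+/* Interrupt bottom half: ack and decode CSR_INT / CSR_FH_INT_STATUS, then
+ * service hardware errors, wakeup, Rx and Tx completions, and finally
+ * re-enable interrupts if they are still allowed. */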
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+       u32 inta_mask;
+#endif
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Ack/clear/reset pending uCode interrupts.
+        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+       inta = _il_rd(il, CSR_INT);
+       _il_wr(il, CSR_INT, inta);
+
+       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+        * Any new interrupts that happen after this, either while we're
+        * in this tasklet, or later, will show up in next ISR/tasklet. */
+       inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+       _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & IL_DL_ISR) {
+               /* just for debug */
+               inta_mask = _il_rd(il, CSR_INT_MASK);
+               D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+                     inta_mask, inta_fh);
+       }
+#endif
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+        * atomic, make sure that inta covers all the interrupts that
+        * we've discovered, even if FH interrupt came in just after
+        * reading CSR_INT. */
+       if (inta_fh & CSR39_FH_INT_RX_MASK)
+               inta |= CSR_INT_BIT_FH_RX;
+       if (inta_fh & CSR39_FH_INT_TX_MASK)
+               inta |= CSR_INT_BIT_FH_TX;
+
+       /* Now service all interrupt bits discovered above. */
+       if (inta & CSR_INT_BIT_HW_ERR) {
+               IL_ERR("Hardware error detected.  Restarting.\n");
+
+               /* Tell the device to stop sending interrupts */
+               il_disable_interrupts(il);
+
+               il->isr_stats.hw++;
+               il_irq_handle_error(il);
+
+               handled |= CSR_INT_BIT_HW_ERR;
+
+               return;
+       }
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & (IL_DL_ISR)) {
+               /* NIC fires this, but we don't use it, redundant with WAKEUP */
+               if (inta & CSR_INT_BIT_SCD) {
+                       D_ISR("Scheduler finished transmitting "
+                             "the frame/frames.\n");
+                       il->isr_stats.sch++;
+               }
+
+               /* Alive notification via Rx interrupt will do the real work */
+               if (inta & CSR_INT_BIT_ALIVE) {
+                       D_ISR("Alive interrupt\n");
+                       il->isr_stats.alive++;
+               }
+       }
+#endif
+       /* Safely ignore these bits for debug checks below */
+       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+       /* Error detected by uCode */
+       if (inta & CSR_INT_BIT_SW_ERR) {
+               IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
+                      inta);
+               il->isr_stats.sw++;
+               il_irq_handle_error(il);
+               handled |= CSR_INT_BIT_SW_ERR;
+       }
+
+       /* uCode wakes up after power-down sleep */
+       if (inta & CSR_INT_BIT_WAKEUP) {
+               D_ISR("Wakeup interrupt\n");
+               il_rx_queue_update_write_ptr(il, &il->rxq);
+               il_txq_update_write_ptr(il, &il->txq[0]);
+               il_txq_update_write_ptr(il, &il->txq[1]);
+               il_txq_update_write_ptr(il, &il->txq[2]);
+               il_txq_update_write_ptr(il, &il->txq[3]);
+               il_txq_update_write_ptr(il, &il->txq[4]);
+               il_txq_update_write_ptr(il, &il->txq[5]);
+
+               il->isr_stats.wakeup++;
+               handled |= CSR_INT_BIT_WAKEUP;
+       }
+
+       /* All uCode command responses, including Tx command responses,
+        * Rx "responses" (frame-received notification), and other
+        * notifications from uCode come through here*/
+       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+               il3945_rx_handle(il);
+               il->isr_stats.rx++;
+               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+       }
+
+       if (inta & CSR_INT_BIT_FH_TX) {
+               D_ISR("Tx interrupt\n");
+               il->isr_stats.tx++;
+
+               _il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+               il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+               handled |= CSR_INT_BIT_FH_TX;
+       }
+
+       if (inta & ~handled) {
+               IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+               il->isr_stats.unhandled++;
+       }
+
+       if (inta & ~il->inta_mask) {
+               IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+                       inta & ~il->inta_mask);
+               IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
+       }
+
+       /* Re-enable all interrupts */
+       /* only Re-enable if disabled by irq */
+       if (test_bit(S_INT_ENABLED, &il->status))
+               il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & (IL_DL_ISR)) {
+               inta = _il_rd(il, CSR_INT);
+               inta_mask = _il_rd(il, CSR_INT_MASK);
+               inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+               D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+                     "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+       }
+#endif
+}
+
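+/* Fill @scan_ch with one entry per requested channel on @band, choosing
+ * active or passive dwell times and direct-probe bits per channel; returns
+ * the number of channels added. */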
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+                            u8 is_active, u8 n_probes,
+                            struct il3945_scan_channel *scan_ch,
+                            struct ieee80211_vif *vif)
+{
+       struct ieee80211_channel *chan;
+       const struct ieee80211_supported_band *sband;
+       const struct il_channel_info *ch_info;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added, i;
+
+       sband = il_get_hw_mode(il, band);
+       if (!sband)
+               return 0;
+
+       active_dwell = il_get_active_dwell_time(il, band, n_probes);
+       passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+               chan = il->scan_request->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               scan_ch->channel = chan->hw_value;
+
+               ch_info = il_get_channel_info(il, band, scan_ch->channel);
+               if (!il_is_channel_valid(ch_info)) {
+                       D_SCAN("Channel %d is INVALID for this band.\n",
+                              scan_ch->channel);
+                       continue;
+               }
+
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+               /* If passive, set up for auto-switch
+                * and use long active_dwell time.
+                */
+               if (!is_active || il_is_channel_passive(ch_info) ||
+                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+                       scan_ch->type = 0;      /* passive */
+                       if (IL_UCODE_API(il->ucode_ver) == 1)
+                               scan_ch->active_dwell =
+                                   cpu_to_le16(passive_dwell - 1);
+               } else {
+                       scan_ch->type = 1;      /* active */
+               }
+
+               /* Set direct probe bits. These may be used both for active
+                * scan channels (probes get sent right away),
+                * or for passive channels (probes get sent only after
+                * hearing a clear Rx packet). */
+               if (IL_UCODE_API(il->ucode_ver) >= 2) {
+                       if (n_probes)
+                               scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+               } else {
+                       /* uCode v1 does not allow setting direct probe bits on
+                        * passive channel. */
+                       if ((scan_ch->type & 1) && n_probes)
+                               scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+               }
+
+               /* Set txpower levels to defaults */
+               scan_ch->tpc.dsp_atten = 110;
+               /* scan_pwr_info->tpc.dsp_atten; */
+
+               /*scan_pwr_info->tpc.tx_gain; */
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else {
+                       scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+                       /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+                        * power level:
+                        * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+                        */
+               }
+
+               D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+                      (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+                      (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+               scan_ch++;
+               added++;
+       }
+
+       D_SCAN("total channels to scan %d\n", added);
+       return added;
+}
+
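+/* Populate mac80211's legacy rate table; the IEEE rate values (500 kb/s
+ * units) are multiplied by 5 to get mac80211's 100 kb/s bitrate units. */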
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+       int i;
+
+       for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+               rates[i].bitrate = il3945_rates[i].ieee * 5;
+               rates[i].hw_value = i;  /* Rate scaling will work on idxes */
+               rates[i].hw_value_short = i;
+               rates[i].flags = 0;
+               if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+                       /*
+                        * If CCK != 1M then set short preamble rate flag.
+                        */
+                       rates[i].flags |=
+                           (il3945_rates[i].plcp ==
+                            10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+               }
+       }
+}
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
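+/* Release the DMA-coherent buffers that held the downloaded uCode images. */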
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+       il_free_fw_desc(il->pci_dev, &il->ucode_code);
+       il_free_fw_desc(il->pci_dev, &il->ucode_data);
+       il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+       il_free_fw_desc(il->pci_dev, &il->ucode_init);
+       il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+       il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+       u32 val;
+       u32 save_len = len;
+       int rc = 0;
+       u32 errcnt;
+
+       D_INFO("ucode inst image size is %u\n", len);
+
+       il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+       errcnt = 0;
+       for (; len > 0; len -= sizeof(u32), image++) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IL_DL_IO is set */
+               val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       IL_ERR("uCode INST section is invalid at "
+                              "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                              save_len - len, val, le32_to_cpu(*image));
+                       rc = -EIO;
+                       errcnt++;
+                       if (errcnt >= 20)
+                               break;
+               }
+       }
+
+       if (!errcnt)
+               D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+       return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+       u32 val;
+       int rc = 0;
+       u32 errcnt = 0;
+       u32 i;
+
+       D_INFO("ucode inst image size is %u\n", len);
+
+       for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IL_DL_IO is set */
+               il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+               val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+#if 0                          /* Enable this if you want to see details */
+                       IL_ERR("uCode INST section is invalid at "
+                              "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+                              *image);
+#endif
+                       rc = -EIO;
+                       errcnt++;
+                       if (errcnt >= 3)
+                               break;
+               }
+       }
+
+       return rc;
+}
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+       __le32 *image;
+       u32 len;
+       int rc = 0;
+
+       /* Try bootstrap */
+       image = (__le32 *) il->ucode_boot.v_addr;
+       len = il->ucode_boot.len;
+       rc = il3945_verify_inst_sparse(il, image, len);
+       if (rc == 0) {
+               D_INFO("Bootstrap uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try initialize */
+       image = (__le32 *) il->ucode_init.v_addr;
+       len = il->ucode_init.len;
+       rc = il3945_verify_inst_sparse(il, image, len);
+       if (rc == 0) {
+               D_INFO("Initialize uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try runtime/protocol */
+       image = (__le32 *) il->ucode_code.v_addr;
+       len = il->ucode_code.len;
+       rc = il3945_verify_inst_sparse(il, image, len);
+       if (rc == 0) {
+               D_INFO("Runtime uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+       /* Since nothing seems to match, show first several data entries in
+        * instruction SRAM, so maybe visual inspection will give a clue.
+        * Selection of bootstrap image (vs. other images) is arbitrary. */
+       image = (__le32 *) il->ucode_boot.v_addr;
+       len = il->ucode_boot.len;
+       rc = il3945_verify_inst_full(il, image, len);
+
+       return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+       /* Remove all resets to allow NIC to operate */
+       _il_wr(il, CSR_RESET, 0);
+}
+
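+/* Accessors for the v1 uCode header layout used by 3945 firmware images. */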
+#define IL3945_UCODE_GET(item)                                         \
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{                                                                      \
+       return le32_to_cpu(ucode->v1.item);                             \
+}
+
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
+       return 24;
+}
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+       return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
+
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+       const struct il_ucode_header *ucode;
+       int ret = -EINVAL, idx;
+       const struct firmware *ucode_raw;
+       /* firmware file name contains uCode/driver compatibility version */
+       const char *name_pre = il->cfg->fw_name_pre;
+       const unsigned int api_max = il->cfg->ucode_api_max;
+       const unsigned int api_min = il->cfg->ucode_api_min;
+       char buf[25];
+       u8 *src;
+       size_t len;
+       u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+       /* Ask kernel firmware_class module to get the boot firmware off disk.
+        * request_firmware() is synchronous, file is in memory on return. */
+       for (idx = api_max; idx >= api_min; idx--) {
+               sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
+               ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+               if (ret < 0) {
+                       IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+                       if (ret == -ENOENT)
+                               continue;
+                       else
+                               goto error;
+               } else {
+                       if (idx < api_max)
+                               IL_ERR("Loaded firmware %s, "
+                                      "which is deprecated. "
+                                      "Please use API v%u instead.\n", buf,
+                                      api_max);
+                       D_INFO("Got firmware '%s' file "
+                              "(%zd bytes) from disk\n", buf, ucode_raw->size);
+                       break;
+               }
+       }
+
+       if (ret < 0)
+               goto error;
+
+       /* Make sure that we got at least our header! */
+       if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+               IL_ERR("File size way too small!\n");
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       /* Data from ucode file:  header followed by uCode images */
+       ucode = (struct il_ucode_header *)ucode_raw->data;
+
+       il->ucode_ver = le32_to_cpu(ucode->ver);
+       api_ver = IL_UCODE_API(il->ucode_ver);
+       inst_size = il3945_ucode_get_inst_size(ucode);
+       data_size = il3945_ucode_get_data_size(ucode);
+       init_size = il3945_ucode_get_init_size(ucode);
+       init_data_size = il3945_ucode_get_init_data_size(ucode);
+       boot_size = il3945_ucode_get_boot_size(ucode);
+       src = il3945_ucode_get_data(ucode);
+
+       /* api_ver should match the API version that forms part of the
+        * firmware filename, but we don't check that; from here on we rely
+        * only on the API version read from the firmware header */
+
+       if (api_ver < api_min || api_ver > api_max) {
+               IL_ERR("Driver unable to support your firmware API. "
+                      "Driver supports v%u, firmware is v%u.\n", api_max,
+                      api_ver);
+               il->ucode_ver = 0;
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (api_ver != api_max)
+               IL_ERR("Firmware has old API version. Expected %u, "
+                      "got %u. New firmware can be obtained "
+                      "from http://www.intellinuxwireless.org.\n", api_max,
+                      api_ver);
+
+       IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+               IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+               IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+       snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+                "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+                IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+                IL_UCODE_SERIAL(il->ucode_ver));
+
+       D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+       D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+       D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+       D_INFO("f/w package hdr init inst size = %u\n", init_size);
+       D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+       D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+       /* Verify size of file vs. image size info in file's header */
+       if (ucode_raw->size !=
+           il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+           init_size + init_data_size + boot_size) {
+
+               D_INFO("uCode file size %zd does not match expected size\n",
+                      ucode_raw->size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       /* Verify that uCode images will fit in card's SRAM */
+       if (inst_size > IL39_MAX_INST_SIZE) {
+               D_INFO("uCode instr len %d too large to fit in card's SRAM\n",
+                      inst_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       if (data_size > IL39_MAX_DATA_SIZE) {
+               D_INFO("uCode data len %d too large to fit in card's SRAM\n",
+                      data_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (init_size > IL39_MAX_INST_SIZE) {
+               D_INFO("uCode init instr len %d too large to fit in card's SRAM\n",
+                      init_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (init_data_size > IL39_MAX_DATA_SIZE) {
+               D_INFO("uCode init data len %d too large to fit in card's SRAM\n",
+                      init_data_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+       if (boot_size > IL39_MAX_BSM_SIZE) {
+               D_INFO("uCode boot instr len %d too large to fit in card's BSM\n",
+                      boot_size);
+               ret = -EINVAL;
+               goto err_release;
+       }
+
+       /* Allocate ucode buffers for card's bus-master loading ... */
+
+       /* Runtime instructions and 2 copies of data:
+        * 1) unmodified from disk
+        * 2) backup cache for save/restore during power-downs */
+       il->ucode_code.len = inst_size;
+       il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+       il->ucode_data.len = data_size;
+       il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+       il->ucode_data_backup.len = data_size;
+       il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+       if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+           !il->ucode_data_backup.v_addr)
+               goto err_pci_alloc;
+
+       /* Initialization instructions and data */
+       if (init_size && init_data_size) {
+               il->ucode_init.len = init_size;
+               il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+               il->ucode_init_data.len = init_data_size;
+               il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+               if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Bootstrap (instructions only, no data) */
+       if (boot_size) {
+               il->ucode_boot.len = boot_size;
+               il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+               if (!il->ucode_boot.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Copy images into buffers for card's bus-master reads ... */
+
+       /* Runtime instructions (first block of data in file) */
+       len = inst_size;
+       D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+       memcpy(il->ucode_code.v_addr, src, len);
+       src += len;
+
+       D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+              il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+       /* Runtime data (2nd block)
+        * NOTE:  Copy into backup buffer will be done in il3945_up()  */
+       len = data_size;
+       D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+       memcpy(il->ucode_data.v_addr, src, len);
+       memcpy(il->ucode_data_backup.v_addr, src, len);
+       src += len;
+
+       /* Initialization instructions (3rd block) */
+       if (init_size) {
+               len = init_size;
+               D_INFO("Copying (but not loading) init instr len %zd\n", len);
+               memcpy(il->ucode_init.v_addr, src, len);
+               src += len;
+       }
+
+       /* Initialization data (4th block) */
+       if (init_data_size) {
+               len = init_data_size;
+               D_INFO("Copying (but not loading) init data len %zd\n", len);
+               memcpy(il->ucode_init_data.v_addr, src, len);
+               src += len;
+       }
+
+       /* Bootstrap instructions (5th block) */
+       len = boot_size;
+       D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+       memcpy(il->ucode_boot.v_addr, src, len);
+
+       /* We have our copies now, allow OS release its copies */
+       release_firmware(ucode_raw);
+       return 0;
+
+err_pci_alloc:
+       IL_ERR("failed to allocate pci memory\n");
+       ret = -ENOMEM;
+       il3945_dealloc_ucode_pci(il);
+
+err_release:
+       release_firmware(ucode_raw);
+
+error:
+       return ret;
+}
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+
+       /* bits 31:0 for 3945 */
+       pinst = il->ucode_code.p_addr;
+       pdata = il->ucode_data_backup.p_addr;
+
+       /* Tell bootstrap uCode where to find image to load */
+       il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+       il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+       il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+       /* Inst byte count must be last to set up, bit 31 signals uCode
+        *   that all new ptr/size info is in place */
+       il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+                  il->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+       D_INFO("Runtime uCode pointers are set.\n");
+
+       return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+       /* Check alive response for "valid" sign from uCode */
+       if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               D_INFO("Initialize Alive failed.\n");
+               goto restart;
+       }
+
+       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "initialize" alive if code weren't properly loaded.  */
+       if (il3945_verify_ucode(il)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               D_INFO("Bad \"initialize\" uCode load.\n");
+               goto restart;
+       }
+
+       /* Send pointers to protocol/runtime uCode image ... init code will
+        * load and launch runtime uCode, which will send us another "Alive"
+        * notification. */
+       D_INFO("Initialization Alive received.\n");
+       if (il3945_set_ucode_ptrs(il)) {
+               /* Runtime instruction load won't happen;
+                * take it all the way back down so we can try again */
+               D_INFO("Couldn't set up uCode pointers.\n");
+               goto restart;
+       }
+       return;
+
+restart:
+       queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+       int thermal_spin = 0;
+       u32 rfkill;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       D_INFO("Runtime Alive received.\n");
+
+       if (il->card_alive.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               D_INFO("Alive failed.\n");
+               goto restart;
+       }
+
+       /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "runtime" alive if code weren't properly loaded.  */
+       if (il3945_verify_ucode(il)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               D_INFO("Bad runtime uCode load.\n");
+               goto restart;
+       }
+
+       rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+       D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+       if (rfkill & 0x1) {
+               clear_bit(S_RF_KILL_HW, &il->status);
+               /* if RFKILL is not on, then wait for thermal
+                * sensor in adapter to kick in */
+               while (il3945_hw_get_temperature(il) == 0) {
+                       thermal_spin++;
+                       udelay(10);
+               }
+
+               if (thermal_spin)
+                       D_INFO("Thermal calibration took %dus\n",
+                              thermal_spin * 10);
+       } else
+               set_bit(S_RF_KILL_HW, &il->status);
+
+       /* After the ALIVE response, we can send commands to 3945 uCode */
+       set_bit(S_ALIVE, &il->status);
+
+       /* Enable watchdog to monitor the driver tx queues */
+       il_setup_watchdog(il);
+
+       if (il_is_rfkill(il))
+               return;
+
+       ieee80211_wake_queues(il->hw);
+
+       il->active_rate = RATES_MASK_3945;
+
+       il_power_update_mode(il, true);
+
+       if (il_is_associated(il)) {
+               struct il3945_rxon_cmd *active_rxon =
+                   (struct il3945_rxon_cmd *)(&ctx->active);
+
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       } else {
+               /* Initialize our rx_config data */
+               il_connection_init_rx_config(il, ctx);
+       }
+
+       /* Configure Bluetooth device coexistence support */
+       il_send_bt_config(il);
+
+       set_bit(S_READY, &il->status);
+
+       /* Configure the adapter for unassociated operation */
+       il3945_commit_rxon(il, ctx);
+
+       il3945_reg_txpower_periodic(il);
+
+       D_INFO("ALIVE processing complete.\n");
+       wake_up(&il->wait_command_queue);
+
+       return;
+
+restart:
+       queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
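+/*
+ * Tear the NIC down: stop the watchdog and TX/RX queues, disable interrupts
+ * and put the device into a low power state.
+ */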
+static void
+__il3945_down(struct il_priv *il)
+{
+       unsigned long flags;
+       int exit_pending;
+
+       D_INFO(DRV_NAME " is going down\n");
+
+       il_scan_cancel_timeout(il, 200);
+
+       exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+       /* Stop the TX queue watchdog. We need the S_EXIT_PENDING bit set
+        * to prevent the timer from being re-armed */
+       del_timer_sync(&il->watchdog);
+
+       /* Station information will now be cleared in device */
+       il_clear_ucode_stations(il, NULL);
+       il_dealloc_bcast_stations(il);
+       il_clear_driver_stations(il);
+
+       /* Unblock any waiting calls */
+       wake_up_all(&il->wait_command_queue);
+
+       /* Wipe out the EXIT_PENDING status bit if we are not actually
+        * exiting the module */
+       if (!exit_pending)
+               clear_bit(S_EXIT_PENDING, &il->status);
+
+       /* stop and reset the on-board processor */
+       _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /* tell the device to stop sending interrupts */
+       spin_lock_irqsave(&il->lock, flags);
+       il_disable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+       il3945_synchronize_irq(il);
+
+       if (il->mac80211_registered)
+               ieee80211_stop_queues(il->hw);
+
+       /* If we have not previously called il3945_init() then
+        * clear all bits but the RF Kill bits and return */
+       if (!il_is_init(il)) {
+               il->status =
+                   test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+                   test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+                   test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+               goto exit;
+       }
+
+       /* ...otherwise clear out all the status bits but the RF Kill
+        * bit and continue taking the NIC down. */
+       il->status &=
+           test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+           test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+           test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+           test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+       il3945_hw_txq_ctx_stop(il);
+       il3945_hw_rxq_stop(il);
+
+       /* Power-down device's busmaster DMA clocks */
+       il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(5);
+
+       /* Stop the device, and put it in low power state */
+       il_apm_stop(il);
+
+exit:
+       memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+       if (il->beacon_skb)
+               dev_kfree_skb(il->beacon_skb);
+       il->beacon_skb = NULL;
+
+       /* clear out any free frames */
+       il3945_clear_free_frames(il);
+}
+
+static void
+il3945_down(struct il_priv *il)
+{
+       mutex_lock(&il->mutex);
+       __il3945_down(il);
+       mutex_unlock(&il->mutex);
+
+       il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
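+/* Add the broadcast station to the station table before bringing the NIC up */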
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       unsigned long flags;
+       u8 sta_id;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Unable to prepare broadcast station\n");
+               spin_unlock_irqrestore(&il->sta_lock, flags);
+
+               return -EINVAL;
+       }
+
+       il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+       il->stations[sta_id].used |= IL_STA_BCAST;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return 0;
+}
+
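+/*
+ * Bring the NIC up: init the hardware, load the bootstrap uCode and start
+ * the card; the "initialize" uCode then loads the runtime image.
+ */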
+static int
+__il3945_up(struct il_priv *il)
+{
+       int rc, i;
+
+       rc = il3945_alloc_bcast_station(il);
+       if (rc)
+               return rc;
+
+       if (test_bit(S_EXIT_PENDING, &il->status)) {
+               IL_WARN("Exit pending; will not bring the NIC up\n");
+               return -EIO;
+       }
+
+       if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+               IL_ERR("ucode not available for device bring up\n");
+               return -EIO;
+       }
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(S_RF_KILL_HW, &il->status);
+       else {
+               set_bit(S_RF_KILL_HW, &il->status);
+               IL_WARN("Radio disabled by HW RF Kill switch\n");
+               return -ENODEV;
+       }
+
+       _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+       rc = il3945_hw_nic_init(il);
+       if (rc) {
+               IL_ERR("Unable to init nic\n");
+               return rc;
+       }
+
+       /* make sure rfkill handshake bits are cleared */
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       /* clear (again), then enable host interrupts */
+       _il_wr(il, CSR_INT, 0xFFFFFFFF);
+       il_enable_interrupts(il);
+
+       /* really make sure rfkill handshake bits are cleared */
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+       /* Copy original ucode data image from disk into backup cache.
+        * This will be used to initialize the on-board processor's
+        * data SRAM for a clean start when the runtime program first loads. */
+       memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+              il->ucode_data.len);
+
+       /* We return success when we resume from suspend and rf_kill is on. */
+       if (test_bit(S_RF_KILL_HW, &il->status))
+               return 0;
+
+       for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+               /* load bootstrap state machine,
+                * load bootstrap program into processor's memory,
+                * prepare to load the "initialize" uCode */
+               rc = il->cfg->ops->lib->load_ucode(il);
+
+               if (rc) {
+                       IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+                       continue;
+               }
+
+               /* start card; "initialize" will load runtime ucode */
+               il3945_nic_start(il);
+
+               D_INFO(DRV_NAME " is coming up\n");
+
+               return 0;
+       }
+
+       set_bit(S_EXIT_PENDING, &il->status);
+       __il3945_down(il);
+       clear_bit(S_EXIT_PENDING, &il->status);
+
+       /* We tried to restart and configure the device for as long as our
+        * patience could withstand */
+       IL_ERR("Unable to initialize device after %d attempts.\n", i);
+       return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+       struct il_priv *il =
+           container_of(data, struct il_priv, init_alive_start.work);
+
+       mutex_lock(&il->mutex);
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               goto out;
+
+       il3945_init_alive_start(il);
+out:
+       mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+       struct il_priv *il =
+           container_of(data, struct il_priv, alive_start.work);
+
+       mutex_lock(&il->mutex);
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               goto out;
+
+       il3945_alive_start(il);
+out:
+       mutex_unlock(&il->mutex);
+}
+
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change.  This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+       struct il_priv *il =
+           container_of(data, struct il_priv, _3945.rfkill_poll.work);
+       bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+       bool new_rfkill =
+           !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+       if (new_rfkill != old_rfkill) {
+               if (new_rfkill)
+                       set_bit(S_RF_KILL_HW, &il->status);
+               else
+                       clear_bit(S_RF_KILL_HW, &il->status);
+
+               wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+               D_RF_KILL("RF_KILL bit toggled to %s.\n",
+                         new_rfkill ? "disable radio" : "enable radio");
+       }
+
+       /* Keep this running, even if radio now enabled.  This will be
+        * cancelled in mac_start() if system decides to start again */
+       queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+                          round_jiffies_relative(2 * HZ));
+
+}
+
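+/* Build the 3945 scan command for the current scan request and send it */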
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+       struct il_host_cmd cmd = {
+               .id = C_SCAN,
+               .len = sizeof(struct il3945_scan_cmd),
+               .flags = CMD_SIZE_HUGE,
+       };
+       struct il3945_scan_cmd *scan;
+       u8 n_probes = 0;
+       enum ieee80211_band band;
+       bool is_active = false;
+       int ret;
+       u16 len;
+
+       lockdep_assert_held(&il->mutex);
+
+       if (!il->scan_cmd) {
+               il->scan_cmd =
+                   kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+                           GFP_KERNEL);
+               if (!il->scan_cmd) {
+                       D_SCAN("Fail to allocate scan memory\n");
+                       return -ENOMEM;
+               }
+       }
+       scan = il->scan_cmd;
+       memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+       scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+       scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+       if (il_is_associated(il)) {
+               u16 interval;
+               u32 extra;
+               u32 suspend_time = 100;
+               u32 scan_suspend_time = 100;
+
+               D_INFO("Scanning while associated...\n");
+
+               interval = vif->bss_conf.beacon_int;
+
+               scan->suspend_time = 0;
+               scan->max_out_time = cpu_to_le32(200 * 1024);
+               if (!interval)
+                       interval = suspend_time;
+               /*
+                * suspend time format:
+                *  0-19: beacon interval in usec (time before exec.)
+                * 20-23: 0
+                * 24-31: number of beacons (suspend between channels)
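+                *
+                * e.g. suspend_time=100, interval=100 gives extra=0x01000000,
+                * so scan_suspend_time=0x01000000 (1 beacon, 0 usec).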
+                */
+
+               extra = (suspend_time / interval) << 24;
+               scan_suspend_time =
+                   0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
+
+               scan->suspend_time = cpu_to_le32(scan_suspend_time);
+               D_SCAN("suspend_time 0x%X beacon interval %d\n",
+                      scan_suspend_time, interval);
+       }
+
+       if (il->scan_request->n_ssids) {
+               int i, p = 0;
+               D_SCAN("Kicking off active scan\n");
+               for (i = 0; i < il->scan_request->n_ssids; i++) {
+                       /* always does wildcard anyway */
+                       if (!il->scan_request->ssids[i].ssid_len)
+                               continue;
+                       scan->direct_scan[p].id = WLAN_EID_SSID;
+                       scan->direct_scan[p].len =
+                           il->scan_request->ssids[i].ssid_len;
+                       memcpy(scan->direct_scan[p].ssid,
+                              il->scan_request->ssids[i].ssid,
+                              il->scan_request->ssids[i].ssid_len);
+                       n_probes++;
+                       p++;
+               }
+               is_active = true;
+       } else
+               D_SCAN("Kicking off passive scan.\n");
+
+       /* We don't build a direct scan probe request; the uCode will do
+        * that based on the direct_mask added to each channel entry */
+       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+       scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       /* flags + rate selection */
+
+       switch (il->scan_band) {
+       case IEEE80211_BAND_2GHZ:
+               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+               scan->tx_cmd.rate = RATE_1M_PLCP;
+               band = IEEE80211_BAND_2GHZ;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               scan->tx_cmd.rate = RATE_6M_PLCP;
+               band = IEEE80211_BAND_5GHZ;
+               break;
+       default:
+               IL_WARN("Invalid scan band\n");
+               return -EIO;
+       }
+
+       /*
+        * If active scanning is requested but a certain channel
+        * is marked passive, we can do active scanning if we
+        * detect transmissions.
+        */
+       scan->good_CRC_th =
+           is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_DISABLED;
+
+       len =
+           il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+                             vif->addr, il->scan_request->ie,
+                             il->scan_request->ie_len,
+                             IL_MAX_SCAN_SIZE - sizeof(*scan));
+       scan->tx_cmd.len = cpu_to_le16(len);
+
+       /* select Rx antennas */
+       scan->flags |= il3945_get_antenna_flags(il);
+
+       scan->channel_count =
+           il3945_get_channels_for_scan(il, band, is_active, n_probes,
+                                        (void *)&scan->data[len], vif);
+       if (scan->channel_count == 0) {
+               D_SCAN("channel count %d\n", scan->channel_count);
+               return -EIO;
+       }
+
+       cmd.len +=
+           le16_to_cpu(scan->tx_cmd.len) +
+           scan->channel_count * sizeof(struct il3945_scan_channel);
+       cmd.data = scan;
+       scan->len = cpu_to_le16(cmd.len);
+
+       set_bit(S_SCAN_HW, &il->status);
+       ret = il_send_cmd_sync(il, &cmd);
+       if (ret)
+               clear_bit(S_SCAN_HW, &il->status);
+       return ret;
+}
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+
+       /*
+        * Since setting the RXON may have been deferred while
+        * performing the scan, fire one off if needed
+        */
+       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+               il3945_commit_rxon(il, ctx);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+       struct il_priv *il = container_of(data, struct il_priv, restart);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+               mutex_lock(&il->mutex);
+               il->ctx.vif = NULL;
+               il->is_open = 0;
+               mutex_unlock(&il->mutex);
+               il3945_down(il);
+               ieee80211_restart_hw(il->hw);
+       } else {
+               il3945_down(il);
+
+               mutex_lock(&il->mutex);
+               if (test_bit(S_EXIT_PENDING, &il->status)) {
+                       mutex_unlock(&il->mutex);
+                       return;
+               }
+
+               __il3945_up(il);
+               mutex_unlock(&il->mutex);
+       }
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+       struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+       mutex_lock(&il->mutex);
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               goto out;
+
+       il3945_rx_replenish(il);
+out:
+       mutex_unlock(&il->mutex);
+}
+
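+/*
+ * Called once mac80211 reports an association: commit the associated RXON
+ * and start rate scaling (station mode) or beaconing (IBSS).
+ */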
+void
+il3945_post_associate(struct il_priv *il)
+{
+       int rc = 0;
+       struct ieee80211_conf *conf = NULL;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       if (!ctx->vif || !il->is_open)
+               return;
+
+       D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
+               ctx->active.bssid_addr);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       il_scan_cancel_timeout(il, 200);
+
+       conf = &il->hw->conf;
+
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       il3945_commit_rxon(il, ctx);
+
+       rc = il_send_rxon_timing(il, ctx);
+       if (rc)
+               IL_WARN("C_RXON_TIMING failed - Attempting to continue.\n");
+
+       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+       ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+       D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
+               ctx->vif->bss_conf.beacon_int);
+
+       if (ctx->vif->bss_conf.use_short_preamble)
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+               if (ctx->vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+       }
+
+       il3945_commit_rxon(il, ctx);
+
+       switch (ctx->vif->type) {
+       case NL80211_IFTYPE_STATION:
+               il3945_rate_scale_init(il->hw, IL_AP_ID);
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               il3945_send_beacon_cmd(il);
+               break;
+       default:
+               IL_ERR("%s Should not be called in %d mode\n", __func__,
+                      ctx->vif->type);
+               break;
+       }
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT    (2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+       struct il_priv *il = hw->priv;
+       int ret;
+
+       D_MAC80211("enter\n");
+
+       /* we should be verifying the device is ready to be opened */
+       mutex_lock(&il->mutex);
+
+       /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+        * ucode filename and max sizes are card-specific. */
+
+       if (!il->ucode_code.len) {
+               ret = il3945_read_ucode(il);
+               if (ret) {
+                       IL_ERR("Could not read microcode: %d\n", ret);
+                       mutex_unlock(&il->mutex);
+                       goto out_release_irq;
+               }
+       }
+
+       ret = __il3945_up(il);
+
+       mutex_unlock(&il->mutex);
+
+       if (ret)
+               goto out_release_irq;
+
+       D_INFO("Start UP work.\n");
+
+       /* Wait for START_ALIVE from ucode. Otherwise callbacks from
+        * mac80211 will not be run successfully. */
+       ret = wait_event_timeout(il->wait_command_queue,
+                                test_bit(S_READY, &il->status),
+                                UCODE_READY_TIMEOUT);
+       if (!ret) {
+               if (!test_bit(S_READY, &il->status)) {
+                       IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+                              jiffies_to_msecs(UCODE_READY_TIMEOUT));
+                       ret = -ETIMEDOUT;
+                       goto out_release_irq;
+               }
+       }
+
+       /* ucode is running and will send rfkill notifications,
+        * no need to poll the killswitch state anymore */
+       cancel_delayed_work(&il->_3945.rfkill_poll);
+
+       il->is_open = 1;
+       D_MAC80211("leave\n");
+       return 0;
+
+out_release_irq:
+       il->is_open = 0;
+       D_MAC80211("leave - failed\n");
+       return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+       struct il_priv *il = hw->priv;
+
+       D_MAC80211("enter\n");
+
+       if (!il->is_open) {
+               D_MAC80211("leave - skip\n");
+               return;
+       }
+
+       il->is_open = 0;
+
+       il3945_down(il);
+
+       flush_workqueue(il->workqueue);
+
+       /* start polling the killswitch state again */
+       queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+                          round_jiffies_relative(2 * HZ));
+
+       D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct il_priv *il = hw->priv;
+
+       D_MAC80211("enter\n");
+
+       D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+            ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+       if (il3945_tx_skb(il, skb))
+               dev_kfree_skb_any(skb);
+
+       D_MAC80211("leave\n");
+}
+
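+/*
+ * Bring up AP/IBSS operation: send RXON timing while unassociated, then
+ * restore the associated RXON and kick off beaconing.
+ */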
+void
+il3945_config_ap(struct il_priv *il)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       struct ieee80211_vif *vif = ctx->vif;
+       int rc = 0;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       /* The following should be done only at AP bring up */
+       if (!(il_is_associated(il))) {
+
+               /* RXON - unassoc (to set timing command) */
+               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+               il3945_commit_rxon(il, ctx);
+
+               /* RXON Timing */
+               rc = il_send_rxon_timing(il, ctx);
+               if (rc)
+                       IL_WARN("C_RXON_TIMING failed - "
+                               "Attempting to continue.\n");
+
+               ctx->staging.assoc_id = 0;
+
+               if (vif->bss_conf.use_short_preamble)
+                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+                       if (vif->bss_conf.use_short_slot)
+                               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+                       else
+                               ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+               }
+               /* restore RXON assoc */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               il3945_commit_rxon(il, ctx);
+       }
+       il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key)
+{
+       struct il_priv *il = hw->priv;
+       int ret = 0;
+       u8 sta_id = IL_INVALID_STATION;
+       u8 static_key;
+
+       D_MAC80211("enter\n");
+
+       if (il3945_mod_params.sw_crypto) {
+               D_MAC80211("leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       /*
+        * To support IBSS RSN, don't program group keys in IBSS, the
+        * hardware will then not attempt to decrypt the frames.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               return -EOPNOTSUPP;
+
+       static_key = !il_is_associated(il);
+
+       if (!static_key) {
+               sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+               if (sta_id == IL_INVALID_STATION)
+                       return -EINVAL;
+       }
+
+       mutex_lock(&il->mutex);
+       il_scan_cancel_timeout(il, 100);
+
+       switch (cmd) {
+       case SET_KEY:
+               if (static_key)
+                       ret = il3945_set_static_key(il, key);
+               else
+                       ret = il3945_set_dynamic_key(il, key, sta_id);
+               D_MAC80211("enable hwcrypto key\n");
+               break;
+       case DISABLE_KEY:
+               if (static_key)
+                       ret = il3945_remove_static_key(il);
+               else
+                       ret = il3945_clear_sta_key_info(il, sta_id);
+               D_MAC80211("disable hwcrypto key\n");
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&il->mutex);
+       D_MAC80211("leave\n");
+
+       return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta)
+{
+       struct il_priv *il = hw->priv;
+       struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+       int ret;
+       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+       u8 sta_id;
+
+       D_INFO("received request to add station %pM\n", sta->addr);
+       mutex_lock(&il->mutex);
+       D_INFO("proceeding to add station %pM\n", sta->addr);
+       sta_priv->common.sta_id = IL_INVALID_STATION;
+
+       ret =
+           il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+       if (ret) {
+               IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+               /* Should we return success if return code is EEXIST ? */
+               mutex_unlock(&il->mutex);
+               return ret;
+       }
+
+       sta_priv->common.sta_id = sta_id;
+
+       /* Initialize rate scaling */
+       D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+       il3945_rs_rate_init(il, sta, sta_id);
+       mutex_unlock(&il->mutex);
+
+       return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+                       unsigned int *total_flags, u64 multicast)
+{
+       struct il_priv *il = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+       struct il_rxon_context *ctx = &il->ctx;
+
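+/* Map mac80211 filter flags onto RXON filter bits to set (or) or clear (nand) */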
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+                  *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&il->mutex);
+
+       ctx->staging.filter_flags &= ~filter_nand;
+       ctx->staging.filter_flags |= filter_or;
+
+       /*
+        * Not committing directly because the hardware may be performing
+        * a scan; even when the hw is ready, committing here breaks for
+        * some reason, so we'll eventually commit the filter flags change
+        * anyway.
+        */
+
+       mutex_unlock(&il->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in il_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &=
+           FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+           FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+                       char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret)
+               IL_INFO("%s is not in hex or decimal form.\n", buf);
+       else {
+               il->debug_level = val;
+               if (il_alloc_traffic_mem(il))
+                       IL_ERR("Not enough memory to generate traffic log\n");
+       }
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
+                  il3945_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+                       char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+                     const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       char *p = (char *)buf;
+       u32 val;
+
+       val = simple_strtoul(p, &p, 10);
+       if (p == buf)
+               IL_INFO("%s is not in decimal form.\n", buf);
+       else
+               il3945_hw_reg_set_txpower(il, val);
+
+       return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
+                  il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       struct il_rxon_context *ctx = &il->ctx;
+
+       return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       u32 flags = simple_strtoul(buf, NULL, 0);
+       struct il_rxon_context *ctx = &il->ctx;
+
+       mutex_lock(&il->mutex);
+       if (le32_to_cpu(ctx->staging.flags) != flags) {
+               /* Cancel any currently running scans... */
+               if (il_scan_cancel_timeout(il, 100))
+                       IL_WARN("Could not cancel scan.\n");
+               else {
+                       D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+                       ctx->staging.flags = cpu_to_le32(flags);
+                       il3945_commit_rxon(il, ctx);
+               }
+       }
+       mutex_unlock(&il->mutex);
+
+       return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
+                  il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+                        char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       struct il_rxon_context *ctx = &il->ctx;
+
+       return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+                         const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       struct il_rxon_context *ctx = &il->ctx;
+       u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+       mutex_lock(&il->mutex);
+       if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+               /* Cancel any currently running scans... */
+               if (il_scan_cancel_timeout(il, 100))
+                       IL_WARN("Could not cancel scan.\n");
+               else {
+                       D_INFO("Committing rxon.filter_flags = 0x%04X\n",
+                              filter_flags);
+                       ctx->staging.filter_flags = cpu_to_le32(filter_flags);
+                       il3945_commit_rxon(il, ctx);
+               }
+       }
+       mutex_unlock(&il->mutex);
+
+       return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
+                  il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+                       char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       struct il_spectrum_notification measure_report;
+       u32 size = sizeof(measure_report), len = 0, ofs = 0;
+       u8 *data = (u8 *) &measure_report;
+       unsigned long flags;
+
+       spin_lock_irqsave(&il->lock, flags);
+       if (!(il->measurement_status & MEASUREMENT_READY)) {
+               spin_unlock_irqrestore(&il->lock, flags);
+               return 0;
+       }
+       memcpy(&measure_report, &il->measure_report, size);
+       il->measurement_status = 0;
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       while (size && PAGE_SIZE - len) {
+               hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+                                  PAGE_SIZE - len, 1);
+               len = strlen(buf);
+               if (PAGE_SIZE - len)
+                       buf[len++] = '\n';
+
+               ofs += 16;
+               size -= min(size, 16U);
+       }
+
+       return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       struct il_rxon_context *ctx = &il->ctx;
+       struct ieee80211_measurement_params params = {
+               .channel = le16_to_cpu(ctx->active.channel),
+               .start_time = cpu_to_le64(il->_3945.last_tsf),
+               .duration = cpu_to_le16(1),
+       };
+       u8 type = IL_MEASURE_BASIC;
+       u8 buffer[32];
+       u8 channel;
+
+       if (count) {
+               char *p = buffer;
+               size_t len = min(sizeof(buffer) - 1, count);
+
+               strncpy(buffer, buf, len);
+               buffer[len] = '\0';     /* ensure NUL termination before parsing */
+               channel = simple_strtoul(p, NULL, 0);
+               if (channel)
+                       params.channel = channel;
+
+               p = buffer;
+               while (*p && *p != ' ')
+                       p++;
+               if (*p)
+                       type = simple_strtoul(p + 1, NULL, 0);
+       }
+
+       D_INFO("Invoking measurement of type %d on channel %d (for '%s')\n",
+              type, params.channel, buf);
+       il3945_get_measurement(il, &params, type);
+
+       return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
+                  il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+
+       il->retry_rate = simple_strtoul(buf, NULL, 0);
+       if (il->retry_rate <= 0)
+               il->retry_rate = 1;
+
+       return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+                      char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
+                  il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+       /* all this shit doesn't belong in sysfs anyway */
+       return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+                    const char *buf, size_t count)
+{
+       struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+       int ant;
+
+       if (count == 0)
+               return 0;
+
+       if (sscanf(buf, "%1i", &ant) != 1) {
+               D_INFO("not in hex or decimal form.\n");
+               return count;
+       }
+
+       if (ant >= 0 && ant <= 2) {
+               D_INFO("Setting antenna select to %d.\n", ant);
+               il3945_mod_params.antenna = (enum il3945_antenna)ant;
+       } else
+               D_INFO("Bad antenna select value %d.\n", ant);
+
+       return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
+                  il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       if (!il_is_alive(il))
+               return -EAGAIN;
+       return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+                     const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       char *p = (char *)buf;
+
+       if (p[0] == '1')
+               il3945_dump_nic_error_log(il);
+
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+       il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+       init_waitqueue_head(&il->wait_command_queue);
+
+       INIT_WORK(&il->restart, il3945_bg_restart);
+       INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+       INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+       INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+       INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+       il_setup_scan_deferred_work(il);
+
+       il3945_hw_setup_deferred_work(il);
+
+       init_timer(&il->watchdog);
+       il->watchdog.data = (unsigned long)il;
+       il->watchdog.function = il_bg_watchdog;
+
+       tasklet_init(&il->irq_tasklet,
+                    (void (*)(unsigned long))il3945_irq_tasklet,
+                    (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+       il3945_hw_cancel_deferred_work(il);
+
+       cancel_delayed_work_sync(&il->init_alive_start);
+       cancel_delayed_work(&il->alive_start);
+
+       il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+       &dev_attr_antenna.attr,
+       &dev_attr_channels.attr,
+       &dev_attr_dump_errors.attr,
+       &dev_attr_flags.attr,
+       &dev_attr_filter_flags.attr,
+       &dev_attr_measurement.attr,
+       &dev_attr_retry_rate.attr,
+       &dev_attr_status.attr,
+       &dev_attr_temperature.attr,
+       &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+       &dev_attr_debug_level.attr,
+#endif
+       NULL
+};
+
+static struct attribute_group il3945_attribute_group = {
+       .name = NULL,           /* put in device directory */
+       .attrs = il3945_sysfs_entries,
+};
+
+struct ieee80211_ops il3945_hw_ops = {
+       .tx = il3945_mac_tx,
+       .start = il3945_mac_start,
+       .stop = il3945_mac_stop,
+       .add_interface = il_mac_add_interface,
+       .remove_interface = il_mac_remove_interface,
+       .change_interface = il_mac_change_interface,
+       .config = il_mac_config,
+       .configure_filter = il3945_configure_filter,
+       .set_key = il3945_mac_set_key,
+       .conf_tx = il_mac_conf_tx,
+       .reset_tsf = il_mac_reset_tsf,
+       .bss_info_changed = il_mac_bss_info_changed,
+       .hw_scan = il_mac_hw_scan,
+       .sta_add = il3945_mac_sta_add,
+       .sta_remove = il_mac_sta_remove,
+       .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
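+/*
+ * One-time driver data init: locks, channel map, geos and per-channel
+ * txpower settings from EEPROM.
+ */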
+static int
+il3945_init_drv(struct il_priv *il)
+{
+       int ret;
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+       il->retry_rate = 1;
+       il->beacon_skb = NULL;
+
+       spin_lock_init(&il->sta_lock);
+       spin_lock_init(&il->hcmd_lock);
+
+       INIT_LIST_HEAD(&il->free_frames);
+
+       mutex_init(&il->mutex);
+
+       il->ieee_channels = NULL;
+       il->ieee_rates = NULL;
+       il->band = IEEE80211_BAND_2GHZ;
+
+       il->iw_mode = NL80211_IFTYPE_STATION;
+       il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+       /* initialize force reset */
+       il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+       if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+               IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+                       eeprom->version);
+               ret = -EINVAL;
+               goto err;
+       }
+       ret = il_init_channel_map(il);
+       if (ret) {
+               IL_ERR("initializing regulatory failed: %d\n", ret);
+               goto err;
+       }
+
+       /* Set up txpower settings in driver for all channels */
+       if (il3945_txpower_set_from_eeprom(il)) {
+               ret = -EIO;
+               goto err_free_channel_map;
+       }
+
+       ret = il_init_geos(il);
+       if (ret) {
+               IL_ERR("initializing geos failed: %d\n", ret);
+               goto err_free_channel_map;
+       }
+       il3945_init_hw_rates(il, il->ieee_rates);
+
+       return 0;
+
+err_free_channel_map:
+       il_free_channel_map(il);
+err:
+       return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST       200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+       int ret;
+       struct ieee80211_hw *hw = il->hw;
+
+       hw->rate_control_algorithm = "iwl-3945-rs";
+       hw->sta_data_size = sizeof(struct il3945_sta_priv);
+       hw->vif_data_size = sizeof(struct il_vif_priv);
+
+       /* Tell mac80211 our characteristics */
+       hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+
+       hw->wiphy->interface_modes = il->ctx.interface_modes;
+
+       hw->wiphy->flags |=
+           WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+           WIPHY_FLAG_IBSS_RSN;
+
+       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+       /* we create the 24-byte 802.11 header and a zero-length SSID element (2 bytes) */
+       hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
+
+       /* Default value; 4 EDCA QOS priorities */
+       hw->queues = 4;
+
+       if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+               il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+                   &il->bands[IEEE80211_BAND_2GHZ];
+
+       if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+               il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+                   &il->bands[IEEE80211_BAND_5GHZ];
+
+       il_leds_init(il);
+
+       ret = ieee80211_register_hw(il->hw);
+       if (ret) {
+               IL_ERR("Failed to register hw (error %d)\n", ret);
+               return ret;
+       }
+       il->mac80211_registered = 1;
+
+       return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       int err = 0;
+       struct il_priv *il;
+       struct ieee80211_hw *hw;
+       struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+       struct il3945_eeprom *eeprom;
+       unsigned long flags;
+
+       /***********************
+        * 1. Allocating HW data
+        * ********************/
+
+       /* mac80211 allocates memory for this device instance, including
+        *   space for this driver's private structure */
+       hw = il_alloc_all(cfg);
+       if (hw == NULL) {
+               pr_err("Can not allocate network device\n");
+               err = -ENOMEM;
+               goto out;
+       }
+       il = hw->priv;
+       SET_IEEE80211_DEV(hw, &pdev->dev);
+
+       il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+       il->ctx.ctxid = 0;
+
+       il->ctx.rxon_cmd = C_RXON;
+       il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+       il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+       il->ctx.qos_cmd = C_QOS_PARAM;
+       il->ctx.ap_sta_id = IL_AP_ID;
+       il->ctx.wep_key_cmd = C_WEPKEY;
+       il->ctx.interface_modes =
+           BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+       il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+       il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+       il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+       /*
+        * Disabling hardware scan means that mac80211 will perform scans
+        * "the hard way", rather than using the device's hardware scan.
+        */
+       if (il3945_mod_params.disable_hw_scan) {
+               D_INFO("Disabling hw_scan\n");
+               il3945_hw_ops.hw_scan = NULL;
+       }
+
+       D_INFO("*** LOAD DRIVER ***\n");
+       il->cfg = cfg;
+       il->pci_dev = pdev;
+       il->inta_mask = CSR_INI_SET_MASK;
+
+       if (il_alloc_traffic_mem(il))
+               IL_ERR("Not enough memory to generate traffic log\n");
+
+       /***************************
+        * 2. Initializing PCI bus
+        * *************************/
+       pci_disable_link_state(pdev,
+                              PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                              PCIE_LINK_STATE_CLKPM);
+
+       if (pci_enable_device(pdev)) {
+               err = -ENODEV;
+               goto out_ieee80211_free_hw;
+       }
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (err) {
+               IL_WARN("No suitable DMA available.\n");
+               goto out_pci_disable_device;
+       }
+
+       pci_set_drvdata(pdev, il);
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err)
+               goto out_pci_disable_device;
+
+       /***********************
+        * 3. Read REV Register
+        * ********************/
+       il->hw_base = pci_iomap(pdev, 0, 0);
+       if (!il->hw_base) {
+               err = -ENODEV;
+               goto out_pci_release_regions;
+       }
+
+       D_INFO("pci_resource_len = 0x%08llx\n",
+              (unsigned long long)pci_resource_len(pdev, 0));
+       D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+       /* We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state */
+       pci_write_config_byte(pdev, 0x41, 0x00);
+
+       /* these spin locks will be used in apm_ops.init and EEPROM access
+        * we should init now
+        */
+       spin_lock_init(&il->reg_lock);
+       spin_lock_init(&il->lock);
+
+       /*
+        * stop and reset the on-board processor just in case it is in a
+        * strange state ... like being left stranded by a primary kernel
+        * and this is now the kdump kernel trying to start up
+        */
+       _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /***********************
+        * 4. Read EEPROM
+        * ********************/
+
+       /* Read the EEPROM */
+       err = il_eeprom_init(il);
+       if (err) {
+               IL_ERR("Unable to init EEPROM\n");
+               goto out_iounmap;
+       }
+       /* MAC Address location in EEPROM same for 3945/4965 */
+       eeprom = (struct il3945_eeprom *)il->eeprom;
+       D_INFO("MAC address: %pM\n", eeprom->mac_address);
+       SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+       /***********************
+        * 5. Setup HW Constants
+        * ********************/
+       /* Device-specific setup */
+       if (il3945_hw_set_hw_params(il)) {
+               IL_ERR("failed to set hw settings\n");
+               goto out_eeprom_free;
+       }
+
+       /***********************
+        * 6. Setup il
+        * ********************/
+
+       err = il3945_init_drv(il);
+       if (err) {
+               IL_ERR("initializing driver failed\n");
+               goto out_unset_hw_params;
+       }
+
+       IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+       /***********************
+        * 7. Setup Services
+        * ********************/
+
+       spin_lock_irqsave(&il->lock, flags);
+       il_disable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       pci_enable_msi(il->pci_dev);
+
+       err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+       if (err) {
+               IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+               goto out_disable_msi;
+       }
+
+       err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+       if (err) {
+               IL_ERR("failed to create sysfs device attributes\n");
+               goto out_release_irq;
+       }
+
+       il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
+                           &il->ctx);
+       il3945_setup_deferred_work(il);
+       il3945_setup_handlers(il);
+       il_power_initialize(il);
+
+       /*********************************
+        * 8. Setup and Register mac80211
+        * *******************************/
+
+       il_enable_interrupts(il);
+
+       err = il3945_setup_mac(il);
+       if (err)
+               goto out_remove_sysfs;
+
+       err = il_dbgfs_register(il, DRV_NAME);
+       if (err)
+               IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+                      err);
+
+       /* Start monitoring the killswitch */
+       queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+       return 0;
+
+out_remove_sysfs:
+       destroy_workqueue(il->workqueue);
+       il->workqueue = NULL;
+       sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+       free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+       pci_disable_msi(il->pci_dev);
+       il_free_geos(il);
+       il_free_channel_map(il);
+out_unset_hw_params:
+       il3945_unset_hw_params(il);
+out_eeprom_free:
+       il_eeprom_free(il);
+out_iounmap:
+       pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+       pci_release_regions(pdev);
+out_pci_disable_device:
+       pci_set_drvdata(pdev, NULL);
+       pci_disable_device(pdev);
+out_ieee80211_free_hw:
+       il_free_traffic_mem(il);
+       ieee80211_free_hw(il->hw);
+out:
+       return err;
+}
+
+static void __devexit
+il3945_pci_remove(struct pci_dev *pdev)
+{
+       struct il_priv *il = pci_get_drvdata(pdev);
+       unsigned long flags;
+
+       if (!il)
+               return;
+
+       D_INFO("*** UNLOAD DRIVER ***\n");
+
+       il_dbgfs_unregister(il);
+
+       set_bit(S_EXIT_PENDING, &il->status);
+
+       il_leds_exit(il);
+
+       if (il->mac80211_registered) {
+               ieee80211_unregister_hw(il->hw);
+               il->mac80211_registered = 0;
+       } else {
+               il3945_down(il);
+       }
+
+       /*
+        * Make sure device is reset to low power before unloading driver.
+        * This may be redundant with il_down(), but there are paths to
+        * run il_down() without calling apm_ops.stop(), and there are
+        * paths to avoid running il_down() at all before leaving driver.
+        * This (inexpensive) call *makes sure* device is reset.
+        */
+       il_apm_stop(il);
+
+       /* make sure we flush any pending irq or
+        * tasklet for the driver
+        */
+       spin_lock_irqsave(&il->lock, flags);
+       il_disable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       il3945_synchronize_irq(il);
+
+       sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+       cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+       il3945_dealloc_ucode_pci(il);
+
+       if (il->rxq.bd)
+               il3945_rx_queue_free(il, &il->rxq);
+       il3945_hw_txq_ctx_free(il);
+
+       il3945_unset_hw_params(il);
+
+       /*netif_stop_queue(dev); */
+       flush_workqueue(il->workqueue);
+
+       /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+        * il->workqueue... so we can't take down the workqueue
+        * until now... */
+       destroy_workqueue(il->workqueue);
+       il->workqueue = NULL;
+       il_free_traffic_mem(il);
+
+       free_irq(pdev->irq, il);
+       pci_disable_msi(pdev);
+
+       pci_iounmap(pdev, il->hw_base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       il_free_channel_map(il);
+       il_free_geos(il);
+       kfree(il->scan_cmd);
+       if (il->beacon_skb)
+               dev_kfree_skb(il->beacon_skb);
+
+       ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+       .name = DRV_NAME,
+       .id_table = il3945_hw_card_ids,
+       .probe = il3945_pci_probe,
+       .remove = __devexit_p(il3945_pci_remove),
+       .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+       int ret;
+
+       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+       pr_info(DRV_COPYRIGHT "\n");
+
+       ret = il3945_rate_control_register();
+       if (ret) {
+               pr_err("Unable to register rate control algorithm: %d\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&il3945_driver);
+       if (ret) {
+               pr_err("Unable to initialize PCI module\n");
+               goto error_register;
+       }
+
+       return ret;
+
+error_register:
+       il3945_rate_control_unregister();
+       return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+       pci_unregister_driver(&il3945_driver);
+       il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+                  S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644 (file)
index 0000000..30ad404
--- /dev/null
@@ -0,0 +1,995 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
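+/*
+ * Expected-throughput metrics, indexed by rate idx.  They feed the per-rate
+ * average_tpt estimate (scaled by the success ratio) that the scaling
+ * algorithm uses to compare a rate with its neighbours; zero marks rates
+ * that are not usable in the given mode.
+ */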
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+       7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+       7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+       0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+       7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+       s8 min_rssi;
+       u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+       {-60, RATE_54M_IDX},
+       {-64, RATE_48M_IDX},
+       {-72, RATE_36M_IDX},
+       {-80, RATE_24M_IDX},
+       {-84, RATE_18M_IDX},
+       {-85, RATE_12M_IDX},
+       {-87, RATE_9M_IDX},
+       {-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+       {-60, RATE_54M_IDX},
+       {-64, RATE_48M_IDX},
+       {-68, RATE_36M_IDX},
+       {-80, RATE_24M_IDX},
+       {-84, RATE_18M_IDX},
+       {-85, RATE_12M_IDX},
+       {-86, RATE_11M_IDX},
+       {-88, RATE_5M_IDX},
+       {-90, RATE_2M_IDX},
+       {-92, RATE_1M_IDX}
+};
+
+#define RATE_MAX_WINDOW          62
+#define RATE_FLUSH             (3*HZ)
+#define RATE_WIN_FLUSH       (HZ/2)
+#define IL39_RATE_HIGH_TH          11520
+#define IL_SUCCESS_UP_TH          8960
+#define IL_SUCCESS_DOWN_TH       10880
+#define RATE_MIN_FAILURE_TH       6
+#define RATE_MIN_SUCCESS_TH       8
+#define RATE_DECREASE_TH       1920
+#define RATE_RETRY_TH       15
+
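+/*
+ * il3945_get_rate_idx_by_rssi - pick an initial rate idx from signal strength
+ *
+ * Walks the per-band throughput table (ordered fastest to slowest) and
+ * returns the idx of the fastest rate whose min_rssi requirement is met;
+ * out-of-range RSSI values are clamped to IL_MIN_RSSI_VAL first.
+ */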
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+       u32 idx = 0;
+       u32 table_size = 0;
+       struct il3945_tpt_entry *tpt_table = NULL;
+
+       if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+               rssi = IL_MIN_RSSI_VAL;
+
+       switch (band) {
+       case IEEE80211_BAND_2GHZ:
+               tpt_table = il3945_tpt_table_g;
+               table_size = ARRAY_SIZE(il3945_tpt_table_g);
+               break;
+
+       case IEEE80211_BAND_5GHZ:
+               tpt_table = il3945_tpt_table_a;
+               table_size = ARRAY_SIZE(il3945_tpt_table_a);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+
+       while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+               idx++;
+
+       idx = min(idx, (table_size - 1));
+
+       return tpt_table[idx].idx;
+}
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+       win->data = 0;
+       win->success_counter = 0;
+       win->success_ratio = -1;
+       win->counter = 0;
+       win->average_tpt = IL_INVALID_VALUE;
+       win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed.  If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+       int unflushed = 0;
+       int i;
+       unsigned long flags;
+       struct il_priv *il __maybe_unused = rs_sta->il;
+
+       /*
+        * For each rate, if we have collected data on that rate
+        * and it has been more than RATE_WIN_FLUSH
+        * since we flushed, clear out the gathered stats
+        */
+       for (i = 0; i < RATE_COUNT_3945; i++) {
+               if (!rs_sta->win[i].counter)
+                       continue;
+
+               spin_lock_irqsave(&rs_sta->lock, flags);
+               if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+                       D_RATE("flushing %d samples of rate " "idx %d\n",
+                              rs_sta->win[i].counter, i);
+                       il3945_clear_win(&rs_sta->win[i]);
+               } else
+                       unflushed++;
+               spin_unlock_irqrestore(&rs_sta->lock, flags);
+       }
+
+       return unflushed;
+}
+
+#define RATE_FLUSH_MAX              5000       /* msec */
+#define RATE_FLUSH_MIN              50 /* msec */
+#define IL_AVERAGE_PACKETS             1500
+
+static void
+il3945_bg_rate_scale_flush(unsigned long data)
+{
+       struct il3945_rs_sta *rs_sta = (void *)data;
+       struct il_priv *il __maybe_unused = rs_sta->il;
+       int unflushed = 0;
+       unsigned long flags;
+       u32 packet_count, duration, pps;
+
+       D_RATE("enter\n");
+
+       unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       /* Number of packets Rx'd since last time this timer ran */
+       packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+       rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+       if (unflushed) {
+               duration =
+                   jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+               D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+               /* Determine packets per second */
+               if (duration)
+                       pps = (packet_count * 1000) / duration;
+               else
+                       pps = 0;
+
+               if (pps) {
+                       duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+                       if (duration < RATE_FLUSH_MIN)
+                               duration = RATE_FLUSH_MIN;
+                       else if (duration > RATE_FLUSH_MAX)
+                               duration = RATE_FLUSH_MAX;
+               } else
+                       duration = RATE_FLUSH_MAX;
+
+               rs_sta->flush_time = msecs_to_jiffies(duration);
+
+               D_RATE("new flush period: %d msec ave %d\n", duration,
+                      packet_count);
+
+               mod_timer(&rs_sta->rate_scale_flush,
+                         jiffies + rs_sta->flush_time);
+
+               rs_sta->last_partial_flush = jiffies;
+       } else {
+               rs_sta->flush_time = RATE_FLUSH;
+               rs_sta->flush_pending = 0;
+       }
+       /* If there weren't any unflushed entries, we don't schedule the timer
+        * to run again */
+
+       rs_sta->last_flush = jiffies;
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       D_RATE("leave\n");
+}
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last RATE_MAX_WINDOW (62) packets transmitted
+ * at this rate.  win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+                      struct il3945_rate_scale_data *win, int success,
+                      int retries, int idx)
+{
+       unsigned long flags;
+       s32 fail_count;
+       struct il_priv *il __maybe_unused = rs_sta->il;
+
+       if (!retries) {
+               D_RATE("leave: retries == 0 -- should be at least 1\n");
+               return;
+       }
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       /*
+        * Keep track of only the latest 62 tx frame attempts in this rate's
+        * history win; anything older isn't really relevant any more.
+        * If we have filled up the sliding win, drop the oldest attempt;
+        * if the oldest attempt (highest bit in bitmap) shows "success",
+        * subtract "1" from the success counter (this is the main reason
+        * we keep these bitmaps!).
+        */
+       while (retries > 0) {
+               if (win->counter >= RATE_MAX_WINDOW) {
+
+                       /* remove earliest */
+                       win->counter = RATE_MAX_WINDOW - 1;
+
+                       if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+                               win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+                               win->success_counter--;
+                       }
+               }
+
+               /* Increment frames-attempted counter */
+               win->counter++;
+
+               /* Shift bitmap by one frame (throw away oldest history),
+                * OR in "1", and increment "success" if this
+                * frame was successful. */
+               win->data <<= 1;
+               if (success > 0) {
+                       win->success_counter++;
+                       win->data |= 0x1;
+                       success--;
+               }
+
+               retries--;
+       }
+
+       /* Calculate current success ratio, avoid divide-by-0! */
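+       /* The ratio is kept scaled by 128, i.e. 128 * percent. */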
+       if (win->counter > 0)
+               win->success_ratio =
+                   128 * (100 * win->success_counter) / win->counter;
+       else
+               win->success_ratio = IL_INVALID_VALUE;
+
+       fail_count = win->counter - win->success_counter;
+
+       /* Calculate average throughput, if we have enough history. */
+       if (fail_count >= RATE_MIN_FAILURE_TH ||
+           win->success_counter >= RATE_MIN_SUCCESS_TH)
+               win->average_tpt =
+                   ((win->success_ratio * rs_sta->expected_tpt[idx] +
+                     64) / 128);
+       else
+               win->average_tpt = IL_INVALID_VALUE;
+
+       /* Tag this win as having been updated */
+       win->stamp = jiffies;
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+       struct ieee80211_hw *hw = il->hw;
+       struct ieee80211_conf *conf = &il->hw->conf;
+       struct il3945_sta_priv *psta;
+       struct il3945_rs_sta *rs_sta;
+       struct ieee80211_supported_band *sband;
+       int i;
+
+       D_INFO("enter\n");
+       if (sta_id == il->ctx.bcast_sta_id)
+               goto out;
+
+       psta = (struct il3945_sta_priv *)sta->drv_priv;
+       rs_sta = &psta->rs_sta;
+       sband = hw->wiphy->bands[conf->channel->band];
+
+       rs_sta->il = il;
+
+       rs_sta->start_rate = RATE_INVALID;
+
+       /* default to just 802.11b */
+       rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+       rs_sta->last_partial_flush = jiffies;
+       rs_sta->last_flush = jiffies;
+       rs_sta->flush_time = RATE_FLUSH;
+       rs_sta->last_tx_packets = 0;
+
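+       /* The flush timer itself is set up in il3945_rs_alloc_sta(); only its
+        * callback and argument are assigned here. */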
+       rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+       rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
+
+       for (i = 0; i < RATE_COUNT_3945; i++)
+               il3945_clear_win(&rs_sta->win[i]);
+
+       /* TODO: what is a good starting rate for STA? About middle? Maybe not
+        * the lowest or the highest rate.. Could consider using RSSI from
+        * previous packets? Need to have IEEE 802.1X auth succeed immediately
+        * after assoc.. */
+
+       for (i = sband->n_bitrates - 1; i >= 0; i--) {
+               if (sta->supp_rates[sband->band] & (1 << i)) {
+                       rs_sta->last_txrate_idx = i;
+                       break;
+               }
+       }
+
+       il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+       /* For the 5 GHz band the rate table starts at IL_FIRST_OFDM_RATE */
+       if (sband->band == IEEE80211_BAND_5GHZ) {
+               rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+               il->_3945.sta_supp_rates =
+                   il->_3945.sta_supp_rates << IL_FIRST_OFDM_RATE;
+       }
+
+out:
+       il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+       D_INFO("leave\n");
+}
+
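+/* mac80211 rate-control 'alloc' hook; the driver reuses its il_priv as the
+ * rate-control private data instead of allocating anything. */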
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+       return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+       return;
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+       struct il3945_rs_sta *rs_sta;
+       struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+       struct il_priv *il __maybe_unused = il_priv;
+
+       D_RATE("enter\n");
+
+       rs_sta = &psta->rs_sta;
+
+       spin_lock_init(&rs_sta->lock);
+       init_timer(&rs_sta->rate_scale_flush);
+
+       D_RATE("leave\n");
+
+       return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+       struct il3945_rs_sta *rs_sta = il_sta;
+
+       /*
+        * Be careful not to use any members of il3945_rs_sta (like trying
+        * to use il_priv to print out debugging) since it may not be fully
+        * initialized at this point.
+        */
+       del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+                   struct ieee80211_sta *sta, void *il_sta,
+                   struct sk_buff *skb)
+{
+       s8 retries = 0, current_count;
+       int scale_rate_idx, first_idx, last_idx;
+       unsigned long flags;
+       struct il_priv *il = (struct il_priv *)il_rate;
+       struct il3945_rs_sta *rs_sta = il_sta;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       D_RATE("enter\n");
+
+       retries = info->status.rates[0].count;
+       /* Sanity Check for retries */
+       if (retries > RATE_RETRY_TH)
+               retries = RATE_RETRY_TH;
+
+       first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+       if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+               D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+               return;
+       }
+
+       if (!il_sta) {
+               D_RATE("leave: No STA il data to update!\n");
+               return;
+       }
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (!rs_sta->il) {
+               D_RATE("leave: STA il data uninitialized!\n");
+               return;
+       }
+
+       rs_sta->tx_packets++;
+
+       scale_rate_idx = first_idx;
+       last_idx = first_idx;
+
+       /*
+        * Update the win for each rate.  We determine which rates
+        * were Tx'd based on the total number of retries vs. the number
+        * of retries configured for each rate -- currently the single il
+        * value 'retry_rate' rather than a rate-specific count.
+        *
+        * On exit from this while loop last_idx indicates the rate
+        * at which the frame was finally transmitted (or failed if no
+        * ACK)
+        */
+       while (retries > 1) {
+               if ((retries - 1) < il->retry_rate) {
+                       current_count = (retries - 1);
+                       last_idx = scale_rate_idx;
+               } else {
+                       current_count = il->retry_rate;
+                       last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+               }
+
+               /* Update this rate accounting for as many retries
+                * as was used for it (per current_count) */
+               il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+                                      current_count, scale_rate_idx);
+               D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+                      current_count);
+
+               retries -= current_count;
+
+               scale_rate_idx = last_idx;
+       }
+
+       /* Update the last idx win with success/failure based on ACK */
+       D_RATE("Update rate %d with %s.\n", last_idx,
+              (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+       il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+                              info->flags & IEEE80211_TX_STAT_ACK, 1,
+                              last_idx);
+
+       /* We updated the rate scale win -- if it's been more than
+        * flush_time since the last run, schedule the flush
+        * again */
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       if (!rs_sta->flush_pending &&
+           time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+               rs_sta->last_partial_flush = jiffies;
+               rs_sta->flush_pending = 1;
+               mod_timer(&rs_sta->rate_scale_flush,
+                         jiffies + rs_sta->flush_time);
+       }
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       D_RATE("leave\n");
+}
+
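+/*
+ * il3945_get_adjacent_rate - find the usable rates adjacent to idx
+ *
+ * Returns (high << 8) | low, where low and high are the nearest usable
+ * lower and higher rate idxs allowed by rate_mask, or RATE_INVALID when
+ * no such rate exists.
+ */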
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+                        enum ieee80211_band band)
+{
+       u8 high = RATE_INVALID;
+       u8 low = RATE_INVALID;
+       struct il_priv *il __maybe_unused = rs_sta->il;
+
+       /* 802.11A walks to the next literal adjacent rate in
+        * the rate table */
+       if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+               int i;
+               u32 mask;
+
+               /* Find the previous rate that is in the rate mask */
+               i = idx - 1;
+               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+                       if (rate_mask & mask) {
+                               low = i;
+                               break;
+                       }
+               }
+
+               /* Find the next rate that is in the rate mask */
+               i = idx + 1;
+               for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+                       if (rate_mask & mask) {
+                               high = i;
+                               break;
+                       }
+               }
+
+               return (high << 8) | low;
+       }
+
+       low = idx;
+       while (low != RATE_INVALID) {
+               if (rs_sta->tgg)
+                       low = il3945_rates[low].prev_rs_tgg;
+               else
+                       low = il3945_rates[low].prev_rs;
+               if (low == RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << low))
+                       break;
+               D_RATE("Skipping masked lower rate: %d\n", low);
+       }
+
+       high = idx;
+       while (high != RATE_INVALID) {
+               if (rs_sta->tgg)
+                       high = il3945_rates[high].next_rs_tgg;
+               else
+                       high = il3945_rates[high].next_rs;
+               if (high == RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << high))
+                       break;
+               D_RATE("Skipping masked higher rate: %d\n", high);
+       }
+
+       return (high << 8) | low;
+}
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Returns the ieee80211_rate structure allocated by the driver.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+                  struct ieee80211_tx_rate_control *txrc)
+{
+       struct ieee80211_supported_band *sband = txrc->sband;
+       struct sk_buff *skb = txrc->skb;
+       u8 low = RATE_INVALID;
+       u8 high = RATE_INVALID;
+       u16 high_low;
+       int idx;
+       struct il3945_rs_sta *rs_sta = il_sta;
+       struct il3945_rate_scale_data *win = NULL;
+       int current_tpt = IL_INVALID_VALUE;
+       int low_tpt = IL_INVALID_VALUE;
+       int high_tpt = IL_INVALID_VALUE;
+       u32 fail_count;
+       s8 scale_action = 0;
+       unsigned long flags;
+       u16 rate_mask;
+       s8 max_rate_idx = -1;
+       struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       D_RATE("enter\n");
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (rs_sta && !rs_sta->il) {
+               D_RATE("Rate scaling information not initialized yet.\n");
+               il_sta = NULL;
+       }
+
+       if (rate_control_send_low(sta, il_sta, txrc))
+               return;
+
+       rate_mask = sta->supp_rates[sband->band];
+
+       /* get user max rate if set */
+       max_rate_idx = txrc->max_rate_idx;
+       if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+               max_rate_idx += IL_FIRST_OFDM_RATE;
+       if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+               max_rate_idx = -1;
+
+       idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       /* for a recent association, choose the best starting rate based
+        * on the rssi value
+        */
+       if (rs_sta->start_rate != RATE_INVALID) {
+               if (rs_sta->start_rate < idx &&
+                   (rate_mask & (1 << rs_sta->start_rate)))
+                       idx = rs_sta->start_rate;
+               rs_sta->start_rate = RATE_INVALID;
+       }
+
+       /* force user max rate if set by user */
+       if (max_rate_idx != -1 && max_rate_idx < idx) {
+               if (rate_mask & (1 << max_rate_idx))
+                       idx = max_rate_idx;
+       }
+
+       win = &(rs_sta->win[idx]);
+
+       fail_count = win->counter - win->success_counter;
+
+       if (fail_count < RATE_MIN_FAILURE_TH &&
+           win->success_counter < RATE_MIN_SUCCESS_TH) {
+               spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+               D_RATE("Invalid average_tpt on rate %d: "
+                      "counter: %d, success_counter: %d, "
+                      "expected_tpt is %sNULL\n", idx, win->counter,
+                      win->success_counter,
+                      rs_sta->expected_tpt ? "not " : "");
+
+               /* Can't calculate this yet; not enough history */
+               win->average_tpt = IL_INVALID_VALUE;
+               goto out;
+
+       }
+
+       current_tpt = win->average_tpt;
+
+       high_low =
+           il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+       low = high_low & 0xff;
+       high = (high_low >> 8) & 0xff;
+
+       /* If the user set a max rate, don't allow anything higher than that constraint */
+       if (max_rate_idx != -1 && max_rate_idx < high)
+               high = RATE_INVALID;
+
+       /* Collect Measured throughputs of adjacent rates */
+       if (low != RATE_INVALID)
+               low_tpt = rs_sta->win[low].average_tpt;
+
+       if (high != RATE_INVALID)
+               high_tpt = rs_sta->win[high].average_tpt;
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       scale_action = 0;
+
+       /* Low success ratio, need to drop the rate */
+       if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+               D_RATE("decrease rate because of low success_ratio\n");
+               scale_action = -1;
+               /* No throughput measured yet for adjacent rates,
+                * try to increase */
+       } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+               if (high != RATE_INVALID &&
+                   win->success_ratio >= RATE_INCREASE_TH)
+                       scale_action = 1;
+               else if (low != RATE_INVALID)
+                       scale_action = 0;
+
+               /* Both adjacent throughputs are measured, but neither one has
+                * better throughput; we're using the best rate, don't change
+                * it! */
+       } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+                  && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+               D_RATE("No action -- low [%d] & high [%d] < "
+                      "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+               scale_action = 0;
+
+               /* At least one of the rates has better throughput */
+       } else {
+               if (high_tpt != IL_INVALID_VALUE) {
+
+                       /* High rate has better throughput, increase
+                        * rate */
+                       if (high_tpt > current_tpt &&
+                           win->success_ratio >= RATE_INCREASE_TH)
+                               scale_action = 1;
+                       else {
+                               D_RATE("no rate change -- higher rate not better or success ratio too low\n");
+                               scale_action = 0;
+                       }
+               } else if (low_tpt != IL_INVALID_VALUE) {
+                       if (low_tpt > current_tpt) {
+                               D_RATE("decrease rate because of low tpt\n");
+                               scale_action = -1;
+                       } else if (win->success_ratio >= RATE_INCREASE_TH) {
+                               /* Lower rate isn't better and our success
+                                * ratio is good, try a higher rate */
+                               scale_action = 1;
+                       }
+               }
+       }
+
+       /* Sanity check; asked for decrease, but success rate or throughput
+        * has been good at old rate.  Don't change it. */
+       if (scale_action == -1 && low != RATE_INVALID &&
+           (win->success_ratio > RATE_HIGH_TH ||
+            current_tpt > 100 * rs_sta->expected_tpt[low]))
+               scale_action = 0;
+
+       switch (scale_action) {
+       case -1:
+
+               /* Decrease rate */
+               if (low != RATE_INVALID)
+                       idx = low;
+               break;
+
+       case 1:
+               /* Increase rate */
+               if (high != RATE_INVALID)
+                       idx = high;
+
+               break;
+
+       case 0:
+       default:
+               /* No change */
+               break;
+       }
+
+       D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+              low, high);
+
+out:
+
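+       /* mac80211 expects a band-relative rate idx, so for 5 GHz strip off
+        * the CCK part of the driver's global rate table. */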
+       if (sband->band == IEEE80211_BAND_5GHZ) {
+               if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+                       idx = IL_FIRST_OFDM_RATE;
+               rs_sta->last_txrate_idx = idx;
+               info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+       } else {
+               rs_sta->last_txrate_idx = idx;
+               info->control.rates[0].idx = rs_sta->last_txrate_idx;
+       }
+
+       D_RATE("leave: %d\n", idx);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il3945_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int j;
+       ssize_t ret;
+       struct il3945_rs_sta *lq_sta = file->private_data;
+
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       desc +=
+           sprintf(buff + desc,
+                   "tx packets=%d last rate idx=%d\n"
+                   "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+                   lq_sta->last_txrate_idx, lq_sta->start_rate,
+                   jiffies_to_msecs(lq_sta->flush_time));
+       for (j = 0; j < RATE_COUNT_3945; j++) {
+               desc +=
+                   sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+                           lq_sta->win[j].counter,
+                           lq_sta->win[j].success_counter,
+                           lq_sta->win[j].success_ratio);
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+       .read = il3945_sta_dbgfs_stats_table_read,
+       .open = il3945_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+       struct il3945_rs_sta *lq_sta = il_sta;
+
+       lq_sta->rs_sta_dbgfs_stats_table_file =
+           debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+                               &rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+       struct il3945_rs_sta *lq_sta = il_sta;
+       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_ops = {
+       .module = NULL,
+       .name = RS_NAME,
+       .tx_status = il3945_rs_tx_status,
+       .get_rate = il3945_rs_get_rate,
+       .rate_init = il3945_rs_rate_init_stub,
+       .alloc = il3945_rs_alloc,
+       .free = il3945_rs_free,
+       .alloc_sta = il3945_rs_alloc_sta,
+       .free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+       .add_sta_debugfs = il3945_add_debugfs,
+       .remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+       struct il_priv *il = hw->priv;
+       s32 rssi = 0;
+       unsigned long flags;
+       struct il3945_rs_sta *rs_sta;
+       struct ieee80211_sta *sta;
+       struct il3945_sta_priv *psta;
+
+       D_RATE("enter\n");
+
+       rcu_read_lock();
+
+       sta =
+           ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+       if (!sta) {
+               D_RATE("Unable to find station to initialize rate scaling.\n");
+               rcu_read_unlock();
+               return;
+       }
+
+       psta = (void *)sta->drv_priv;
+       rs_sta = &psta->rs_sta;
+
+       spin_lock_irqsave(&rs_sta->lock, flags);
+
+       rs_sta->tgg = 0;
+       switch (il->band) {
+       case IEEE80211_BAND_2GHZ:
+               /* TODO: this always does G, not a regression */
+               if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+                       rs_sta->tgg = 1;
+                       rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+               } else
+                       rs_sta->expected_tpt = il3945_expected_tpt_g;
+               break;
+
+       case IEEE80211_BAND_5GHZ:
+               rs_sta->expected_tpt = il3945_expected_tpt_a;
+               break;
+       case IEEE80211_NUM_BANDS:
+               BUG();
+               break;
+       }
+
+       spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+       rssi = il->_3945.last_rx_rssi;
+       if (rssi == 0)
+               rssi = IL_MIN_RSSI_VAL;
+
+       D_RATE("Network RSSI: %d\n", rssi);
+
+       rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+       D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+              rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+       rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+       return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+       ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644 (file)
index 0000000..863664f
--- /dev/null
@@ -0,0 +1,2751 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+       struct il_host_cmd cmd = {
+               .id = C_LEDS,
+               .len = sizeof(struct il_led_cmd),
+               .data = led_cmd,
+               .flags = CMD_ASYNC,
+               .callback = NULL,
+       };
+
+       return il_send_cmd(il, &cmd);
+}
+
+const struct il_led_ops il3945_led_ops = {
+       .cmd = il3945_send_led_cmd,
+};
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
+       [RATE_##r##M_IDX] = { RATE_##r##M_PLCP,   \
+                                   RATE_##r##M_IEEE,   \
+                                   RATE_##ip##M_IDX, \
+                                   RATE_##in##M_IDX, \
+                                   RATE_##rp##M_IDX, \
+                                   RATE_##rn##M_IDX, \
+                                   RATE_##pp##M_IDX, \
+                                   RATE_##np##M_IDX, \
+                                   RATE_##r##M_IDX_TBL, \
+                                   RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ *   rate, prev ieee rate, next ieee rate, prev rate-scale rate,
+ *   next rate-scale rate, prev tgg rate-scale rate, next tgg rate-scale rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+       IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),        /*  1mbps */
+       IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),      /*  2mbps */
+       IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),   /* 5.5mbps */
+       IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),  /* 11mbps */
+       IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),    /*  6mbps */
+       IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),   /*  9mbps */
+       IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),       /* 12mbps */
+       IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),       /* 18mbps */
+       IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),       /* 24mbps */
+       IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),       /* 36mbps */
+       IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),       /* 48mbps */
+       IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),    /* 54mbps */
+};
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+       u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+       if (rate == RATE_INVALID)
+               rate = rate_idx;
+       return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
+ *   Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging.  This function is just a placeholder as-is,
+ *   you'll need to provide the special bits! ...
+ *   ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+       int i;
+       u32 base;               /* SRAM address of event log header */
+       u32 disable_ptr;        /* SRAM address of event-disable bitmap array */
+       u32 array_size;         /* # of u32 entries in array */
+       static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+               0x00000000,     /*   31 -    0  Event id numbers */
+               0x00000000,     /*   63 -   32 */
+               0x00000000,     /*   95 -   64 */
+               0x00000000,     /*  127 -   96 */
+               0x00000000,     /*  159 -  128 */
+               0x00000000,     /*  191 -  160 */
+               0x00000000,     /*  223 -  192 */
+               0x00000000,     /*  255 -  224 */
+               0x00000000,     /*  287 -  256 */
+               0x00000000,     /*  319 -  288 */
+               0x00000000,     /*  351 -  320 */
+               0x00000000,     /*  383 -  352 */
+               0x00000000,     /*  415 -  384 */
+               0x00000000,     /*  447 -  416 */
+               0x00000000,     /*  479 -  448 */
+               0x00000000,     /*  511 -  480 */
+               0x00000000,     /*  543 -  512 */
+               0x00000000,     /*  575 -  544 */
+               0x00000000,     /*  607 -  576 */
+               0x00000000,     /*  639 -  608 */
+               0x00000000,     /*  671 -  640 */
+               0x00000000,     /*  703 -  672 */
+               0x00000000,     /*  735 -  704 */
+               0x00000000,     /*  767 -  736 */
+               0x00000000,     /*  799 -  768 */
+               0x00000000,     /*  831 -  800 */
+               0x00000000,     /*  863 -  832 */
+               0x00000000,     /*  895 -  864 */
+               0x00000000,     /*  927 -  896 */
+               0x00000000,     /*  959 -  928 */
+               0x00000000,     /*  991 -  960 */
+               0x00000000,     /* 1023 -  992 */
+               0x00000000,     /* 1055 - 1024 */
+               0x00000000,     /* 1087 - 1056 */
+               0x00000000,     /* 1119 - 1088 */
+               0x00000000,     /* 1151 - 1120 */
+               0x00000000,     /* 1183 - 1152 */
+               0x00000000,     /* 1215 - 1184 */
+               0x00000000,     /* 1247 - 1216 */
+               0x00000000,     /* 1279 - 1248 */
+               0x00000000,     /* 1311 - 1280 */
+               0x00000000,     /* 1343 - 1312 */
+               0x00000000,     /* 1375 - 1344 */
+               0x00000000,     /* 1407 - 1376 */
+               0x00000000,     /* 1439 - 1408 */
+               0x00000000,     /* 1471 - 1440 */
+               0x00000000,     /* 1503 - 1472 */
+       };
+
+       base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+       if (!il3945_hw_valid_rtc_data_addr(base)) {
+               IL_ERR("Invalid event log pointer 0x%08X\n", base);
+               return;
+       }
+
+       disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+       array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+       if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+               D_INFO("Disabling selected uCode log events at 0x%x\n",
+                      disable_ptr);
+               for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+                       il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+                                         evt_disable[i]);
+
+       } else {
+               D_INFO("Selected uCode log events may be disabled\n");
+               D_INFO("  by writing \"1\"s into disable bitmap\n");
+               D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+                      array_size);
+       }
+
+}
+
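+/* Map a PLCP rate value reported by the hardware back to a driver rate idx,
+ * or -1 if the PLCP value is unknown. */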
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+       int idx;
+
+       for (idx = 0; idx < RATE_COUNT_3945; idx++)
+               if (il3945_rates[idx].plcp == plcp)
+                       return idx;
+       return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+       switch (status & TX_STATUS_MSK) {
+       case TX_3945_STATUS_SUCCESS:
+               return "SUCCESS";
+               TX_STATUS_ENTRY(SHORT_LIMIT);
+               TX_STATUS_ENTRY(LONG_LIMIT);
+               TX_STATUS_ENTRY(FIFO_UNDERRUN);
+               TX_STATUS_ENTRY(MGMNT_ABORT);
+               TX_STATUS_ENTRY(NEXT_FRAG);
+               TX_STATUS_ENTRY(LIFE_EXPIRE);
+               TX_STATUS_ENTRY(DEST_PS);
+               TX_STATUS_ENTRY(ABORTED);
+               TX_STATUS_ENTRY(BT_RETRY);
+               TX_STATUS_ENTRY(STA_INVALID);
+               TX_STATUS_ENTRY(FRAG_DROPPED);
+               TX_STATUS_ENTRY(TID_DISABLE);
+               TX_STATUS_ENTRY(FRAME_FLUSHED);
+               TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+               TX_STATUS_ENTRY(TX_LOCKED);
+               TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+       }
+
+       return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+       return "";
+}
+#endif
+
+/*
+ * Get the previous ieee rate from the rate scale table.
+ * For the A and B modes we need to override the previous
+ * value.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+       int next_rate = il3945_get_prev_ieee_rate(rate);
+
+       switch (il->band) {
+       case IEEE80211_BAND_5GHZ:
+               if (rate == RATE_12M_IDX)
+                       next_rate = RATE_9M_IDX;
+               else if (rate == RATE_6M_IDX)
+                       next_rate = RATE_6M_IDX;
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+                   il_is_associated(il)) {
+                       if (rate == RATE_11M_IDX)
+                               next_rate = RATE_5M_IDX;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return next_rate;
+}
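+
+/*
+ * (for documentation purposes)
+ * Minimal sketch of why the two overrides above exist, assuming the usual
+ * IEEE rate ordering 1, 2, 5.5, 6, 9, 11, 12, ... Mbps:
+ *
+ *     5 GHz:   prev(12M) would be 11M (CCK, invalid on A-band) -> use 9M
+ *              prev(6M)  would be 5.5M (CCK, invalid on A-band) -> stay at 6M
+ *     2.4 GHz: if associated and the peer advertises no OFDM rates,
+ *              prev(11M) is forced to 5.5M so we stay within CCK.
+ */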
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct il_queue *q = &txq->q;
+       struct il_tx_info *tx_info;
+
+       BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+       for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+            q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               tx_info = &txq->txb[txq->q.read_ptr];
+               ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+               tx_info->skb = NULL;
+               il->cfg->ops->lib->txq_free_tfd(il, txq);
+       }
+
+       if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+           txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+               il_wake_queue(il, txq);
+}
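+
+/*
+ * (for documentation purposes)
+ * Worked example of the reclaim loop above, with hypothetical numbers:
+ * assume q->n_bd = 256, q->read_ptr = 250 and the response reports
+ * idx = 2.  The loop bound becomes il_queue_inc_wrap(2, 256) = 3, so
+ * entries 250, 251, ..., 255, 0, 1, 2 are handed back to mac80211 and
+ * freed, and the loop exits with q->read_ptr = 3.
+ */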
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int idx = SEQ_TO_IDX(sequence);
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct ieee80211_tx_info *info;
+       struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+       u32 status = le32_to_cpu(tx_resp->status);
+       int rate_idx;
+       int fail;
+
+       if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+               IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+                      "is out of range [0-%d] %d %d\n", txq_id, idx,
+                      txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+               return;
+       }
+
+       txq->time_stamp = jiffies;
+       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+       ieee80211_tx_info_clear_status(info);
+
+       /* Fill the MRR chain with some info about on-chip retransmissions */
+       rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rate_idx -= IL_FIRST_OFDM_RATE;
+
+       fail = tx_resp->failure_frame;
+
+       info->status.rates[0].idx = rate_idx;
+       info->status.rates[0].count = fail + 1; /* add final attempt */
+
+       /* tx_status->rts_retry_count = tx_resp->failure_rts; */
+       info->flags |=
+           ((status & TX_STATUS_MSK) ==
+            TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+       D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+            il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+            tx_resp->failure_frame);
+
+       D_TX_REPLY("Tx queue reclaim %d\n", idx);
+       il3945_tx_queue_reclaim(il, txq_id, idx);
+
+       if (status & TX_ABORT_REQUIRED_MSK)
+               IL_ERR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ *  RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+       int i;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+
+       prev_stats = (__le32 *) &il->_3945.stats;
+       accum_stats = (u32 *) &il->_3945.accum_stats;
+       delta = (u32 *) &il->_3945.delta_stats;
+       max_delta = (u32 *) &il->_3945.max_delta;
+
+       for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+            i += sizeof(__le32), stats++, prev_stats++, delta++,
+            max_delta++, accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta =
+                           (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
+               }
+       }
+
+       /* reset accumulative stats for "no-counter" type stats */
+       il->_3945.accum_stats.general.temperature =
+           il->_3945.stats.general.temperature;
+       il->_3945.accum_stats.general.ttl_timestamp =
+           il->_3945.stats.general.ttl_timestamp;
+}
+#endif
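+
+/*
+ * (for documentation purposes)
+ * Worked example of the accumulation above, with hypothetical counters:
+ * if a counter was 100 in the previous notification and is 130 now,
+ * delta = 30 is added to the accumulated value and max_delta is raised
+ * to 30 if it was smaller.  If the new value is not larger than the old
+ * one (e.g. after a uCode restart reset the counter), the delta and
+ * accumulated values are left untouched for that field.
+ */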
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+       D_RX("Statistics notification received (%d vs %d).\n",
+            (int)sizeof(struct il3945_notif_stats),
+            le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+       memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       __le32 *flag = (__le32 *) &pkt->u.raw;
+
+       if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+               memset(&il->_3945.accum_stats, 0,
+                      sizeof(struct il3945_notif_stats));
+               memset(&il->_3945.delta_stats, 0,
+                      sizeof(struct il3945_notif_stats));
+               memset(&il->_3945.max_delta, 0,
+                      sizeof(struct il3945_notif_stats));
+#endif
+               D_RX("Statistics have been cleared\n");
+       }
+       il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+       /* Filter incoming packets to determine if they are targeted toward
+        * this network, discarding packets coming from ourselves */
+       switch (il->iw_mode) {
+       case NL80211_IFTYPE_ADHOC:      /* Header: Dest. | Source    | BSSID */
+               /* packets to our IBSS update information */
+               return !compare_ether_addr(header->addr3, il->bssid);
+       case NL80211_IFTYPE_STATION:    /* Header: Dest. | AP{BSSID} | Source */
+               /* packets from our AP update information */
+               return !compare_ether_addr(header->addr2, il->bssid);
+       default:
+               return 1;
+       }
+}
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+                              struct ieee80211_rx_status *stats)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+       struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+       struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+       u16 len = le16_to_cpu(rx_hdr->len);
+       struct sk_buff *skb;
+       __le16 fc = hdr->frame_control;
+
+       /* We received data from the HW, so stop the watchdog */
+       if (unlikely
+           (len + IL39_RX_FRAME_SIZE >
+            PAGE_SIZE << il->hw_params.rx_page_order)) {
+               D_DROP("Corruption detected!\n");
+               return;
+       }
+
+       /* We only process data packets if the interface is open */
+       if (unlikely(!il->is_open)) {
+               D_DROP("Dropping packet while interface is not open.\n");
+               return;
+       }
+
+       skb = dev_alloc_skb(128);
+       if (!skb) {
+               IL_ERR("dev_alloc_skb failed\n");
+               return;
+       }
+
+       if (!il3945_mod_params.sw_crypto)
+               il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+                                     le32_to_cpu(rx_end->status), stats);
+
+       skb_add_rx_frag(skb, 0, rxb->page,
+                       (void *)rx_hdr->payload - (void *)pkt, len);
+
+       il_update_stats(il, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(il->hw, skb);
+       il->alloc_rxb_page--;
+       rxb->page = NULL;
+}
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct ieee80211_hdr *header;
+       struct ieee80211_rx_status rx_status;
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+       struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+       struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+       u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+       u16 rx_stats_noise_diff __maybe_unused =
+           le16_to_cpu(rx_stats->noise_diff);
+       u8 network_packet;
+
+       rx_status.flag = 0;
+       rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+       rx_status.band =
+           (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+           IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+           ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+                                          rx_status.band);
+
+       rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+       if (rx_status.band == IEEE80211_BAND_5GHZ)
+               rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+       rx_status.antenna =
+           (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+           4;
+
+       /* set the preamble flag if appropriate */
+       if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       if ((unlikely(rx_stats->phy_count > 20))) {
+               D_DROP("dsp size out of range [0,20]: %d\n",
+                      rx_stats->phy_count);
+               return;
+       }
+
+       if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+           !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+               D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+               return;
+       }
+
+       /* Convert 3945's rssi indicator to dBm */
+       rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+       D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+               rx_stats_sig_avg, rx_stats_noise_diff);
+
+       header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+       network_packet = il3945_is_network_packet(il, header);
+
+       D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+               network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+               rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+       il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
+
+       if (network_packet) {
+               il->_3945.last_beacon_time =
+                   le32_to_cpu(rx_end->beacon_timestamp);
+               il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+               il->_3945.last_rx_rssi = rx_status.signal;
+       }
+
+       il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                               dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+       int count;
+       struct il_queue *q;
+       struct il3945_tfd *tfd, *tfd_tmp;
+
+       q = &txq->q;
+       tfd_tmp = (struct il3945_tfd *)txq->tfds;
+       tfd = &tfd_tmp[q->write_ptr];
+
+       if (reset)
+               memset(tfd, 0, sizeof(*tfd));
+
+       count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+       if (count >= NUM_TFD_CHUNKS || count < 0) {
+               IL_ERR("Error can not send more than %d chunks\n",
+                      NUM_TFD_CHUNKS);
+               return -EINVAL;
+       }
+
+       tfd->tbs[count].addr = cpu_to_le32(addr);
+       tfd->tbs[count].len = cpu_to_le32(len);
+
+       count++;
+
+       tfd->control_flags =
+           cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+       return 0;
+}
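+
+/*
+ * (for documentation purposes)
+ * Rough sketch of how the Tx path is expected to chain chunks into one
+ * TFD; cmd_phys, buf_phys and pad are placeholder names, not real symbols:
+ *
+ *     il3945_hw_txq_attach_buf_to_tfd(il, txq, cmd_phys, cmd_len, 1, 0);
+ *     il3945_hw_txq_attach_buf_to_tfd(il, txq, buf_phys, buf_len, 0, pad);
+ *
+ * The first call resets the TFD (count 0 -> 1), the second appends the
+ * payload (count 1 -> 2); control_flags then carries
+ * TFD_CTL_COUNT_SET(2) | TFD_CTL_PAD_SET(pad).
+ */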
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any idxes
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+       struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+       int idx = txq->q.read_ptr;
+       struct il3945_tfd *tfd = &tfd_tmp[idx];
+       struct pci_dev *dev = il->pci_dev;
+       int i;
+       int counter;
+
+       /* sanity check */
+       counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+       if (counter > NUM_TFD_CHUNKS) {
+               IL_ERR("Too many chunks: %i\n", counter);
+               /* @todo issue fatal error, it is quite serious situation */
+               return;
+       }
+
+       /* Unmap tx_cmd */
+       if (counter)
+               pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+                                dma_unmap_len(&txq->meta[idx], len),
+                                PCI_DMA_TODEVICE);
+
+       /* unmap chunks if any */
+
+       for (i = 1; i < counter; i++)
+               pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+                                le32_to_cpu(tfd->tbs[i].len),
+                                PCI_DMA_TODEVICE);
+
+       /* free SKB */
+       if (txq->txb) {
+               struct sk_buff *skb;
+
+               skb = txq->txb[txq->q.read_ptr].skb;
+
+               /* can be called from irqs-disabled context */
+               if (skb) {
+                       dev_kfree_skb_any(skb);
+                       txq->txb[txq->q.read_ptr].skb = NULL;
+               }
+       }
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
+ */
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+                           struct ieee80211_tx_info *info,
+                           struct ieee80211_hdr *hdr, int sta_id, int tx_id)
+{
+       u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+       u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945);
+       u16 rate_mask;
+       int rate;
+       u8 rts_retry_limit;
+       u8 data_retry_limit;
+       __le32 tx_flags;
+       __le16 fc = hdr->frame_control;
+       struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+       rate = il3945_rates[rate_idx].plcp;
+       tx_flags = tx_cmd->tx_flags;
+
+       /* We need to figure out how to get the sta->supp_rates while
+        * in this running context */
+       rate_mask = RATES_MASK_3945;
+
+       /* Set retry limit on DATA packets and Probe Responses */
+       if (ieee80211_is_probe_resp(fc))
+               data_retry_limit = 3;
+       else
+               data_retry_limit = IL_DEFAULT_TX_RETRY;
+       tx_cmd->data_retry_limit = data_retry_limit;
+
+       if (tx_id >= IL39_CMD_QUEUE_NUM)
+               rts_retry_limit = 3;
+       else
+               rts_retry_limit = 7;
+
+       if (data_retry_limit < rts_retry_limit)
+               rts_retry_limit = data_retry_limit;
+       tx_cmd->rts_retry_limit = rts_retry_limit;
+
+       tx_cmd->rate = rate;
+       tx_cmd->tx_flags = tx_flags;
+
+       /* OFDM */
+       tx_cmd->supp_rates[0] =
+           ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+       /* CCK */
+       tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+       D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+              "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+              le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+              tx_cmd->supp_rates[0]);
+}
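+
+/*
+ * (for documentation purposes)
+ * Example of the supp_rates encoding above, assuming RATES_MASK_3945
+ * covers all 12 rates (bits 0-11) with CCK in bits 0-3 and OFDM in
+ * bits 4-11 (i.e. IL_FIRST_OFDM_RATE == 4):
+ *
+ *     supp_rates[0] = (0x0FFF & IL_OFDM_RATES_MASK) >> 4 = 0xFF   (OFDM)
+ *     supp_rates[1] =  0x0FFF & 0xF                      = 0x0F   (CCK)
+ */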
+
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+       unsigned long flags_spin;
+       struct il_station_entry *station;
+
+       if (sta_id == IL_INVALID_STATION)
+               return IL_INVALID_STATION;
+
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       station = &il->stations[sta_id];
+
+       station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+       station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+       station->sta.mode = STA_CONTROL_MODIFY_MSK;
+       il_send_add_sta(il, &station->sta, CMD_ASYNC);
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+       D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+       return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+               if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+                       il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+                       _il_poll_bit(il, CSR_GPIO_IN,
+                                    CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+                                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+               }
+ */
+
+       il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+                             APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+                             ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+       _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+       il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+       il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+       il_wr(il, FH39_RCSR_WPTR(0), 0);
+       il_wr(il, FH39_RCSR_CONFIG(0),
+             FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+             FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+             FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+             FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+             (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+             FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+             (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+             FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+       /* fake read to flush all prev I/O */
+       il_rd(il, FH39_RSSR_CTRL);
+
+       return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+
+       /* bypass mode */
+       il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+       /* RA 0 is active */
+       il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+       /* all 6 FIFOs are active */
+       il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+       il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+       il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+       il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+       il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+       il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+       il_wr(il, FH39_TSSR_MSG_CONFIG,
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+             FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+       return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+       int rc;
+       int txq_id, slots_num;
+
+       il3945_hw_txq_ctx_free(il);
+
+       /* allocate tx queue structure */
+       rc = il_alloc_txq_mem(il);
+       if (rc)
+               return rc;
+
+       /* Tx CMD queue */
+       rc = il3945_tx_reset(il);
+       if (rc)
+               goto error;
+
+       /* Tx queue(s) */
+       for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+               slots_num = (txq_id == IL39_CMD_QUEUE_NUM) ?
+                   TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+               if (rc) {
+                       IL_ERR("Tx %d queue init failed\n", txq_id);
+                       goto error;
+               }
+       }
+
+       return rc;
+
+error:
+       il3945_hw_txq_ctx_free(il);
+       return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+       int ret = il_apm_init(il);
+
+       /* Clear APMG (NIC's internal power management) interrupts */
+       il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+       il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+       /* Reset radio chip */
+       il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+       udelay(5);
+       il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+       return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       unsigned long flags;
+       u8 rev_id = il->pci_dev->revision;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Determine HW type */
+       D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+       if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+               D_INFO("RTP type\n");
+       else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+               D_INFO("3945 RADIO-MB type\n");
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+       } else {
+               D_INFO("3945 RADIO-MM type\n");
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+       }
+
+       if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+               D_INFO("SKU OP mode is mrc\n");
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+       } else
+               D_INFO("SKU OP mode is basic\n");
+
+       if ((eeprom->board_revision & 0xF0) == 0xD0) {
+               D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+       } else {
+               D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+               il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+                            CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+       }
+
+       if (eeprom->almgor_m_version <= 1) {
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+               D_INFO("Card M type A version is 0x%X\n",
+                      eeprom->almgor_m_version);
+       } else {
+               D_INFO("Card M type B version is 0x%X\n",
+                      eeprom->almgor_m_version);
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+       }
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+               D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+       if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+               D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+       int rc;
+       unsigned long flags;
+       struct il_rx_queue *rxq = &il->rxq;
+
+       spin_lock_irqsave(&il->lock, flags);
+       il->cfg->ops->lib->apm_ops.init(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       il3945_set_pwr_vmain(il);
+
+       il->cfg->ops->lib->apm_ops.config(il);
+
+       /* Allocate the RX queue, or reset if it is already allocated */
+       if (!rxq->bd) {
+               rc = il_rx_queue_alloc(il);
+               if (rc) {
+                       IL_ERR("Unable to initialize Rx queue\n");
+                       return -ENOMEM;
+               }
+       } else
+               il3945_rx_queue_reset(il, rxq);
+
+       il3945_rx_replenish(il);
+
+       il3945_rx_init(il, rxq);
+
+       /* Look at using this instead:
+          rxq->need_update = 1;
+          il_rx_queue_update_write_ptr(il, rxq);
+        */
+
+       il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+       rc = il3945_txq_ctx_reset(il);
+       if (rc)
+               return rc;
+
+       set_bit(S_INIT, &il->status);
+
+       return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+       int txq_id;
+
+       /* Tx queues */
+       if (il->txq)
+               for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+                       if (txq_id == IL39_CMD_QUEUE_NUM)
+                               il_cmd_queue_free(il);
+                       else
+                               il_tx_queue_free(il, txq_id);
+
+       /* free tx queue structure */
+       il_free_txq_mem(il);
+}
+
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+       int txq_id;
+
+       /* stop SCD */
+       il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+       il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+       /* reset TFD queues */
+       for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+               il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+               il_poll_bit(il, FH39_TSSR_TX_STATUS,
+                           FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+                           1000);
+       }
+
+       il3945_hw_txq_ctx_free(il);
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp - return idx delta into power gain
+ * settings table
+ */
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
+       return (new_reading - old_reading) * (-11) / 100;
+}
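+
+/*
+ * (for documentation purposes)
+ * The factor above works out to roughly -0.11 gain-table steps per unit
+ * of raw temperature difference (each step being 1/2 dB).  With
+ * hypothetical readings, new_reading - old_reading = +50 gives
+ * 50 * (-11) / 100 = -5, i.e. the compensated idx is lowered by 5 steps,
+ * toward the higher-power entries at the top of power_gain_table[],
+ * as the part heats up.
+ */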
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+       return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+       return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature - get the current temperature
+ * by reading from NIC
+ */
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       int temperature;
+
+       temperature = il3945_hw_get_temperature(il);
+
+       /* driver's okay range is -260 to +25.
+        *   human readable okay range is 0 to +285 */
+       D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+       /* handle insane temp reading */
+       if (il3945_hw_reg_temp_out_of_range(temperature)) {
+               IL_ERR("Error bad temperature value  %d\n", temperature);
+
+               /* if really really hot(?),
+                *   substitute the 3rd band/group's temp measured at factory */
+               if (il->last_temperature > 100)
+                       temperature = eeprom->groups[2].temperature;
+               else            /* else use most recent "sane" value from driver */
+                       temperature = il->last_temperature;
+       }
+
+       return temperature;     /* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * Both are lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER   6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * records new temperature in tx_mgr->temperature.
+ * replaces tx_mgr->last_temperature *only* if calib needed
+ *    (assumes caller will actually do the calibration!). */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+       int temp_diff;
+
+       il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+       temp_diff = il->temperature - il->last_temperature;
+
+       /* get absolute value */
+       if (temp_diff < 0) {
+               D_POWER("Getting cooler, delta %d,\n", temp_diff);
+               temp_diff = -temp_diff;
+       } else if (temp_diff == 0)
+               D_POWER("Same temp,\n");
+       else
+               D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+       /* if we don't need calibration, *don't* update last_temperature */
+       if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+               D_POWER("Timed thermal calib not needed\n");
+               return 0;
+       }
+
+       D_POWER("Timed thermal calib needed\n");
+
+       /* assume that caller will actually do calib ...
+        *   update the "last temperature" value */
+       il->last_temperature = il->temperature;
+       return 1;
+}
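+
+/*
+ * (for documentation purposes)
+ * Example with hypothetical readings: last_temperature = 100 and a new
+ * reading of 104 gives |temp_diff| = 4 < IL_TEMPERATURE_LIMIT_TIMER, so
+ * no recalibration and last_temperature stays at 100.  A new reading of
+ * 107 gives |temp_diff| = 7 >= 6, so the function returns 1 and
+ * last_temperature is updated to 107.
+ */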
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF  -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+       {
+        {251, 127},            /* 2.4 GHz, highest power */
+        {251, 127},
+        {251, 127},
+        {251, 127},
+        {251, 125},
+        {251, 110},
+        {251, 105},
+        {251, 98},
+        {187, 125},
+        {187, 115},
+        {187, 108},
+        {187, 99},
+        {243, 119},
+        {243, 111},
+        {243, 105},
+        {243, 97},
+        {243, 92},
+        {211, 106},
+        {211, 100},
+        {179, 120},
+        {179, 113},
+        {179, 107},
+        {147, 125},
+        {147, 119},
+        {147, 112},
+        {147, 106},
+        {147, 101},
+        {147, 97},
+        {147, 91},
+        {115, 107},
+        {235, 121},
+        {235, 115},
+        {235, 109},
+        {203, 127},
+        {203, 121},
+        {203, 115},
+        {203, 108},
+        {203, 102},
+        {203, 96},
+        {203, 92},
+        {171, 110},
+        {171, 104},
+        {171, 98},
+        {139, 116},
+        {227, 125},
+        {227, 119},
+        {227, 113},
+        {227, 107},
+        {227, 101},
+        {227, 96},
+        {195, 113},
+        {195, 106},
+        {195, 102},
+        {195, 95},
+        {163, 113},
+        {163, 106},
+        {163, 102},
+        {163, 95},
+        {131, 113},
+        {131, 106},
+        {131, 102},
+        {131, 95},
+        {99, 113},
+        {99, 106},
+        {99, 102},
+        {99, 95},
+        {67, 113},
+        {67, 106},
+        {67, 102},
+        {67, 95},
+        {35, 113},
+        {35, 106},
+        {35, 102},
+        {35, 95},
+        {3, 113},
+        {3, 106},
+        {3, 102},
+        {3, 95}                /* 2.4 GHz, lowest power */
+       },
+       {
+        {251, 127},            /* 5.x GHz, highest power */
+        {251, 120},
+        {251, 114},
+        {219, 119},
+        {219, 101},
+        {187, 113},
+        {187, 102},
+        {155, 114},
+        {155, 103},
+        {123, 117},
+        {123, 107},
+        {123, 99},
+        {123, 92},
+        {91, 108},
+        {59, 125},
+        {59, 118},
+        {59, 109},
+        {59, 102},
+        {59, 96},
+        {59, 90},
+        {27, 104},
+        {27, 98},
+        {27, 92},
+        {115, 118},
+        {115, 111},
+        {115, 104},
+        {83, 126},
+        {83, 121},
+        {83, 113},
+        {83, 105},
+        {83, 99},
+        {51, 118},
+        {51, 111},
+        {51, 104},
+        {51, 98},
+        {19, 116},
+        {19, 109},
+        {19, 102},
+        {19, 98},
+        {19, 93},
+        {171, 113},
+        {171, 107},
+        {171, 99},
+        {139, 120},
+        {139, 113},
+        {139, 107},
+        {139, 99},
+        {107, 120},
+        {107, 113},
+        {107, 107},
+        {107, 99},
+        {75, 120},
+        {75, 113},
+        {75, 107},
+        {75, 99},
+        {43, 120},
+        {43, 113},
+        {43, 107},
+        {43, 99},
+        {11, 120},
+        {11, 113},
+        {11, 107},
+        {11, 99},
+        {131, 107},
+        {131, 99},
+        {99, 120},
+        {99, 113},
+        {99, 107},
+        {99, 99},
+        {67, 120},
+        {67, 113},
+        {67, 107},
+        {67, 99},
+        {35, 120},
+        {35, 113},
+        {35, 107},
+        {35, 99},
+        {3, 120}               /* 5.x GHz, lowest power */
+       }
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+       if (idx < 0)
+               return 0;
+       if (idx >= IL_MAX_GAIN_ENTRIES)
+               return IL_MAX_GAIN_ENTRIES - 1;
+       return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+                            const s8 *clip_pwrs,
+                            struct il_channel_info *ch_info, int band_idx)
+{
+       struct il3945_scan_power_info *scan_power_info;
+       s8 power;
+       u8 power_idx;
+
+       scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+       /* use this channel group's 6Mbit clipping/saturation pwr,
+        *   but cap at regulatory scan power restriction (set during init
+        *   based on eeprom channel data) for this channel.  */
+       power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+       power = min(power, il->tx_power_user_lmt);
+       scan_power_info->requested_power = power;
+
+       /* find difference between new scan *power* and current "normal"
+        *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
+        *   current "normal" temperature-compensated Tx power *idx* for
+        *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+        *   *idx*. */
+       power_idx =
+           ch_info->power_info[rate_idx].power_table_idx -
+           (power -
+            ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+       /* store reference idx that we use when adjusting *all* scan
+        *   powers.  So we can accommodate user (all channel) or spectrum
+        *   management (single channel) power changes "between" temperature
+        *   feedback compensation procedures.
+        * don't force fit this reference idx into gain table; it may be a
+        *   negative number.  This will help avoid errors when we're at
+        *   the lower bounds (highest gains, for warmest temperatures)
+        *   of the table. */
+
+       /* don't exceed table bounds for "real" setting */
+       power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+       scan_power_info->power_table_idx = power_idx;
+       scan_power_info->tpc.tx_gain =
+           power_gain_table[band_idx][power_idx].tx_gain;
+       scan_power_info->tpc.dsp_atten =
+           power_gain_table[band_idx][power_idx].dsp_atten;
+}
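+
+/*
+ * (for documentation purposes)
+ * Worked example of the idx adjustment above, with hypothetical values
+ * for the 6M scan rate: if the normal 6M power is 14 dBm at
+ * power_table_idx 20 and the capped scan power comes out at 12 dBm, then
+ *
+ *     power_idx = 20 - (12 - 14) * 2 = 24
+ *
+ * i.e. 2 dB less power maps to 4 more half-dB steps down the gain table.
+ */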
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from channel info struct, and send to NIC
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+       int rate_idx, i;
+       const struct il_channel_info *ch_info = NULL;
+       struct il3945_txpowertable_cmd txpower = {
+               .channel = il->ctx.active.channel,
+       };
+       u16 chan;
+
+       if (WARN_ONCE
+           (test_bit(S_SCAN_HW, &il->status),
+            "TX Power requested while scanning!\n"))
+               return -EAGAIN;
+
+       chan = le16_to_cpu(il->ctx.active.channel);
+
+       txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+       ch_info = il_get_channel_info(il, il->band, chan);
+       if (!ch_info) {
+               IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+                      il->band);
+               return -EINVAL;
+       }
+
+       if (!il_is_channel_valid(ch_info)) {
+               D_POWER("Not calling TX_PWR_TBL_CMD on non-Tx channel.\n");
+               return 0;
+       }
+
+       /* fill cmd with power settings for all rates for current channel */
+       /* Fill OFDM rate */
+       for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+            rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+               txpower.power[i].tpc = ch_info->power_info[i].tpc;
+               txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+               D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+                       le16_to_cpu(txpower.channel), txpower.band,
+                       txpower.power[i].tpc.tx_gain,
+                       txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+       }
+       /* Fill CCK rates */
+       for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+            rate_idx++, i++) {
+               txpower.power[i].tpc = ch_info->power_info[i].tpc;
+               txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+               D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+                       le16_to_cpu(txpower.channel), txpower.band,
+                       txpower.power[i].tpc.tx_gain,
+                       txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+       }
+
+       return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+                              sizeof(struct il3945_txpowertable_cmd),
+                              &txpower);
+
+}
+
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update.  Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: il3945_hw_reg_comp_txpower_temp() *must* be run after this to
+ *      properly fill out the scan powers, and actual h/w gain settings,
+ *      and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+       struct il3945_channel_power_info *power_info;
+       int power_changed = 0;
+       int i;
+       const s8 *clip_pwrs;
+       int power;
+
+       /* Get this chnlgrp's rate-to-max/clip-powers table */
+       clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+       /* Get this channel's rate-to-current-power settings table */
+       power_info = ch_info->power_info;
+
+       /* update OFDM Txpower settings */
+       for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+               int delta_idx;
+
+               /* limit new power to be no more than h/w capability */
+               power = min(ch_info->curr_txpow, clip_pwrs[i]);
+               if (power == power_info->requested_power)
+                       continue;
+
+               /* find difference between old and new requested powers,
+                *    update base (non-temp-compensated) power idx */
+               delta_idx = (power - power_info->requested_power) * 2;
+               power_info->base_power_idx -= delta_idx;
+
+               /* save new requested power value */
+               power_info->requested_power = power;
+
+               power_changed = 1;
+       }
+
+       /* update CCK Txpower settings, based on OFDM 12M setting ...
+        *    ... all CCK power settings for a given channel are the *same*. */
+       if (power_changed) {
+               power =
+                   ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+                   IL_CCK_FROM_OFDM_POWER_DIFF;
+
+               /* do all CCK rates' il3945_channel_power_info structures */
+               for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+                       power_info->requested_power = power;
+                       power_info->base_power_idx =
+                           ch_info->power_info[RATE_12M_IDX_TBL].
+                           base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+                       ++power_info;
+               }
+       }
+
+       return 0;
+}
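+
+/*
+ * (for documentation purposes)
+ * Example with hypothetical numbers: lowering an OFDM rate's requested
+ * power from 15 dBm to 12 dBm gives delta_idx = (12 - 15) * 2 = -6, so
+ * base_power_idx is raised by 6 half-dB steps (less gain).  The CCK
+ * rates then follow the 12M OFDM setting at 5 dB less power
+ * (IL_CCK_FROM_OFDM_POWER_DIFF) and 10 idx steps higher
+ * (IL_CCK_FROM_OFDM_IDX_DIFF), consistent with 1/2 dB per step.
+ */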
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ *      based strictly on regulatory (eeprom and spectrum mgt) limitations
+ *      (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+       s8 max_power;
+
+#if 0
+       /* if we're using TGd limits, use lower of TGd or EEPROM */
+       if (ch_info->tgd_data.max_power != 0)
+               max_power =
+                   min(ch_info->tgd_data.max_power,
+                       ch_info->eeprom.max_power_avg);
+
+       /* else just use EEPROM limits */
+       else
+#endif
+               max_power = ch_info->eeprom.max_power_avg;
+
+       return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ *   and the factory calibration temperatures, and bases the new settings
+ *   on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+       struct il_channel_info *ch_info = NULL;
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       int delta_idx;
+       const s8 *clip_pwrs;    /* array of h/w max power levels for each rate */
+       u8 a_band;
+       u8 rate_idx;
+       u8 scan_tbl_idx;
+       u8 i;
+       int ref_temp;
+       int temperature = il->temperature;
+
+       if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+               /* do not perform tx power calibration */
+               return 0;
+       }
+       /* set up new Tx power info for each and every channel, 2.4 and 5.x */
+       for (i = 0; i < il->channel_count; i++) {
+               ch_info = &il->channel_info[i];
+               a_band = il_is_channel_a_band(ch_info);
+
+               /* Get this chnlgrp's factory calibration temperature */
+               ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+               /* get power idx adjustment based on current and factory
+                * temps */
+               delta_idx =
+                   il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+               /* set tx power value for all rates, OFDM and CCK */
+               for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+                       int power_idx =
+                           ch_info->power_info[rate_idx].base_power_idx;
+
+                       /* temperature compensate */
+                       power_idx += delta_idx;
+
+                       /* stay within table range */
+                       power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+                       ch_info->power_info[rate_idx].power_table_idx =
+                           (u8) power_idx;
+                       ch_info->power_info[rate_idx].tpc =
+                           power_gain_table[a_band][power_idx];
+               }
+
+               /* Get this chnlgrp's rate-to-max/clip-powers table */
+               clip_pwrs =
+                   il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+               for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+                    scan_tbl_idx++) {
+                       s32 actual_idx =
+                           (scan_tbl_idx ==
+                            0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+                       il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+                                                    actual_idx, clip_pwrs,
+                                                    ch_info, a_band);
+               }
+       }
+
+       /* send Txpower command for current channel to ucode */
+       return il->cfg->ops->lib->send_tx_power(il);
+}
+
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+       struct il_channel_info *ch_info;
+       s8 max_power;
+       u8 a_band;
+       u8 i;
+
+       if (il->tx_power_user_lmt == power) {
+               D_POWER("Requested Tx power same as current limit: %ddBm.\n",
+                       power);
+               return 0;
+       }
+
+       D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+       il->tx_power_user_lmt = power;
+
+       /* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+       for (i = 0; i < il->channel_count; i++) {
+               ch_info = &il->channel_info[i];
+               a_band = il_is_channel_a_band(ch_info);
+
+               /* find minimum power of all user and regulatory constraints
+                *    (does not consider h/w clipping limitations) */
+               max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+               max_power = min(power, max_power);
+               if (max_power != ch_info->curr_txpow) {
+                       ch_info->curr_txpow = max_power;
+
+                       /* this considers the h/w clipping limitations */
+                       il3945_hw_reg_set_new_power(il, ch_info);
+               }
+       }
+
+       /* update txpower settings for all channels,
+        *   send to NIC if associated. */
+       il3945_is_temp_calib_needed(il);
+       il3945_hw_reg_comp_txpower_temp(il);
+
+       return 0;
+}
+
+static int
+il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       int rc = 0;
+       struct il_rx_pkt *pkt;
+       struct il3945_rxon_assoc_cmd rxon_assoc;
+       struct il_host_cmd cmd = {
+               .id = C_RXON_ASSOC,
+               .len = sizeof(rxon_assoc),
+               .flags = CMD_WANT_SKB,
+               .data = &rxon_assoc,
+       };
+       const struct il_rxon_cmd *rxon1 = &ctx->staging;
+       const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+       if (rxon1->flags == rxon2->flags &&
+           rxon1->filter_flags == rxon2->filter_flags &&
+           rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+           rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+               D_INFO("Using current RXON_ASSOC.  Not resending.\n");
+               return 0;
+       }
+
+       rxon_assoc.flags = ctx->staging.flags;
+       rxon_assoc.filter_flags = ctx->staging.filter_flags;
+       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+       rxon_assoc.reserved = 0;
+
+       rc = il_send_cmd_sync(il, &cmd);
+       if (rc)
+               return rc;
+
+       pkt = (struct il_rx_pkt *)cmd.reply_page;
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from C_RXON_ASSOC command\n");
+               rc = -EIO;
+       }
+
+       il_free_pages(il, cmd.reply_page);
+
+       return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data.  This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       /* cast away the const for active_rxon in this function */
+       struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+       struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+       int rc = 0;
+       bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return -EINVAL;
+
+       if (!il_is_alive(il))
+               return -1;
+
+       /* always get timestamp with Rx frame */
+       staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+       /* select antenna */
+       staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+       staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+       rc = il_check_rxon_cmd(il, ctx);
+       if (rc) {
+               IL_ERR("Invalid RXON configuration.  Not committing.\n");
+               return -EINVAL;
+       }
+
+       /* If we don't need to send a full RXON, we can use
+        * il3945_rxon_assoc_cmd which is used to reconfigure filter
+        * and other flags for the current radio configuration. */
+       if (!il_full_rxon_required(il, &il->ctx)) {
+               rc = il_send_rxon_assoc(il, &il->ctx);
+               if (rc) {
+                       IL_ERR("Error setting RXON_ASSOC "
+                              "configuration (%d).\n", rc);
+                       return rc;
+               }
+
+               memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+               /*
+                * We do not commit tx power settings while channel changing,
+                * do it now if tx power changed.
+                */
+               il_set_tx_power(il, il->tx_power_next, false);
+               return 0;
+       }
+
+       /* If we are currently associated and the new config requires
+        * an RXON_ASSOC and the new config wants the associated mask enabled,
+        * we must clear the associated from the active configuration
+        * before we apply the new config */
+       if (il_is_associated(il) && new_assoc) {
+               D_INFO("Toggling associated bit on current RXON\n");
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+               /*
+                * reserved4 and 5 could have been filled by the iwlcore code.
+                * Let's clear them before pushing to the 3945.
+                */
+               active_rxon->reserved4 = 0;
+               active_rxon->reserved5 = 0;
+               rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+                                    &il->ctx.active);
+
+               /* If the mask clearing failed then we set
+                * active_rxon back to what it was previously */
+               if (rc) {
+                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+                       IL_ERR("Error clearing ASSOC_MSK on current "
+                              "configuration (%d).\n", rc);
+                       return rc;
+               }
+               il_clear_ucode_stations(il, &il->ctx);
+               il_restore_stations(il, &il->ctx);
+       }
+
+       D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+              "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+              le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
+
+       /*
+        * reserved4 and 5 could have been filled by the iwlcore code.
+        * Let's clear them before pushing to the 3945.
+        */
+       staging_rxon->reserved4 = 0;
+       staging_rxon->reserved5 = 0;
+
+       il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+
+       /* Apply the new configuration */
+       rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+                            staging_rxon);
+       if (rc) {
+               IL_ERR("Error setting new configuration (%d).\n", rc);
+               return rc;
+       }
+
+       memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+       if (!new_assoc) {
+               il_clear_ucode_stations(il, &il->ctx);
+               il_restore_stations(il, &il->ctx);
+       }
+
+       /* If we issue a new RXON command which required a tune then we must
+        * send a new TXPOWER command or we won't be able to Tx any frames */
+       rc = il_set_tx_power(il, il->tx_power_next, true);
+       if (rc) {
+               IL_ERR("Error setting Tx power (%d).\n", rc);
+               return rc;
+       }
+
+       /* Init the hardware's rate fallback order based on the band */
+       rc = il3945_init_hw_rate_table(il);
+       if (rc) {
+               IL_ERR("Error setting HW rate table: %02X\n", rc);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when it is time to check our temperature.
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ *     -- correct coeffs for temp (can reset temp timer)
+ *     -- save this temp as "last",
+ *     -- send new set of gain settings to NIC
+ * NOTE:  This should continue working, even when we're not associated,
+ *   so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+       /* This will kick in the "brute force"
+        * il3945_hw_reg_comp_txpower_temp() below */
+       if (!il3945_is_temp_calib_needed(il))
+               goto reschedule;
+
+       /* Set up a new set of temp-adjusted TxPowers, send to NIC.
+        * This is based *only* on current temperature,
+        * ignoring any previous power measurements */
+       il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+       queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+                          REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+       struct il_priv *il = container_of(work, struct il_priv,
+                                         _3945.thermal_periodic.work);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       mutex_lock(&il->mutex);
+       il3945_reg_txpower_periodic(il);
+       mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ *      These channel groups are based on factory-tested channels;
+ *      on A-band, EEPROM's "group frequency" entries represent the top
+ *      channel in each group 1-4.  All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+                            const struct il_channel_info *ch_info)
+{
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+       u8 group;
+       u16 group_idx = 0;      /* based on factory calib frequencies */
+       u8 grp_channel;
+
+       /* Find the group idx for the channel ... don't use idx 1(?) */
+       if (il_is_channel_a_band(ch_info)) {
+               for (group = 1; group < 5; group++) {
+                       grp_channel = ch_grp[group].group_channel;
+                       if (ch_info->channel <= grp_channel) {
+                               group_idx = group;
+                               break;
+                       }
+               }
+               /* group 4 has a few channels *above* its factory cal freq */
+               if (group == 5)
+                       group_idx = 4;
+       } else
+               group_idx = 0;  /* 2.4 GHz, group 0 */
+
+       D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+       return group_idx;
+}
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ *   into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+                                   s32 setting_idx, s32 *new_idx)
+{
+       const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       s32 idx0, idx1;
+       s32 power = 2 * requested_power;
+       s32 i;
+       const struct il3945_eeprom_txpower_sample *samples;
+       s32 gains0, gains1;
+       s32 res;
+       s32 denominator;
+
+       chnl_grp = &eeprom->groups[setting_idx];
+       samples = chnl_grp->samples;
+       for (i = 0; i < 5; i++) {
+               if (power == samples[i].power) {
+                       *new_idx = samples[i].gain_idx;
+                       return 0;
+               }
+       }
+
+       if (power > samples[1].power) {
+               idx0 = 0;
+               idx1 = 1;
+       } else if (power > samples[2].power) {
+               idx0 = 1;
+               idx1 = 2;
+       } else if (power > samples[3].power) {
+               idx0 = 2;
+               idx1 = 3;
+       } else {
+               idx0 = 3;
+               idx1 = 4;
+       }
+
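+       /*
+        * The block below does linear interpolation between the two
+        * bracketing samples in Q19 fixed point.  Worked sketch with
+        * made-up values: gain_idx 10 at samples[idx0].power and
+        * gain_idx 14 at samples[idx1].power, with the requested power
+        * halfway between the two samples, gives res = (10 + 2) << 19
+        * plus the (1 << 18) rounding term, so *new_idx = res >> 19 = 12.
+        */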
+       denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+       if (denominator == 0)
+               return -EINVAL;
+       gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+       gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
+       res =
+           gains0 + (gains1 - gains0) * ((s32) power -
+                                         (s32) samples[idx0].power) /
+           denominator + (1 << 18);
+       *new_idx = res >> 19;
+       return 0;
+}
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+       u32 i;
+       s32 rate_idx;
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       const struct il3945_eeprom_txpower_group *group;
+
+       D_POWER("Initializing factory calib info from EEPROM\n");
+
+       for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+               s8 *clip_pwrs;  /* table of power levels for each rate */
+               s8 satur_pwr;   /* saturation power for each chnl group */
+               group = &eeprom->groups[i];
+
+               /* sanity check on factory saturation power value */
+               if (group->saturation_power < 40) {
+                       IL_WARN("Error: saturation power is %d, "
+                               "less than minimum expected 40\n",
+                               group->saturation_power);
+                       return;
+               }
+
+               /*
+                * Derive requested power levels for each rate, based on
+                *   hardware capabilities (saturation power for band).
+                * Basic value is 3dB down from saturation, with further
+                *   power reductions for highest 3 data rates.  These
+                *   backoffs provide headroom for high rate modulation
+                *   power peaks, without too much distortion (clipping).
+                */
+               /* we'll fill in this array with h/w max power levels */
+               clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+               /* divide factory saturation power by 2 to find -3dB level */
+               satur_pwr = (s8) (group->saturation_power >> 1);
+
+               /* fill in channel group's nominal powers for each rate */
+               for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+                    rate_idx++, clip_pwrs++) {
+                       switch (rate_idx) {
+                       case RATE_36M_IDX_TBL:
+                               if (i == 0)     /* B/G */
+                                       *clip_pwrs = satur_pwr;
+                               else    /* A */
+                                       *clip_pwrs = satur_pwr - 5;
+                               break;
+                       case RATE_48M_IDX_TBL:
+                               if (i == 0)
+                                       *clip_pwrs = satur_pwr - 7;
+                               else
+                                       *clip_pwrs = satur_pwr - 10;
+                               break;
+                       case RATE_54M_IDX_TBL:
+                               if (i == 0)
+                                       *clip_pwrs = satur_pwr - 9;
+                               else
+                                       *clip_pwrs = satur_pwr - 12;
+                               break;
+                       default:
+                               *clip_pwrs = satur_pwr;
+                               break;
+                       }
+               }
+       }
+}
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to perform active (i.e. Tx) scans.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+       struct il_channel_info *ch_info = NULL;
+       struct il3945_channel_power_info *pwr_info;
+       struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+       int delta_idx;
+       u8 rate_idx;
+       u8 scan_tbl_idx;
+       const s8 *clip_pwrs;    /* array of power levels for each rate */
+       u8 gain, dsp_atten;
+       s8 power;
+       u8 pwr_idx, base_pwr_idx, a_band;
+       u8 i;
+       int temperature;
+
+       /* save temperature reference,
+        *   so we can determine next time to calibrate */
+       temperature = il3945_hw_reg_txpower_get_temperature(il);
+       il->last_temperature = temperature;
+
+       il3945_hw_reg_init_channel_groups(il);
+
+       /* initialize Tx power info for each and every channel, 2.4 and 5.x */
+       for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+            i++, ch_info++) {
+               a_band = il_is_channel_a_band(ch_info);
+               if (!il_is_channel_valid(ch_info))
+                       continue;
+
+               /* find this channel's channel group (*not* "band") idx */
+               ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+               /* Get this chnlgrp's rate->max/clip-powers table */
+               clip_pwrs =
+                   il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+               /* calculate power idx *adjustment* value according to
+                *  diff between current temperature and factory temperature */
+               delta_idx =
+                   il3945_hw_reg_adjust_power_by_temp(temperature,
+                                                      eeprom->groups[ch_info->
+                                                                     group_idx].
+                                                      temperature);
+
+               D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+                       delta_idx, temperature + IL_TEMP_CONVERT);
+
+               /* set tx power value for all OFDM rates */
+               for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+                       s32 uninitialized_var(power_idx);
+                       int rc;
+
+                       /* use channel group's clip-power table,
+                        *   but don't exceed channel's max power */
+                       s8 pwr = min(ch_info->max_power_avg,
+                                    clip_pwrs[rate_idx]);
+
+                       pwr_info = &ch_info->power_info[rate_idx];
+
+                       /* get base (i.e. at factory-measured temperature)
+                        *    power table idx for this rate's power */
+                       rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+                                                                ch_info->
+                                                                group_idx,
+                                                                &power_idx);
+                       if (rc) {
+                               IL_ERR("Invalid power idx\n");
+                               return rc;
+                       }
+                       pwr_info->base_power_idx = (u8) power_idx;
+
+                       /* temperature compensate */
+                       power_idx += delta_idx;
+
+                       /* stay within range of gain table */
+                       power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+                       /* fill 1 OFDM rate's il3945_channel_power_info struct */
+                       pwr_info->requested_power = pwr;
+                       pwr_info->power_table_idx = (u8) power_idx;
+                       pwr_info->tpc.tx_gain =
+                           power_gain_table[a_band][power_idx].tx_gain;
+                       pwr_info->tpc.dsp_atten =
+                           power_gain_table[a_band][power_idx].dsp_atten;
+               }
+
+               /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+               pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+               power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+               pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+               base_pwr_idx =
+                   pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+               /* stay within table range */
+               pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+               gain = power_gain_table[a_band][pwr_idx].tx_gain;
+               dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+               /* fill each CCK rate's il3945_channel_power_info structure
+                * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
+                * NOTE:  CCK rates start at end of OFDM rates! */
+               for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+                       pwr_info =
+                           &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+                       pwr_info->requested_power = power;
+                       pwr_info->power_table_idx = pwr_idx;
+                       pwr_info->base_power_idx = base_pwr_idx;
+                       pwr_info->tpc.tx_gain = gain;
+                       pwr_info->tpc.dsp_atten = dsp_atten;
+               }
+
+               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+               for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+                    scan_tbl_idx++) {
+                       s32 actual_idx =
+                           (scan_tbl_idx ==
+                            0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+                       il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+                                                    actual_idx, clip_pwrs,
+                                                    ch_info, a_band);
+               }
+       }
+
+       return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+       int rc;
+
+       il_wr(il, FH39_RCSR_CONFIG(0), 0);
+       rc = il_poll_bit(il, FH39_RSSR_STATUS,
+                        FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+       if (rc < 0)
+               IL_ERR("Can't stop Rx DMA.\n");
+
+       return 0;
+}
+
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+       int txq_id = txq->q.id;
+
+       struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+       shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+       il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+       il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+       il_wr(il, FH39_TCSR_CONFIG(txq_id),
+             FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+             FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+             FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+             FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+             FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+       /* fake read to flush all prev. writes */
+       _il_rd(il, FH39_TSSR_CBB_BASE);
+
+       return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+       switch (cmd_id) {
+       case C_RXON:
+               return sizeof(struct il3945_rxon_cmd);
+       case C_POWER_TBL:
+               return sizeof(struct il3945_powertable_cmd);
+       default:
+               return len;
+       }
+}
+
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+       struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+       addsta->mode = cmd->mode;
+       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+       memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+       addsta->station_flags = cmd->station_flags;
+       addsta->station_flags_msk = cmd->station_flags_msk;
+       addsta->tid_disable_tx = cpu_to_le16(0);
+       addsta->rate_n_flags = cmd->rate_n_flags;
+       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+       return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       int ret;
+       u8 sta_id;
+       unsigned long flags;
+
+       if (sta_id_r)
+               *sta_id_r = IL_INVALID_STATION;
+
+       ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+       if (ret) {
+               IL_ERR("Unable to add station %pM\n", addr);
+               return ret;
+       }
+
+       if (sta_id_r)
+               *sta_id_r = sta_id;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].used |= IL_STA_LOCAL;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return 0;
+}
+
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+                          bool add)
+{
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+       int ret;
+
+       if (add) {
+               ret =
+                   il3945_add_bssid_station(il, vif->bss_conf.bssid,
+                                            &vif_priv->ibss_bssid_sta_id);
+               if (ret)
+                       return ret;
+
+               il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+                               (il->band ==
+                                IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+                               RATE_1M_PLCP);
+               il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+               return 0;
+       }
+
+       return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+                                vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+       int rc, i, idx, prev_idx;
+       struct il3945_rate_scaling_cmd rate_cmd = {
+               .reserved = {0, 0, 0},
+       };
+       struct il3945_rate_scaling_info *table = rate_cmd.table;
+
+       for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+               idx = il3945_rates[i].table_rs_idx;
+
+               table[idx].rate_n_flags =
+                   il3945_hw_set_rate_n_flags(il3945_rates[i].plcp, 0);
+               table[idx].try_cnt = il->retry_rate;
+               prev_idx = il3945_get_prev_ieee_rate(i);
+               table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+       }
+
+       switch (il->band) {
+       case IEEE80211_BAND_5GHZ:
+               D_RATE("Select A mode rate scale\n");
+               /* If one of the following CCK rates is used,
+                * have it fall back to the 6M OFDM rate */
+               for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+                       table[i].next_rate_idx =
+                           il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+               /* Don't fall back to CCK rates */
+               table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+               /* Don't drop out of OFDM rates */
+               table[RATE_6M_IDX_TBL].next_rate_idx =
+                   il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+               break;
+
+       case IEEE80211_BAND_2GHZ:
+               D_RATE("Select B/G mode rate scale\n");
+               /* If an OFDM rate is used, have it fall back to the
+                * 1M CCK rates */
+
+               if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+                   il_is_associated(il)) {
+
+                       idx = IL_FIRST_CCK_RATE;
+                       for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+                               table[i].next_rate_idx =
+                                   il3945_rates[idx].table_rs_idx;
+
+                       idx = RATE_11M_IDX_TBL;
+                       /* CCK shouldn't fall back to OFDM... */
+                       table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+               }
+               break;
+
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       /* Update the rate scaling for control frame Tx */
+       rate_cmd.table_id = 0;
+       rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+       if (rc)
+               return rc;
+
+       /* Update the rate scaling for data frame Tx */
+       rate_cmd.table_id = 1;
+       return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+       memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
+
+       il->_3945.shared_virt =
+           dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+                              &il->_3945.shared_phys, GFP_KERNEL);
+       if (!il->_3945.shared_virt) {
+               IL_ERR("failed to allocate pci memory\n");
+               return -ENOMEM;
+       }
+
+       /* Assign number of Usable TX queues */
+       il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+
+       il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+       il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+       il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+       il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+       il->hw_params.max_stations = IL3945_STATION_COUNT;
+       il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
+
+       il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+       il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+       il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+       il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+       return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+                        u8 rate)
+{
+       struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+       unsigned int frame_size;
+
+       tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+       tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       frame_size =
+           il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+                                    sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+       BUG_ON(frame_size > MAX_MPDU_SIZE);
+       tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+       tx_beacon_cmd->tx.rate = rate;
+       tx_beacon_cmd->tx.tx_flags =
+           (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+       /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
+       tx_beacon_cmd->tx.supp_rates[0] =
+           (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+       tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+       return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+       il->handlers[C_TX] = il3945_hdl_tx;
+       il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+       INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+                         il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+       cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+       __le32 *image = il->ucode_boot.v_addr;
+       u32 len = il->ucode_boot.len;
+       u32 reg;
+       u32 val;
+
+       D_INFO("Begin verify bsm\n");
+
+       /* verify BSM SRAM contents */
+       val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+       for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+            reg += sizeof(u32), image++) {
+               val = il_rd_prph(il, reg);
+               if (val != le32_to_cpu(*image)) {
+                       IL_ERR("BSM uCode verification failed at "
+                              "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+                              BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+                              len, val, le32_to_cpu(*image));
+                       return -EIO;
+               }
+       }
+
+       D_INFO("BSM bootstrap uCode image OK\n");
+
+       return 0;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism.  Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+       _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+       return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+       return;
+}
+
+ /**
+  * il3945_load_bsm - Load bootstrap instructions
+  *
+  * BSM operation:
+  *
+  * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+  * in special SRAM that does not power down during RFKILL.  When powering back
+  * up after power-saving sleeps (or during initial uCode load), the BSM loads
+  * the bootstrap program into the on-board processor, and starts it.
+  *
+  * The bootstrap program loads (via DMA) instructions and data for a new
+  * program from host DRAM locations indicated by the host driver in the
+  * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+  * automatically.
+  *
+  * When initializing the NIC, the host driver points the BSM to the
+  * "initialize" uCode image.  This uCode sets up some internal data, then
+  * notifies host via "initialize alive" that it is complete.
+  *
+  * The host then replaces the BSM_DRAM_* pointer values to point to the
+  * normal runtime uCode instructions and a backup uCode data cache buffer
+  * (filled initially with starting data values for the on-board processor),
+  * then triggers the "initialize" uCode to load and launch the runtime uCode,
+  * which begins normal operation.
+  *
+  * When doing a power-save shutdown, runtime uCode saves data SRAM into
+  * the backup data cache in DRAM before SRAM is powered down.
+  *
+  * When powering back up, the BSM loads the bootstrap program.  This reloads
+  * the runtime uCode instructions and the backup data cache into SRAM,
+  * and re-launches the runtime uCode from where it left off.
+  */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+       __le32 *image = il->ucode_boot.v_addr;
+       u32 len = il->ucode_boot.len;
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+       u32 inst_len;
+       u32 data_len;
+       int rc;
+       int i;
+       u32 done;
+       u32 reg_offset;
+
+       D_INFO("Begin load bsm\n");
+
+       /* make sure bootstrap program is no larger than BSM's SRAM size */
+       if (len > IL39_MAX_BSM_SIZE)
+               return -EINVAL;
+
+       /* Tell bootstrap uCode where to find the "Initialize" uCode
+        *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+        * NOTE:  il3945_initialize_alive_start() will replace these values,
+        *        after the "initialize" uCode has run, to point to
+        *        runtime/protocol instructions and backup data cache. */
+       pinst = il->ucode_init.p_addr;
+       pdata = il->ucode_init_data.p_addr;
+       inst_len = il->ucode_init.len;
+       data_len = il->ucode_init_data.len;
+
+       il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+       il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+       il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+       il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+       /* Fill BSM memory with bootstrap instructions */
+       for (reg_offset = BSM_SRAM_LOWER_BOUND;
+            reg_offset < BSM_SRAM_LOWER_BOUND + len;
+            reg_offset += sizeof(u32), image++)
+               _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+       rc = il3945_verify_bsm(il);
+       if (rc)
+               return rc;
+
+       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+       il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+       il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+       il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+       /* Load bootstrap code into instruction SRAM now,
+        *   to prepare to load "initialize" uCode */
+       il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+       /* Wait for load of bootstrap uCode to finish */
+       for (i = 0; i < 100; i++) {
+               done = il_rd_prph(il, BSM_WR_CTRL_REG);
+               if (!(done & BSM_WR_CTRL_REG_BIT_START))
+                       break;
+               udelay(10);
+       }
+       if (i < 100)
+               D_INFO("BSM write complete, poll %d iterations\n", i);
+       else {
+               IL_ERR("BSM write did not complete!\n");
+               return -EIO;
+       }
+
+       /* Enable future boot loads whenever power management unit triggers it
+        *   (e.g. when powering back up after power-save shutdown) */
+       il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+       return 0;
+}
+
+static struct il_hcmd_ops il3945_hcmd = {
+       .rxon_assoc = il3945_send_rxon_assoc,
+       .commit_rxon = il3945_commit_rxon,
+};
+
+static struct il_lib_ops il3945_lib = {
+       .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = il3945_hw_txq_free_tfd,
+       .txq_init = il3945_hw_tx_queue_init,
+       .load_ucode = il3945_load_bsm,
+       .dump_nic_error_log = il3945_dump_nic_error_log,
+       .apm_ops = {
+                   .init = il3945_apm_init,
+                   .config = il3945_nic_config,
+                   },
+       .eeprom_ops = {
+                      .regulatory_bands = {
+                                           EEPROM_REGULATORY_BAND_1_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_2_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_3_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_4_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_5_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_NO_HT40,
+                                           EEPROM_REGULATORY_BAND_NO_HT40,
+                                           },
+                      .acquire_semaphore = il3945_eeprom_acquire_semaphore,
+                      .release_semaphore = il3945_eeprom_release_semaphore,
+                      },
+       .send_tx_power = il3945_send_tx_power,
+       .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       .debugfs_ops = {
+                       .rx_stats_read = il3945_ucode_rx_stats_read,
+                       .tx_stats_read = il3945_ucode_tx_stats_read,
+                       .general_stats_read = il3945_ucode_general_stats_read,
+                       },
+#endif
+};
+
+static const struct il_legacy_ops il3945_legacy_ops = {
+       .post_associate = il3945_post_associate,
+       .config_ap = il3945_config_ap,
+       .manage_ibss_station = il3945_manage_ibss_station,
+};
+
+static struct il_hcmd_utils_ops il3945_hcmd_utils = {
+       .get_hcmd_size = il3945_get_hcmd_size,
+       .build_addsta_hcmd = il3945_build_addsta_hcmd,
+       .request_scan = il3945_request_scan,
+       .post_scan = il3945_post_scan,
+};
+
+static const struct il_ops il3945_ops = {
+       .lib = &il3945_lib,
+       .hcmd = &il3945_hcmd,
+       .utils = &il3945_hcmd_utils,
+       .led = &il3945_led_ops,
+       .legacy = &il3945_legacy_ops,
+       .ieee80211_ops = &il3945_hw_ops,
+};
+
+static struct il_base_params il3945_base_params = {
+       .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+       .num_of_queues = IL39_NUM_QUEUES,
+       .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+       .set_l0s = false,
+       .use_bsm = true,
+       .led_compensation = 64,
+       .wd_timeout = IL_DEF_WD_TIMEOUT,
+};
+
+static struct il_cfg il3945_bg_cfg = {
+       .name = "3945BG",
+       .fw_name_pre = IL3945_FW_PRE,
+       .ucode_api_max = IL3945_UCODE_API_MAX,
+       .ucode_api_min = IL3945_UCODE_API_MIN,
+       .sku = IL_SKU_G,
+       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+       .ops = &il3945_ops,
+       .mod_params = &il3945_mod_params,
+       .base_params = &il3945_base_params,
+       .led_mode = IL_LED_BLINK,
+};
+
+static struct il_cfg il3945_abg_cfg = {
+       .name = "3945ABG",
+       .fw_name_pre = IL3945_FW_PRE,
+       .ucode_api_max = IL3945_UCODE_API_MAX,
+       .ucode_api_min = IL3945_UCODE_API_MIN,
+       .sku = IL_SKU_A | IL_SKU_G,
+       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+       .ops = &il3945_ops,
+       .mod_params = &il3945_mod_params,
+       .base_params = &il3945_base_params,
+       .led_mode = IL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
+       {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+       {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+       {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+       {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+       {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+       {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+       {0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644 (file)
index 0000000..2b2895c
--- /dev/null
@@ -0,0 +1,626 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h>         /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE  "iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon stats being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+       u64 data;
+       s32 success_counter;
+       s32 success_ratio;
+       s32 counter;
+       s32 average_tpt;
+       unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+       spinlock_t lock;
+       struct il_priv *il;
+       s32 *expected_tpt;
+       unsigned long last_partial_flush;
+       unsigned long last_flush;
+       u32 flush_time;
+       u32 last_tx_packets;
+       u32 tx_packets;
+       u8 tgg;
+       u8 flush_pending;
+       u8 start_rate;
+       struct timer_list rate_scale_flush;
+       struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+       struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+       /* used to be in sta_info */
+       int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+       struct il_station_priv_common common;
+       struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+       IL_ANTENNA_DIVERSITY,
+       IL_ANTENNA_MAIN,
+       IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
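+/*
+ * Informal reading of the rule above (example values only): with a
+ * threshold of 500, a 1500-byte MPDU is sent with RTS/CTS while a
+ * 300-byte MPDU is not; the default of 2347 is above the max MSDU size,
+ * so RTS/CTS is effectively disabled until the threshold is lowered.
+ */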
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE            2304U
+#define MAX_MPDU_SIZE            2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define        DEFAULT_SHORT_RETRY_LIMIT 7U
+#define        DEFAULT_LONG_RETRY_LIMIT  4U
+
+#define IL_TX_FIFO_AC0 0
+#define IL_TX_FIFO_AC1 1
+#define IL_TX_FIFO_AC2 2
+#define IL_TX_FIFO_AC3 3
+#define IL_TX_FIFO_HCCA_1      5
+#define IL_TX_FIFO_HCCA_2      6
+#define IL_TX_FIFO_NONE        7
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+       union {
+               struct ieee80211_hdr frame;
+               struct il3945_tx_beacon_cmd beacon;
+               u8 raw[IEEE80211_FRAME_LEN];
+               u8 cmd[360];
+       } u;
+       struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
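+/*
+ * Example, assuming the usual IEEE80211_SCTL_SEQ mask of 0xFFF0: a
+ * sequence-control value of 0x01B5 carries fragment number 5 and
+ * SEQ_TO_SN(0x01B5) == 0x1B (27); SN_TO_SEQ(27) == 0x01B0.
+ */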
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+#define IL_SUPPORTED_RATES_IE_LEN         8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT        9
+
+#define IL_INVALID_RATE     0xFF
+#define IL_INVALID_VALUE    -1
+
+#define STA_PS_STATUS_WAKE             0
+#define STA_PS_STATUS_SLEEP            1
+
+struct il3945_ibss_seq {
+       u8 mac[ETH_ALEN];
+       u16 seq_num;
+       u16 frag_num;
+       unsigned long packet_time;
+       struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+                      x->u.rx_frame.stats.payload + \
+                      x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+                      IL_RX_HDR(x)->payload + \
+                      le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int il3945_calc_db_from_ratio(int sig_ratio);
+extern void il3945_rx_replenish(void *data);
+extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+                                            struct ieee80211_hdr *hdr,
+                                            int left);
+extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
+                                    char **buf, bool display);
+extern void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE:  The implementations of these functions are hardware specific,
+ * which is why they are in the hardware specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_         <-- It's part of iwlegacy (renamed from the old iwl3945_ prefix)
+ * il3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_      <-- Called from work queue context
+ * il3945_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il3945_hw_handler_setup(struct il_priv *il);
+extern void il3945_hw_setup_deferred_work(struct il_priv *il);
+extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
+extern int il3945_hw_rxq_stop(struct il_priv *il);
+extern int il3945_hw_set_hw_params(struct il_priv *il);
+extern int il3945_hw_nic_init(struct il_priv *il);
+extern int il3945_hw_nic_stop_master(struct il_priv *il);
+extern void il3945_hw_txq_ctx_free(struct il_priv *il);
+extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
+extern int il3945_hw_nic_reset(struct il_priv *il);
+extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
+                                          struct il_tx_queue *txq,
+                                          dma_addr_t addr, u16 len, u8 reset,
+                                          u8 pad);
+extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+extern int il3945_hw_get_temperature(struct il_priv *il);
+extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+                                            struct il3945_frame *frame,
+                                            u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+                                struct ieee80211_tx_info *info,
+                                struct ieee80211_hdr *hdr, int sta_id,
+                                int tx_id);
+extern int il3945_hw_reg_send_txpower(struct il_priv *il);
+extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+extern void il3945_disable_events(struct il_priv *il);
+extern int il4965_get_temperature(const struct il_priv *il);
+extern void il3945_post_associate(struct il_priv *il);
+extern void il3945_config_ap(struct il_priv *il);
+
+extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE:  This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+
+extern struct ieee80211_ops il3945_hw_ops;
+
+extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
+extern int il3945_init_hw_rate_table(struct il_priv *il);
+extern void il3945_reg_txpower_periodic(struct il_priv *il);
+extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET       95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ *   to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ *   (PA) output voltage detector.  This same detector is used during Tx of
+ *   long packets in normal operation to provide feedback as to proper output
+ *   level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+       u8 gain_idx;            /* idx into power (gain) setup table ... */
+       s8 power;               /* ... for this pwr level for this chnl group */
+       u16 v_det;              /* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ *    to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+       struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
+       s32 a, b, c, d, e;      /* coefficients for voltage->power
+                                * formula (signed) */
+       s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
+                                * frequency (signed) */
+       s8 saturation_power;    /* highest power possible by h/w in this
+                                * band */
+       u8 group_channel;       /* "representative" channel # in this band */
+       s16 temperature;        /* h/w temperature at factory calib this band
+                                * (signed) */
+} __packed;
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify a/b/c/d/e coeffs based on
+ *   difference between current temperature and factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+       u32 Ta;
+       u32 Tb;
+       u32 Tc;
+       u32 Td;
+       u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+       u8 reserved0[16];
+       u16 device_id;          /* abs.ofs: 16 */
+       u8 reserved1[2];
+       u16 pmc;                /* abs.ofs: 20 */
+       u8 reserved2[20];
+       u8 mac_address[6];      /* abs.ofs: 42 */
+       u8 reserved3[58];
+       u16 board_revision;     /* abs.ofs: 106 */
+       u8 reserved4[11];
+       u8 board_pba_number[9]; /* abs.ofs: 119 */
+       u8 reserved5[8];
+       u16 version;            /* abs.ofs: 136 */
+       u8 sku_cap;             /* abs.ofs: 138 */
+       u8 leds_mode;           /* abs.ofs: 139 */
+       u16 oem_mode;
+       u16 wowlan_mode;        /* abs.ofs: 142 */
+       u16 leds_time_interval; /* abs.ofs: 144 */
+       u8 leds_off_time;       /* abs.ofs: 146 */
+       u8 leds_on_time;        /* abs.ofs: 147 */
+       u8 almgor_m_version;    /* abs.ofs: 148 */
+       u8 antenna_switch_type; /* abs.ofs: 149 */
+       u8 reserved6[42];
+       u8 sku_id[4];           /* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+       u16 band_1_count;       /* abs.ofs: 196 */
+       struct il_eeprom_channel band_1_channels[14];   /* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+       u16 band_2_count;       /* abs.ofs: 226 */
+       struct il_eeprom_channel band_2_channels[13];   /* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+       u16 band_3_count;       /* abs.ofs: 254 */
+       struct il_eeprom_channel band_3_channels[12];   /* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+       u16 band_4_count;       /* abs.ofs: 280 */
+       struct il_eeprom_channel band_4_channels[11];   /* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+       u16 band_5_count;       /* abs.ofs: 304 */
+       struct il_eeprom_channel band_5_channels[6];    /* abs.ofs: 306 */
+
+       u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+       struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+       struct il3945_eeprom_temperature_corr corrections;      /* abs.ofs: 832 */
+       u8 reserved16[172];     /* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)     /* bit 6    */
+#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)     /* bit 7    */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES        5
+#define IL39_CMD_QUEUE_NUM     4
+
+#define IL_DEFAULT_TX_RETRY  15
+
+/*********************************************/
+
+#define RFD_SIZE                              4
+#define NUM_TFD_CHUNKS                        4
+
+#define TFD_CTL_COUNT_SET(n)       (n << 24)
+#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n)         (n << 28)
+#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
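+/*
+ * Packing sketch implied by the macros above (hypothetical values):
+ * TFD_CTL_COUNT_SET(2) | TFD_CTL_PAD_SET(1) places the chunk count (2)
+ * in bits 24-26 (read back with the 3-bit mask in TFD_CTL_COUNT_GET)
+ * and the pad value (1) in bits 28-31 (TFD_CTL_PAD_GET).
+ */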
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND              (0x000000)
+#define IL39_RTC_INST_UPPER_BOUND              (0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND              (0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND              (0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+                               IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+                               IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+       return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+               addr < IL39_RTC_DATA_UPPER_BOUND);
+}
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+       __le32 tx_base_ptr[8];
+} __packed;
+
+static inline u8
+il3945_hw_get_rate(__le16 rate_n_flags)
+{
+       return le16_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline u16
+il3945_hw_get_rate_n_flags(__le16 rate_n_flags)
+{
+       return le16_to_cpu(rate_n_flags);
+}
+
+static inline __le16
+il3945_hw_set_rate_n_flags(u8 rate, u16 flags)
+{
+       return cpu_to_le16((u16) rate | flags);
+}
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND                   (0x0800)
+#define FH39_MEM_UPPER_BOUND                   (0x1000)
+
+#define FH39_CBCC_TBL          (FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL          (FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL          (FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL          (FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL          (FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL          (FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf)                    (FH39_TFDB_TBL + \
+                                                ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)       (FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch)         (FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch)    (FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch)    (FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch)                 (FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch)          (FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch)                (FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch)            (FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch)       (FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR          (FH39_RCSR_WPTR(0))
+
+/* RSSR */
+#define FH39_RSSR_CTRL                 (FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS               (FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch)                 (FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch)          (FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch)          (FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch)       (FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL                            (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128          (0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST          (0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH                        (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF               (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER            (0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL     (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL      (0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD            (0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT             (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE             (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE            (0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID           (0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR            (0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON       (0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON       (0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B     (0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON                (0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON                (0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH      (0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH            (0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)    (BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)   (BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+       (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+        FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE                    (0x01000000)
+
+struct il3945_tfd_tb {
+       __le32 addr;
+       __le32 len;
+} __packed;
+
+struct il3945_tfd {
+       __le32 control_flags;
+       struct il3945_tfd_tb tbs[4];
+       u8 __pad[28];
+} __packed;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos);
+ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos);
+ssize_t il3945_ucode_general_stats_read(struct file *file,
+                                       char __user *user_buf, size_t count,
+                                       loff_t *ppos);
+#endif
+
+#endif
similarity index 55%
rename from drivers/net/wireless/iwlegacy/iwl-4965-calib.c
rename to drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877..d3248e3 100644 (file)
 #include <linux/slab.h>
 #include <net/mac80211.h>
 
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
+#include "common.h"
+#include "4965.h"
 
 /*****************************************************************************
  * INIT calibrations framework
  *****************************************************************************/
 
-struct statistics_general_data {
+struct stats_general_data {
        u32 beacon_silence_rssi_a;
        u32 beacon_silence_rssi_b;
        u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
        u32 beacon_energy_c;
 };
 
-void iwl4965_calib_free_results(struct iwl_priv *priv)
+void
+il4965_calib_free_results(struct il_priv *il)
 {
        int i;
 
-       for (i = 0; i < IWL_CALIB_MAX; i++) {
-               kfree(priv->calib_results[i].buf);
-               priv->calib_results[i].buf = NULL;
-               priv->calib_results[i].buf_len = 0;
+       for (i = 0; i < IL_CALIB_MAX; i++) {
+               kfree(il->calib_results[i].buf);
+               il->calib_results[i].buf = NULL;
+               il->calib_results[i].buf_len = 0;
        }
 }
 
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
  *   enough to receive all of our own network traffic, but not so
  *   high that our DSP gets too busy trying to lock onto non-network
  *   activity/noise. */
-static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
-                                  u32 norm_fa,
-                                  u32 rx_enable_time,
-                                  struct statistics_general_data *rx_info)
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+                      struct stats_general_data *rx_info)
 {
        u32 max_nrg_cck = 0;
        int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
        u32 false_alarms = norm_fa * 200 * 1024;
        u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
        u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
-       struct iwl_sensitivity_data *data = NULL;
-       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+       struct il_sensitivity_data *data = NULL;
+       const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
 
-       data = &(priv->sensitivity_data);
+       data = &(il->sensitivity_data);
 
        data->nrg_auto_corr_silence_diff = 0;
 
        /* Find max silence rssi among all 3 receivers.
         * This is background noise, which may include transmissions from other
         *    networks, measured during silence before our network's beacon */
-       silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
-                           ALL_BAND_FILTER) >> 8);
-       silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
-                           ALL_BAND_FILTER) >> 8);
-       silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
-                           ALL_BAND_FILTER) >> 8);
+       silence_rssi_a =
+           (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+       silence_rssi_b =
+           (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+       silence_rssi_c =
+           (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
 
        val = max(silence_rssi_b, silence_rssi_c);
        max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
                val = data->nrg_silence_rssi[i];
                silence_ref = max(silence_ref, val);
        }
-       IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
-                       silence_rssi_a, silence_rssi_b, silence_rssi_c,
-                       silence_ref);
+       D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+               silence_rssi_b, silence_rssi_c, silence_ref);
 
        /* Find max rx energy (min value!) among all 3 receivers,
         *   measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
                max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
        max_nrg_cck += 6;
 
-       IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
-                       rx_info->beacon_energy_a, rx_info->beacon_energy_b,
-                       rx_info->beacon_energy_c, max_nrg_cck - 6);
+       D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+               rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+               rx_info->beacon_energy_c, max_nrg_cck - 6);
 
        /* Count number of consecutive beacons with fewer-than-desired
         *   false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
                data->num_in_cck_no_fa++;
        else
                data->num_in_cck_no_fa = 0;
-       IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
-                       data->num_in_cck_no_fa);
+       D_CALIB("consecutive bcns with few false alarms = %u\n",
+               data->num_in_cck_no_fa);
 
        /* If we got too many false alarms this time, reduce sensitivity */
-       if ((false_alarms > max_false_alarms) &&
-               (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
-               IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
-                    false_alarms, max_false_alarms);
-               IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
-               data->nrg_curr_state = IWL_FA_TOO_MANY;
+       if (false_alarms > max_false_alarms &&
+           data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+               D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+                       max_false_alarms);
+               D_CALIB("... reducing sensitivity\n");
+               data->nrg_curr_state = IL_FA_TOO_MANY;
                /* Store for "fewer than desired" on later beacon */
                data->nrg_silence_ref = silence_ref;
 
                /* increase energy threshold (reduce nrg value)
                 *   to decrease sensitivity */
                data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
-       /* Else if we got fewer than desired, increase sensitivity */
+               /* Else if we got fewer than desired, increase sensitivity */
        } else if (false_alarms < min_false_alarms) {
-               data->nrg_curr_state = IWL_FA_TOO_FEW;
+               data->nrg_curr_state = IL_FA_TOO_FEW;
 
                /* Compare silence level with silence level for most recent
                 *   healthy number or too many false alarms */
-               data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
-                                                  (s32)silence_ref;
+               data->nrg_auto_corr_silence_diff =
+                   (s32) data->nrg_silence_ref - (s32) silence_ref;
 
-               IWL_DEBUG_CALIB(priv,
-                        "norm FA %u < min FA %u, silence diff %d\n",
-                        false_alarms, min_false_alarms,
-                        data->nrg_auto_corr_silence_diff);
+               D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+                       false_alarms, min_false_alarms,
+                       data->nrg_auto_corr_silence_diff);
 
                /* Increase value to increase sensitivity, but only if:
                 * 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
                 *      from a previous beacon with too many, or healthy # FAs
                 * OR 2) We've seen a lot of beacons (100) with too few
                 *       false alarms */
-               if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
-                       ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
-                       (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+               if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+                   (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+                    data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
 
-                       IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+                       D_CALIB("... increasing sensitivity\n");
                        /* Increase nrg value to increase sensitivity */
                        val = data->nrg_th_cck + NRG_STEP_CCK;
-                       data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+                       data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
                } else {
-                       IWL_DEBUG_CALIB(priv,
-                                        "... but not changing sensitivity\n");
+                       D_CALIB("... but not changing sensitivity\n");
                }
 
-       /* Else we got a healthy number of false alarms, keep status quo */
+               /* Else we got a healthy number of false alarms, keep status quo */
        } else {
-               IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
-               data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+               D_CALIB(" FA in safe zone\n");
+               data->nrg_curr_state = IL_FA_GOOD_RANGE;
 
                /* Store for use in "fewer than desired" with later beacon */
                data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
                /* If previous beacon had too many false alarms,
                 *   give it some extra margin by reducing sensitivity again
                 *   (but don't go below measured energy of desired Rx) */
-               if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
-                       IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+               if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+                       D_CALIB("... increasing margin\n");
                        if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
                                data->nrg_th_cck -= NRG_MARGIN;
                        else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
         * Lower value is higher energy, so we use max()!
         */
        data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
-       IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+       D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
 
        data->nrg_prev_state = data->nrg_curr_state;
 
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
                else {
                        val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
                        data->auto_corr_cck =
-                               min((u32)ranges->auto_corr_max_cck, val);
+                           min((u32) ranges->auto_corr_max_cck, val);
                }
                val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
                data->auto_corr_cck_mrc =
-                       min((u32)ranges->auto_corr_max_cck_mrc, val);
-       } else if ((false_alarms < min_false_alarms) &&
-          ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
-          (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+                   min((u32) ranges->auto_corr_max_cck_mrc, val);
+       } else if (false_alarms < min_false_alarms &&
+                  (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+                   data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
 
                /* Decrease auto_corr values to increase sensitivity */
                val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
-               data->auto_corr_cck =
-                       max((u32)ranges->auto_corr_min_cck, val);
+               data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
                val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
                data->auto_corr_cck_mrc =
-                       max((u32)ranges->auto_corr_min_cck_mrc, val);
+                   max((u32) ranges->auto_corr_min_cck_mrc, val);
        }
 
        return 0;
 }
 
-
-static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
-                                      u32 norm_fa,
-                                      u32 rx_enable_time)
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
 {
        u32 val;
        u32 false_alarms = norm_fa * 200 * 1024;
        u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
        u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
-       struct iwl_sensitivity_data *data = NULL;
-       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+       struct il_sensitivity_data *data = NULL;
+       const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
 
-       data = &(priv->sensitivity_data);
+       data = &(il->sensitivity_data);
 
        /* If we got too many false alarms this time, reduce sensitivity */
        if (false_alarms > max_false_alarms) {
 
-               IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
-                            false_alarms, max_false_alarms);
+               D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
+                       max_false_alarms);
 
                val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm =
-                       min((u32)ranges->auto_corr_max_ofdm, val);
+                   min((u32) ranges->auto_corr_max_ofdm, val);
 
                val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm_mrc =
-                       min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+                   min((u32) ranges->auto_corr_max_ofdm_mrc, val);
 
                val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm_x1 =
-                       min((u32)ranges->auto_corr_max_ofdm_x1, val);
+                   min((u32) ranges->auto_corr_max_ofdm_x1, val);
 
                val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm_mrc_x1 =
-                       min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+                   min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
        }
 
        /* Else if we got fewer than desired, increase sensitivity */
        else if (false_alarms < min_false_alarms) {
 
-               IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
-                            false_alarms, min_false_alarms);
+               D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+                       min_false_alarms);
 
                val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm =
-                       max((u32)ranges->auto_corr_min_ofdm, val);
+                   max((u32) ranges->auto_corr_min_ofdm, val);
 
                val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm_mrc =
-                       max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+                   max((u32) ranges->auto_corr_min_ofdm_mrc, val);
 
                val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm_x1 =
-                       max((u32)ranges->auto_corr_min_ofdm_x1, val);
+                   max((u32) ranges->auto_corr_min_ofdm_x1, val);
 
                val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
                data->auto_corr_ofdm_mrc_x1 =
-                       max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+                   max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
        } else {
-               IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
-                        min_false_alarms, false_alarms, max_false_alarms);
+               D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+                       min_false_alarms, false_alarms, max_false_alarms);
        }
        return 0;
 }
 
-static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
-                               struct iwl_sensitivity_data *data,
-                               __le16 *tbl)
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+                                     struct il_sensitivity_data *data,
+                                     __le16 *tbl)
 {
-       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
-                               cpu_to_le16((u16)data->auto_corr_ofdm);
-       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
-                               cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
-       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
-                               cpu_to_le16((u16)data->auto_corr_ofdm_x1);
-       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
-                               cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
-
-       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
-                               cpu_to_le16((u16)data->auto_corr_cck);
-       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
-                               cpu_to_le16((u16)data->auto_corr_cck_mrc);
-
-       tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
-                               cpu_to_le16((u16)data->nrg_th_cck);
-       tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
-                               cpu_to_le16((u16)data->nrg_th_ofdm);
-
-       tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
-                               cpu_to_le16(data->barker_corr_th_min);
-       tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
-                               cpu_to_le16(data->barker_corr_th_min_mrc);
-       tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
-                               cpu_to_le16(data->nrg_th_cca);
-
-       IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
-                       data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
-                       data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
-                       data->nrg_th_ofdm);
-
-       IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
-                       data->auto_corr_cck, data->auto_corr_cck_mrc,
-                       data->nrg_th_cck);
+       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+           cpu_to_le16((u16) data->auto_corr_ofdm);
+       tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+           cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+           cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+       tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+           cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+           cpu_to_le16((u16) data->auto_corr_cck);
+       tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+           cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+       tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+       tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+       tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+           cpu_to_le16(data->barker_corr_th_min);
+       tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+           cpu_to_le16(data->barker_corr_th_min_mrc);
+       tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+       D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+               data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+               data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+               data->nrg_th_ofdm);
+
+       D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+               data->auto_corr_cck_mrc, data->nrg_th_cck);
 }
 
-/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
-static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
 {
-       struct iwl_sensitivity_cmd cmd;
-       struct iwl_sensitivity_data *data = NULL;
-       struct iwl_host_cmd cmd_out = {
-               .id = SENSITIVITY_CMD,
-               .len = sizeof(struct iwl_sensitivity_cmd),
+       struct il_sensitivity_cmd cmd;
+       struct il_sensitivity_data *data = NULL;
+       struct il_host_cmd cmd_out = {
+               .id = C_SENSITIVITY,
+               .len = sizeof(struct il_sensitivity_cmd),
                .flags = CMD_ASYNC,
                .data = &cmd,
        };
 
-       data = &(priv->sensitivity_data);
+       data = &(il->sensitivity_data);
 
        memset(&cmd, 0, sizeof(cmd));
 
-       iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+       il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
 
        /* Update uCode's "work" table, and copy it to DSP */
-       cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+       cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
 
        /* Don't send command to uCode if nothing has changed */
-       if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
-                   sizeof(u16)*HD_TABLE_SIZE)) {
-               IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+       if (!memcmp
+           (&cmd.table[0], &(il->sensitivity_tbl[0]),
+            sizeof(u16) * HD_TBL_SIZE)) {
+               D_CALIB("No change in C_SENSITIVITY\n");
                return 0;
        }
 
        /* Copy table for comparison next time */
-       memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
-              sizeof(u16)*HD_TABLE_SIZE);
+       memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+              sizeof(u16) * HD_TBL_SIZE);
 
-       return iwl_legacy_send_cmd(priv, &cmd_out);
+       return il_send_cmd(il, &cmd_out);
 }
 
-void iwl4965_init_sensitivity(struct iwl_priv *priv)
+void
+il4965_init_sensitivity(struct il_priv *il)
 {
        int ret = 0;
        int i;
-       struct iwl_sensitivity_data *data = NULL;
-       const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+       struct il_sensitivity_data *data = NULL;
+       const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
 
-       if (priv->disable_sens_cal)
+       if (il->disable_sens_cal)
                return;
 
-       IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+       D_CALIB("Start il4965_init_sensitivity\n");
 
        /* Clear driver's sensitivity algo data */
-       data = &(priv->sensitivity_data);
+       data = &(il->sensitivity_data);
 
        if (ranges == NULL)
                return;
 
-       memset(data, 0, sizeof(struct iwl_sensitivity_data));
+       memset(data, 0, sizeof(struct il_sensitivity_data));
 
        data->num_in_cck_no_fa = 0;
-       data->nrg_curr_state = IWL_FA_TOO_MANY;
-       data->nrg_prev_state = IWL_FA_TOO_MANY;
+       data->nrg_curr_state = IL_FA_TOO_MANY;
+       data->nrg_prev_state = IL_FA_TOO_MANY;
        data->nrg_silence_ref = 0;
        data->nrg_silence_idx = 0;
        data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
        for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
                data->nrg_silence_rssi[i] = 0;
 
-       data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
+       data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
        data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
-       data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
+       data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
        data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
        data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
        data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
        data->last_bad_plcp_cnt_cck = 0;
        data->last_fa_cnt_cck = 0;
 
-       ret |= iwl4965_sensitivity_write(priv);
-       IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+       ret |= il4965_sensitivity_write(il);
+       D_CALIB("<<return 0x%X\n", ret);
 }
 
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
 {
        u32 rx_enable_time;
        u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
        u32 bad_plcp_ofdm;
        u32 norm_fa_ofdm;
        u32 norm_fa_cck;
-       struct iwl_sensitivity_data *data = NULL;
-       struct statistics_rx_non_phy *rx_info;
-       struct statistics_rx_phy *ofdm, *cck;
+       struct il_sensitivity_data *data = NULL;
+       struct stats_rx_non_phy *rx_info;
+       struct stats_rx_phy *ofdm, *cck;
        unsigned long flags;
-       struct statistics_general_data statis;
+       struct stats_general_data statis;
 
-       if (priv->disable_sens_cal)
+       if (il->disable_sens_cal)
                return;
 
-       data = &(priv->sensitivity_data);
+       data = &(il->sensitivity_data);
 
-       if (!iwl_legacy_is_any_associated(priv)) {
-               IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+       if (!il_is_any_associated(il)) {
+               D_CALIB("<< - not associated\n");
                return;
        }
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irqsave(&il->lock, flags);
 
-       rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
-       ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
-       cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+       rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+       ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+       cck = &(((struct il_notif_stats *)resp)->rx.cck);
 
        if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
-               IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
-               spin_unlock_irqrestore(&priv->lock, flags);
+               D_CALIB("<< invalid data.\n");
+               spin_unlock_irqrestore(&il->lock, flags);
                return;
        }
 
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
        bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
 
        statis.beacon_silence_rssi_a =
-                       le32_to_cpu(rx_info->beacon_silence_rssi_a);
+           le32_to_cpu(rx_info->beacon_silence_rssi_a);
        statis.beacon_silence_rssi_b =
-                       le32_to_cpu(rx_info->beacon_silence_rssi_b);
+           le32_to_cpu(rx_info->beacon_silence_rssi_b);
        statis.beacon_silence_rssi_c =
-                       le32_to_cpu(rx_info->beacon_silence_rssi_c);
-       statis.beacon_energy_a =
-                       le32_to_cpu(rx_info->beacon_energy_a);
-       statis.beacon_energy_b =
-                       le32_to_cpu(rx_info->beacon_energy_b);
-       statis.beacon_energy_c =
-                       le32_to_cpu(rx_info->beacon_energy_c);
+           le32_to_cpu(rx_info->beacon_silence_rssi_c);
+       statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+       statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+       statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&il->lock, flags);
 
-       IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+       D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
 
        if (!rx_enable_time) {
-               IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+               D_CALIB("<< RX Enable Time == 0!\n");
                return;
        }
 
-       /* These statistics increase monotonically, and do not reset
+       /* These stats increase monotonically, and do not reset
         *   at each beacon.  Calculate difference from last value, or just
-        *   use the new statistics value if it has reset or wrapped around. */
+        *   use the new stats value if it has reset or wrapped around. */
        if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
                data->last_bad_plcp_cnt_cck = bad_plcp_cck;
        else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
        norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
        norm_fa_cck = fa_cck + bad_plcp_cck;
 
-       IWL_DEBUG_CALIB(priv,
-                        "cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
-                       bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+       D_CALIB("cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
+               bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
 
-       iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
-       iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+       il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+       il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
 
-       iwl4965_sensitivity_write(priv);
+       il4965_sensitivity_write(il);
 }
 
-static inline u8 iwl4965_find_first_chain(u8 mask)
+static inline u8
+il4965_find_first_chain(u8 mask)
 {
        if (mask & ANT_A)
                return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
  * disconnected.
  */
 static void
-iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
-                                    struct iwl_chain_noise_data *data)
+il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
+                           struct il_chain_noise_data *data)
 {
        u32 active_chains = 0;
        u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
        u8 first_chain;
        u16 i = 0;
 
-       average_sig[0] = data->chain_signal_a /
-                        priv->cfg->base_params->chain_noise_num_beacons;
-       average_sig[1] = data->chain_signal_b /
-                        priv->cfg->base_params->chain_noise_num_beacons;
-       average_sig[2] = data->chain_signal_c /
-                        priv->cfg->base_params->chain_noise_num_beacons;
+       average_sig[0] =
+           data->chain_signal_a /
+           il->cfg->base_params->chain_noise_num_beacons;
+       average_sig[1] =
+           data->chain_signal_b /
+           il->cfg->base_params->chain_noise_num_beacons;
+       average_sig[2] =
+           data->chain_signal_c /
+           il->cfg->base_params->chain_noise_num_beacons;
 
        if (average_sig[0] >= average_sig[1]) {
                max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
                active_chains = (1 << max_average_sig_antenna_i);
        }
 
-       IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
-                    average_sig[0], average_sig[1], average_sig[2]);
-       IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
-                    max_average_sig, max_average_sig_antenna_i);
+       D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+               average_sig[2]);
+       D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+               max_average_sig_antenna_i);
 
        /* Compare signal strengths for all 3 receivers. */
        for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
                                data->disconn_array[i] = 1;
                        else
                                active_chains |= (1 << i);
-                       IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
-                            "disconn_array[i] = %d\n",
-                            i, rssi_delta, data->disconn_array[i]);
+                       D_CALIB("i = %d  rssiDelta = %d  "
+                               "disconn_array[i] = %d\n", i, rssi_delta,
+                               data->disconn_array[i]);
                }
        }
 
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
         * To be safe, simply mask out any chains that we know
         * are not on the device.
         */
-       active_chains &= priv->hw_params.valid_rx_ant;
+       active_chains &= il->hw_params.valid_rx_ant;
 
        num_tx_chains = 0;
        for (i = 0; i < NUM_RX_CHAINS; i++) {
                /* loops on all the bits of
-                * priv->hw_setting.valid_tx_ant */
+                * il->hw_setting.valid_tx_ant */
                u8 ant_msk = (1 << i);
-               if (!(priv->hw_params.valid_tx_ant & ant_msk))
+               if (!(il->hw_params.valid_tx_ant & ant_msk))
                        continue;
 
                num_tx_chains++;
                if (data->disconn_array[i] == 0)
                        /* there is a Tx antenna connected */
                        break;
-               if (num_tx_chains == priv->hw_params.tx_chains_num &&
+               if (num_tx_chains == il->hw_params.tx_chains_num &&
                    data->disconn_array[i]) {
                        /*
                         * If all chains are disconnected
                         * connect the first valid tx chain
                         */
                        first_chain =
-                       iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+                           il4965_find_first_chain(il->cfg->valid_tx_ant);
                        data->disconn_array[first_chain] = 0;
                        active_chains |= BIT(first_chain);
-                       IWL_DEBUG_CALIB(priv,
-                                       "All Tx chains are disconnected W/A - declare %d as connected\n",
-                                       first_chain);
+                       D_CALIB("All Tx chains are disconnected"
+                               "- declare %d as connected\n", first_chain);
                        break;
                }
        }
 
-       if (active_chains != priv->hw_params.valid_rx_ant &&
-           active_chains != priv->chain_noise_data.active_chains)
-               IWL_DEBUG_CALIB(priv,
-                               "Detected that not all antennas are connected! "
-                               "Connected: %#x, valid: %#x.\n",
-                               active_chains, priv->hw_params.valid_rx_ant);
+       if (active_chains != il->hw_params.valid_rx_ant &&
+           active_chains != il->chain_noise_data.active_chains)
+               D_CALIB("Detected that not all antennas are connected! "
+                       "Connected: %#x, valid: %#x.\n", active_chains,
+                       il->hw_params.valid_rx_ant);
 
        /* Save for use within RXON, TX, SCAN commands, etc. */
        data->active_chains = active_chains;
-       IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
-                       active_chains);
+       D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
 }
 
-static void iwl4965_gain_computation(struct iwl_priv *priv,
-               u32 *average_noise,
-               u16 min_average_noise_antenna_i,
-               u32 min_average_noise,
-               u8 default_chain)
+static void
+il4965_gain_computation(struct il_priv *il, u32 * average_noise,
+                       u16 min_average_noise_antenna_i, u32 min_average_noise,
+                       u8 default_chain)
 {
        int i, ret;
-       struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+       struct il_chain_noise_data *data = &il->chain_noise_data;
 
        data->delta_gain_code[min_average_noise_antenna_i] = 0;
 
        for (i = default_chain; i < NUM_RX_CHAINS; i++) {
                s32 delta_g = 0;
 
-               if (!(data->disconn_array[i]) &&
-                   (data->delta_gain_code[i] ==
-                            CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+               if (!data->disconn_array[i] &&
+                   data->delta_gain_code[i] ==
+                   CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
                        delta_g = average_noise[i] - min_average_noise;
-                       data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+                       data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
                        data->delta_gain_code[i] =
-                               min(data->delta_gain_code[i],
+                           min(data->delta_gain_code[i],
                                (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
 
                        data->delta_gain_code[i] =
-                               (data->delta_gain_code[i] | (1 << 2));
+                           (data->delta_gain_code[i] | (1 << 2));
                } else {
                        data->delta_gain_code[i] = 0;
                }
        }
-       IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
-                    data->delta_gain_code[0],
-                    data->delta_gain_code[1],
-                    data->delta_gain_code[2]);
+       D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+               data->delta_gain_code[1], data->delta_gain_code[2]);
 
        /* Differential gain gets sent to uCode only once */
        if (!data->radio_write) {
-               struct iwl_calib_diff_gain_cmd cmd;
+               struct il_calib_diff_gain_cmd cmd;
                data->radio_write = 1;
 
                memset(&cmd, 0, sizeof(cmd));
-               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+               cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
                cmd.diff_gain_a = data->delta_gain_code[0];
                cmd.diff_gain_b = data->delta_gain_code[1];
                cmd.diff_gain_c = data->delta_gain_code[2];
-               ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-                                     sizeof(cmd), &cmd);
+               ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
                if (ret)
-                       IWL_DEBUG_CALIB(priv, "fail sending cmd "
-                                    "REPLY_PHY_CALIBRATION_CMD\n");
+                       D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
 
                /* TODO we might want recalculate
                 * rx_chain in rxon cmd */
 
                /* Mark so we run this algo only once! */
-               data->state = IWL_CHAIN_NOISE_CALIBRATED;
+               data->state = IL_CHAIN_NOISE_CALIBRATED;
        }
 }
 
-
-
 /*
- * Accumulate 16 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise stats for each of
  *   3 receivers/antennas/rx-chains, then figure out:
  * 1)  Which antennas are connected.
  * 2)  Differential rx gain settings to balance the 3 receivers.
  */
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
 {
-       struct iwl_chain_noise_data *data = NULL;
+       struct il_chain_noise_data *data = NULL;
 
        u32 chain_noise_a;
        u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        u32 chain_sig_a;
        u32 chain_sig_b;
        u32 chain_sig_c;
-       u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
-       u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+       u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+       u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
        u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
        u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
        u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        u8 rxon_band24;
        u8 stat_band24;
        unsigned long flags;
-       struct statistics_rx_non_phy *rx_info;
+       struct stats_rx_non_phy *rx_info;
 
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct il_rxon_context *ctx = &il->ctx;
 
-       if (priv->disable_chain_noise_cal)
+       if (il->disable_chain_noise_cal)
                return;
 
-       data = &(priv->chain_noise_data);
+       data = &(il->chain_noise_data);
 
        /*
         * Accumulate just the first "chain_noise_num_beacons" after
         * the first association, then we're done forever.
         */
-       if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
-               if (data->state == IWL_CHAIN_NOISE_ALIVE)
-                       IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+       if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+               if (data->state == IL_CHAIN_NOISE_ALIVE)
+                       D_CALIB("Wait for noise calib reset\n");
                return;
        }
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irqsave(&il->lock, flags);
 
-       rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
-                     rx.general);
+       rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
 
        if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
-               IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
-               spin_unlock_irqrestore(&priv->lock, flags);
+               D_CALIB(" << Interference data unavailable\n");
+               spin_unlock_irqrestore(&il->lock, flags);
                return;
        }
 
        rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
        rxon_chnum = le16_to_cpu(ctx->staging.channel);
 
-       stat_band24 = !!(((struct iwl_notif_statistics *)
-                        stat_resp)->flag &
-                        STATISTICS_REPLY_FLG_BAND_24G_MSK);
-       stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
-                                stat_resp)->flag) >> 16;
+       stat_band24 =
+           !!(((struct il_notif_stats *)stat_resp)->
+              flag & STATS_REPLY_FLG_BAND_24G_MSK);
+       stat_chnum =
+           le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
 
        /* Make sure we accumulate data for just the associated channel
         *   (even if scanning). */
-       if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
-               IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
-                               rxon_chnum, rxon_band24);
-               spin_unlock_irqrestore(&priv->lock, flags);
+       if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+               D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+                       rxon_band24);
+               spin_unlock_irqrestore(&il->lock, flags);
                return;
        }
 
        /*
-        *  Accumulate beacon statistics values across
+        *  Accumulate beacon stats values across
         * "chain_noise_num_beacons"
         */
-       chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
-                               IN_BAND_FILTER;
-       chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
-                               IN_BAND_FILTER;
-       chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
-                               IN_BAND_FILTER;
+       chain_noise_a =
+           le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+       chain_noise_b =
+           le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+       chain_noise_c =
+           le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
 
        chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
        chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
        chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
 
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irqrestore(&il->lock, flags);
 
        data->beacon_count++;
 
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
        data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
        data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
 
-       IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
-                       rxon_chnum, rxon_band24, data->beacon_count);
-       IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
-                       chain_sig_a, chain_sig_b, chain_sig_c);
-       IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
-                       chain_noise_a, chain_noise_b, chain_noise_c);
+       D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+               data->beacon_count);
+       D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+               chain_sig_c);
+       D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+               chain_noise_c);
 
        /* If this is the "chain_noise_num_beacons", determine:
         * 1)  Disconnected antennas (using signal strengths)
         * 2)  Differential gain (using silence noise) to balance receivers */
-       if (data->beacon_count !=
-               priv->cfg->base_params->chain_noise_num_beacons)
+       if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
                return;
 
        /* Analyze signal for disconnected antenna */
-       iwl4965_find_disconn_antenna(priv, average_sig, data);
+       il4965_find_disconn_antenna(il, average_sig, data);
 
        /* Analyze noise for rx balance */
-       average_noise[0] = data->chain_noise_a /
-                          priv->cfg->base_params->chain_noise_num_beacons;
-       average_noise[1] = data->chain_noise_b /
-                          priv->cfg->base_params->chain_noise_num_beacons;
-       average_noise[2] = data->chain_noise_c /
-                          priv->cfg->base_params->chain_noise_num_beacons;
+       average_noise[0] =
+           data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+       average_noise[1] =
+           data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+       average_noise[2] =
+           data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
 
        for (i = 0; i < NUM_RX_CHAINS; i++) {
-               if (!(data->disconn_array[i]) &&
-                  (average_noise[i] <= min_average_noise)) {
+               if (!data->disconn_array[i] &&
+                   average_noise[i] <= min_average_noise) {
                        /* This means that chain i is active and has
                         * lower noise values so far: */
                        min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
                }
        }
 
-       IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
-                       average_noise[0], average_noise[1],
-                       average_noise[2]);
+       D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+               average_noise[1], average_noise[2]);
 
-       IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
-                       min_average_noise, min_average_noise_antenna_i);
+       D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+               min_average_noise_antenna_i);
 
-       iwl4965_gain_computation(priv, average_noise,
-                       min_average_noise_antenna_i, min_average_noise,
-                       iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+       il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+                               min_average_noise,
+                               il4965_find_first_chain(il->cfg->valid_rx_ant));
 
        /* Some power changes may have been made during the calibration.
         * Update and commit the RXON
         */
-       if (priv->cfg->ops->lib->update_chain_flags)
-               priv->cfg->ops->lib->update_chain_flags(priv);
+       if (il->cfg->ops->lib->update_chain_flags)
+               il->cfg->ops->lib->update_chain_flags(il);
 
-       data->state = IWL_CHAIN_NOISE_DONE;
-       iwl_legacy_power_update_mode(priv, false);
+       data->state = IL_CHAIN_NOISE_DONE;
+       il_power_update_mode(il, false);
 }
 
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+void
+il4965_reset_run_time_calib(struct il_priv *il)
 {
        int i;
-       memset(&(priv->sensitivity_data), 0,
-              sizeof(struct iwl_sensitivity_data));
-       memset(&(priv->chain_noise_data), 0,
-              sizeof(struct iwl_chain_noise_data));
+       memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+       memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
        for (i = 0; i < NUM_RX_CHAINS; i++)
-               priv->chain_noise_data.delta_gain_code[i] =
-                               CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+               il->chain_noise_data.delta_gain_code[i] =
+                   CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
 
-       /* Ask for statistics now, the uCode will send notification
+       /* Ask for stats now, the uCode will send notification
         * periodically after association */
-       iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+       il_send_stats_request(il, CMD_ASYNC, true);
 }
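
A minimal sketch, not part of the patch: both sensitivity helpers above (il4965_sens_energy_cck() and il4965_sens_auto_corr_ofdm()) start from the same test, scaling the false-alarm count by 200 * 1024 and comparing it with limits that grow with the receive-enable time. The standalone fragment below reproduces just that comparison; MIN_FA and MAX_FA are placeholders for the driver's MIN_FA_CCK, MAX_FA_CCK, MIN_FA_OFDM and MAX_FA_OFDM constants, which are not shown in this hunk, and classify_false_alarms() is an illustrative name.

#include <stdint.h>
#include <stdio.h>

/*
 * Placeholder thresholds: the driver's real MIN_FA_CCK, MAX_FA_CCK,
 * MIN_FA_OFDM and MAX_FA_OFDM constants are defined elsewhere in iwlegacy.
 */
#define MIN_FA  5u
#define MAX_FA 50u

/*
 * Same comparison the calibration helpers perform: the normalised
 * false-alarm count is scaled by 200 * 1024 and checked against a band
 * whose limits scale with the measured receive-enable time (usecs).
 */
static int classify_false_alarms(uint32_t norm_fa, uint32_t rx_enable_time)
{
        uint32_t false_alarms = norm_fa * 200 * 1024;
        uint32_t max_false_alarms = MAX_FA * rx_enable_time;
        uint32_t min_false_alarms = MIN_FA * rx_enable_time;

        if (false_alarms > max_false_alarms)
                return 1;       /* too many: reduce sensitivity */
        if (false_alarms < min_false_alarms)
                return -1;      /* too few: increase sensitivity */
        return 0;               /* healthy range: keep the status quo */
}

int main(void)
{
        /* Example: 10 normalised false alarms over 500000 usecs of rx time. */
        printf("%d\n", classify_false_alarms(10, 500000));
        return 0;
}
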
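A minimal sketch, not part of the patch: the differential-gain step in il4965_gain_computation() above is plain integer arithmetic; the quietest chain keeps code 0 and every other connected chain gets its extra noise scaled by 10/15, capped, with bit 2 ORed in. The standalone fragment below reproduces only that arithmetic; DELTA_GAIN_CAP is a placeholder for CHAIN_NOISE_MAX_DELTA_GAIN_CODE (not shown in this diff) and the delta_gain_code() helper name is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Placeholder cap, standing in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE. */
#define DELTA_GAIN_CAP 15u

/* Gain code for one connected chain, relative to the quietest chain. */
static uint8_t delta_gain_code(uint32_t avg_noise, uint32_t min_avg_noise)
{
        uint32_t delta_g = avg_noise - min_avg_noise;
        uint32_t code = (delta_g * 10) / 15;

        if (code > DELTA_GAIN_CAP)
                code = DELTA_GAIN_CAP;
        return (uint8_t)(code | (1 << 2));      /* driver also ORs in bit 2 */
}

int main(void)
{
        /* Example: three chains averaging 40, 52 and 46 noise units. */
        uint32_t noise[3] = { 40, 52, 46 };
        uint32_t quietest = 40;
        int i;

        for (i = 0; i < 3; i++) {
                unsigned int code = (noise[i] == quietest) ?
                        0u : delta_gain_code(noise[i], quietest);
                printf("chain %d -> 0x%02x\n", i, code);
        }
        return 0;
}
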
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 0000000..98ec39f
--- /dev/null
@@ -0,0 +1,746 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+*  Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = "  %-30s %10u\n";
+static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char *fmt_header =
+    "%-32s    current  cumulative       delta         max\n";
+
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+       int p = 0;
+       u32 flag;
+
+       flag = le32_to_cpu(il->_4965.stats.flag);
+
+       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+       if (flag & UCODE_STATS_CLEAR_MSK)
+               p += scnprintf(buf + p, bufsz - p,
+                              "\tStatistics have been cleared\n");
+       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+                      (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+                      "5.2 GHz");
+       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+                      (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+                      "disabled");
+
+       return p;
+}
+
+ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz =
+           sizeof(struct stats_rx_phy) * 40 +
+           sizeof(struct stats_rx_non_phy) * 40 +
+           sizeof(struct stats_rx_ht_phy) * 40 + 400;
+       ssize_t ret;
+       struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+       struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+       struct stats_rx_non_phy *general, *accum_general;
+       struct stats_rx_non_phy *delta_general, *max_general;
+       struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * the statistic information display here is based on
+        * the last stats notification from uCode
+        * might not reflect the current uCode activity
+        */
+       ofdm = &il->_4965.stats.rx.ofdm;
+       cck = &il->_4965.stats.rx.cck;
+       general = &il->_4965.stats.rx.general;
+       ht = &il->_4965.stats.rx.ofdm_ht;
+       accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+       accum_cck = &il->_4965.accum_stats.rx.cck;
+       accum_general = &il->_4965.accum_stats.rx.general;
+       accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+       delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+       delta_cck = &il->_4965.delta_stats.rx.cck;
+       delta_general = &il->_4965.delta_stats.rx.general;
+       delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+       max_ofdm = &il->_4965.max_delta.rx.ofdm;
+       max_cck = &il->_4965.max_delta.rx.cck;
+       max_general = &il->_4965.max_delta.rx.general;
+       max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+       pos += il4965_stats_flag(il, buf, bufsz);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_header,
+                     "Statistics_Rx - OFDM:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+                     le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+                     delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+                     le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+                     delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+                     le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+                     delta_ofdm->plcp_err, max_ofdm->plcp_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+                     le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+                     delta_ofdm->crc32_err, max_ofdm->crc32_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+                     le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+                     delta_ofdm->overrun_err, max_ofdm->overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+                     le32_to_cpu(ofdm->early_overrun_err),
+                     accum_ofdm->early_overrun_err,
+                     delta_ofdm->early_overrun_err,
+                     max_ofdm->early_overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+                     le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+                     delta_ofdm->crc32_good, max_ofdm->crc32_good);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+                     le32_to_cpu(ofdm->false_alarm_cnt),
+                     accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+                     max_ofdm->false_alarm_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+                     le32_to_cpu(ofdm->fina_sync_err_cnt),
+                     accum_ofdm->fina_sync_err_cnt,
+                     delta_ofdm->fina_sync_err_cnt,
+                     max_ofdm->fina_sync_err_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+                     le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+                     delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+                     le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+                     delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+                     le32_to_cpu(ofdm->unresponded_rts),
+                     accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+                     max_ofdm->unresponded_rts);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+                     le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+                     accum_ofdm->rxe_frame_limit_overrun,
+                     delta_ofdm->rxe_frame_limit_overrun,
+                     max_ofdm->rxe_frame_limit_overrun);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+                     le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+                     delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+                     le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+                     delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+                     le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+                     accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+                     max_ofdm->sent_ba_rsp_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+                     le32_to_cpu(ofdm->dsp_self_kill),
+                     accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+                     max_ofdm->dsp_self_kill);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+                     le32_to_cpu(ofdm->mh_format_err),
+                     accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+                     max_ofdm->mh_format_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "re_acq_main_rssi_sum:",
+                     le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+                     accum_ofdm->re_acq_main_rssi_sum,
+                     delta_ofdm->re_acq_main_rssi_sum,
+                     max_ofdm->re_acq_main_rssi_sum);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_header,
+                     "Statistics_Rx - CCK:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+                     le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+                     delta_cck->ina_cnt, max_cck->ina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+                     le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+                     delta_cck->fina_cnt, max_cck->fina_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+                     le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+                     delta_cck->plcp_err, max_cck->plcp_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+                     le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+                     delta_cck->crc32_err, max_cck->crc32_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+                     le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+                     delta_cck->overrun_err, max_cck->overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+                     le32_to_cpu(cck->early_overrun_err),
+                     accum_cck->early_overrun_err,
+                     delta_cck->early_overrun_err, max_cck->early_overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+                     le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+                     delta_cck->crc32_good, max_cck->crc32_good);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+                     le32_to_cpu(cck->false_alarm_cnt),
+                     accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+                     max_cck->false_alarm_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+                     le32_to_cpu(cck->fina_sync_err_cnt),
+                     accum_cck->fina_sync_err_cnt,
+                     delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+                     le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+                     delta_cck->sfd_timeout, max_cck->sfd_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+                     le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+                     delta_cck->fina_timeout, max_cck->fina_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+                     le32_to_cpu(cck->unresponded_rts),
+                     accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+                     max_cck->unresponded_rts);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+                     le32_to_cpu(cck->rxe_frame_limit_overrun),
+                     accum_cck->rxe_frame_limit_overrun,
+                     delta_cck->rxe_frame_limit_overrun,
+                     max_cck->rxe_frame_limit_overrun);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+                     le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+                     delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+                     le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+                     delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+                     le32_to_cpu(cck->sent_ba_rsp_cnt),
+                     accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+                     max_cck->sent_ba_rsp_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+                     le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+                     delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+                     le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+                     delta_cck->mh_format_err, max_cck->mh_format_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "re_acq_main_rssi_sum:",
+                     le32_to_cpu(cck->re_acq_main_rssi_sum),
+                     accum_cck->re_acq_main_rssi_sum,
+                     delta_cck->re_acq_main_rssi_sum,
+                     max_cck->re_acq_main_rssi_sum);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_header,
+                     "Statistics_Rx - GENERAL:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+                     le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+                     delta_general->bogus_cts, max_general->bogus_cts);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+                     le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+                     delta_general->bogus_ack, max_general->bogus_ack);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+                     le32_to_cpu(general->non_bssid_frames),
+                     accum_general->non_bssid_frames,
+                     delta_general->non_bssid_frames,
+                     max_general->non_bssid_frames);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+                     le32_to_cpu(general->filtered_frames),
+                     accum_general->filtered_frames,
+                     delta_general->filtered_frames,
+                     max_general->filtered_frames);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+                     le32_to_cpu(general->non_channel_beacons),
+                     accum_general->non_channel_beacons,
+                     delta_general->non_channel_beacons,
+                     max_general->non_channel_beacons);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+                     le32_to_cpu(general->channel_beacons),
+                     accum_general->channel_beacons,
+                     delta_general->channel_beacons,
+                     max_general->channel_beacons);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+                     le32_to_cpu(general->num_missed_bcon),
+                     accum_general->num_missed_bcon,
+                     delta_general->num_missed_bcon,
+                     max_general->num_missed_bcon);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "adc_rx_saturation_time:",
+                     le32_to_cpu(general->adc_rx_saturation_time),
+                     accum_general->adc_rx_saturation_time,
+                     delta_general->adc_rx_saturation_time,
+                     max_general->adc_rx_saturation_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "ina_detect_search_tm:",
+                     le32_to_cpu(general->ina_detection_search_time),
+                     accum_general->ina_detection_search_time,
+                     delta_general->ina_detection_search_time,
+                     max_general->ina_detection_search_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "beacon_silence_rssi_a:",
+                     le32_to_cpu(general->beacon_silence_rssi_a),
+                     accum_general->beacon_silence_rssi_a,
+                     delta_general->beacon_silence_rssi_a,
+                     max_general->beacon_silence_rssi_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "beacon_silence_rssi_b:",
+                     le32_to_cpu(general->beacon_silence_rssi_b),
+                     accum_general->beacon_silence_rssi_b,
+                     delta_general->beacon_silence_rssi_b,
+                     max_general->beacon_silence_rssi_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "beacon_silence_rssi_c:",
+                     le32_to_cpu(general->beacon_silence_rssi_c),
+                     accum_general->beacon_silence_rssi_c,
+                     delta_general->beacon_silence_rssi_c,
+                     max_general->beacon_silence_rssi_c);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "interference_data_flag:",
+                     le32_to_cpu(general->interference_data_flag),
+                     accum_general->interference_data_flag,
+                     delta_general->interference_data_flag,
+                     max_general->interference_data_flag);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+                     le32_to_cpu(general->channel_load),
+                     accum_general->channel_load, delta_general->channel_load,
+                     max_general->channel_load);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+                     le32_to_cpu(general->dsp_false_alarms),
+                     accum_general->dsp_false_alarms,
+                     delta_general->dsp_false_alarms,
+                     max_general->dsp_false_alarms);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+                     le32_to_cpu(general->beacon_rssi_a),
+                     accum_general->beacon_rssi_a,
+                     delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+                     le32_to_cpu(general->beacon_rssi_b),
+                     accum_general->beacon_rssi_b,
+                     delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+                     le32_to_cpu(general->beacon_rssi_c),
+                     accum_general->beacon_rssi_c,
+                     delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+                     le32_to_cpu(general->beacon_energy_a),
+                     accum_general->beacon_energy_a,
+                     delta_general->beacon_energy_a,
+                     max_general->beacon_energy_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+                     le32_to_cpu(general->beacon_energy_b),
+                     accum_general->beacon_energy_b,
+                     delta_general->beacon_energy_b,
+                     max_general->beacon_energy_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+                     le32_to_cpu(general->beacon_energy_c),
+                     accum_general->beacon_energy_c,
+                     delta_general->beacon_energy_c,
+                     max_general->beacon_energy_c);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_header,
+                     "Statistics_Rx - OFDM_HT:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+                     le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+                     delta_ht->plcp_err, max_ht->plcp_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+                     le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+                     delta_ht->overrun_err, max_ht->overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+                     le32_to_cpu(ht->early_overrun_err),
+                     accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+                     max_ht->early_overrun_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+                     le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+                     delta_ht->crc32_good, max_ht->crc32_good);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+                     le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+                     delta_ht->crc32_err, max_ht->crc32_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+                     le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+                     delta_ht->mh_format_err, max_ht->mh_format_err);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+                     le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+                     delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+                     le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+                     delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+                     le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+                     delta_ht->agg_cnt, max_ht->agg_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+                     le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+                     delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
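+/*
+ * Note on the formatting pattern used throughout these statistics read
+ * handlers: each "pos += scnprintf(buf + pos, bufsz - pos, ...)" appends
+ * to the buffer and returns the number of characters actually written
+ * (at most bufsz - pos - 1, never the would-be length), so pos cannot
+ * run past bufsz even if the estimated buffer size turns out to be tight.
+ */
+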
+ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+       ssize_t ret;
+       struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /* The statistics displayed here are based on the last stats
+        * notification from the uCode and might not reflect current
+        * uCode activity.
+        */
+       tx = &il->_4965.stats.tx;
+       accum_tx = &il->_4965.accum_stats.tx;
+       delta_tx = &il->_4965.delta_stats.tx;
+       max_tx = &il->_4965.max_delta.tx;
+
+       pos += il4965_stats_flag(il, buf, bufsz);
+       pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+                     le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+                     delta_tx->preamble_cnt, max_tx->preamble_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+                     le32_to_cpu(tx->rx_detected_cnt),
+                     accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+                     max_tx->rx_detected_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+                     le32_to_cpu(tx->bt_prio_defer_cnt),
+                     accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+                     max_tx->bt_prio_defer_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+                     le32_to_cpu(tx->bt_prio_kill_cnt),
+                     accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+                     max_tx->bt_prio_kill_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+                     le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+                     delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+                     le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+                     delta_tx->cts_timeout, max_tx->cts_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+                     le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+                     delta_tx->ack_timeout, max_tx->ack_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+                     le32_to_cpu(tx->expected_ack_cnt),
+                     accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+                     max_tx->expected_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+                     le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+                     delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+                     le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+                     delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "abort_nxt_frame_mismatch:",
+                     le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+                     accum_tx->burst_abort_next_frame_mismatch_cnt,
+                     delta_tx->burst_abort_next_frame_mismatch_cnt,
+                     max_tx->burst_abort_next_frame_mismatch_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "abort_missing_nxt_frame:",
+                     le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+                     accum_tx->burst_abort_missing_next_frame_cnt,
+                     delta_tx->burst_abort_missing_next_frame_cnt,
+                     max_tx->burst_abort_missing_next_frame_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "cts_timeout_collision:",
+                     le32_to_cpu(tx->cts_timeout_collision),
+                     accum_tx->cts_timeout_collision,
+                     delta_tx->cts_timeout_collision,
+                     max_tx->cts_timeout_collision);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "ack_ba_timeout_collision:",
+                     le32_to_cpu(tx->ack_or_ba_timeout_collision),
+                     accum_tx->ack_or_ba_timeout_collision,
+                     delta_tx->ack_or_ba_timeout_collision,
+                     max_tx->ack_or_ba_timeout_collision);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+                     le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+                     delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "agg ba_resched_frames:",
+                     le32_to_cpu(tx->agg.ba_reschedule_frames),
+                     accum_tx->agg.ba_reschedule_frames,
+                     delta_tx->agg.ba_reschedule_frames,
+                     max_tx->agg.ba_reschedule_frames);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "agg scd_query_agg_frame:",
+                     le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+                     accum_tx->agg.scd_query_agg_frame_cnt,
+                     delta_tx->agg.scd_query_agg_frame_cnt,
+                     max_tx->agg.scd_query_agg_frame_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "agg scd_query_no_agg:",
+                     le32_to_cpu(tx->agg.scd_query_no_agg),
+                     accum_tx->agg.scd_query_no_agg,
+                     delta_tx->agg.scd_query_no_agg,
+                     max_tx->agg.scd_query_no_agg);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+                     le32_to_cpu(tx->agg.scd_query_agg),
+                     accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+                     max_tx->agg.scd_query_agg);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "agg scd_query_mismatch:",
+                     le32_to_cpu(tx->agg.scd_query_mismatch),
+                     accum_tx->agg.scd_query_mismatch,
+                     delta_tx->agg.scd_query_mismatch,
+                     max_tx->agg.scd_query_mismatch);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+                     le32_to_cpu(tx->agg.frame_not_ready),
+                     accum_tx->agg.frame_not_ready,
+                     delta_tx->agg.frame_not_ready,
+                     max_tx->agg.frame_not_ready);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+                     le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+                     delta_tx->agg.underrun, max_tx->agg.underrun);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+                     le32_to_cpu(tx->agg.bt_prio_kill),
+                     accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+                     max_tx->agg.bt_prio_kill);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+                     le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+                     accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+                     max_tx->agg.rx_ba_rsp_cnt);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char *buf;
+       int bufsz = sizeof(struct stats_general) * 10 + 300;
+       ssize_t ret;
+       struct stats_general_common *general, *accum_general;
+       struct stats_general_common *delta_general, *max_general;
+       struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+       struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       /* The statistics displayed here are based on the last stats
+        * notification from the uCode and might not reflect current
+        * uCode activity.
+        */
+       general = &il->_4965.stats.general.common;
+       dbg = &il->_4965.stats.general.common.dbg;
+       div = &il->_4965.stats.general.common.div;
+       accum_general = &il->_4965.accum_stats.general.common;
+       accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+       accum_div = &il->_4965.accum_stats.general.common.div;
+       delta_general = &il->_4965.delta_stats.general.common;
+       max_general = &il->_4965.max_delta.general.common;
+       delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+       max_dbg = &il->_4965.max_delta.general.common.dbg;
+       delta_div = &il->_4965.delta_stats.general.common.div;
+       max_div = &il->_4965.max_delta.general.common.div;
+
+       pos += il4965_stats_flag(il, buf, bufsz);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_header,
+                     "Statistics_General:");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+                     le32_to_cpu(general->temperature));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+                     le32_to_cpu(general->ttl_timestamp));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+                     le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+                     delta_dbg->burst_check, max_dbg->burst_check);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+                     le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+                     delta_dbg->burst_count, max_dbg->burst_count);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table,
+                     "wait_for_silence_timeout_count:",
+                     le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+                     accum_dbg->wait_for_silence_timeout_cnt,
+                     delta_dbg->wait_for_silence_timeout_cnt,
+                     max_dbg->wait_for_silence_timeout_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+                     le32_to_cpu(general->sleep_time),
+                     accum_general->sleep_time, delta_general->sleep_time,
+                     max_general->sleep_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+                     le32_to_cpu(general->slots_out), accum_general->slots_out,
+                     delta_general->slots_out, max_general->slots_out);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+                     le32_to_cpu(general->slots_idle),
+                     accum_general->slots_idle, delta_general->slots_idle,
+                     max_general->slots_idle);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+                     le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+                     delta_div->tx_on_a, max_div->tx_on_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+                     le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+                     delta_div->tx_on_b, max_div->tx_on_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+                     le32_to_cpu(div->exec_time), accum_div->exec_time,
+                     delta_div->exec_time, max_div->exec_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+                     le32_to_cpu(div->probe_time), accum_div->probe_time,
+                     delta_div->probe_time, max_div->probe_time);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+                     le32_to_cpu(general->rx_enable_counter),
+                     accum_general->rx_enable_counter,
+                     delta_general->rx_enable_counter,
+                     max_general->rx_enable_counter);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+                     le32_to_cpu(general->num_of_sos_states),
+                     accum_general->num_of_sos_states,
+                     delta_general->num_of_sos_states,
+                     max_general->num_of_sos_states);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644 (file)
index 0000000..4aaef41
--- /dev/null
@@ -0,0 +1,6536 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME        "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION        "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION     IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+       if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+               IL_ERR("Tx flush command to flush out all frames\n");
+               if (!test_bit(S_EXIT_PENDING, &il->status))
+                       queue_work(il->workqueue, &il->tx_flush);
+       }
+}
+
+/*
+ * EEPROM
+ */
+struct il_mod_params il4965_mod_params = {
+       .amsdu_size_8K = 1,
+       .restart_fw = 1,
+       /* the rest are 0 by default */
+};
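+
+/*
+ * Illustrative note: amsdu_size_8K = 1 is what makes il4965_rx_init()
+ * below select the 8K receive-buffer size by default, and restart_fw = 1
+ * lets the driver reload the firmware after a fatal error.
+ */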
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+       unsigned long flags;
+       int i;
+       spin_lock_irqsave(&rxq->lock, flags);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+               /* In the reset function, these buffers may have been allocated
+                * to an SKB, so we need to unmap and free potential storage */
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+                                      PAGE_SIZE << il->hw_params.rx_page_order,
+                                      PCI_DMA_FROMDEVICE);
+                       __il_free_pages(il, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+       }
+
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
+               rxq->queue[i] = NULL;
+
+       /* Set us so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+       u32 rb_size;
+       const u32 rfdnlog = RX_QUEUE_SIZE_LOG;  /* 256 RBDs */
+       u32 rb_timeout = 0;
+
+       if (il->cfg->mod_params->amsdu_size_8K)
+               rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+       else
+               rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+       /* Stop Rx DMA */
+       il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+       /* Reset driver's Rx queue write idx */
+       il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+       /* Tell device where to find RBD circular buffer in DRAM */
+       il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+       /* Tell device where in DRAM to update its Rx status */
+       il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+       /* Enable Rx DMA
+        * Direct rx interrupts to the host
+        * Rx buffer size 4k or 8k
+        * RB timeout 0 (rb_timeout above)
+        * 256 RBDs
+        */
+       il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+             FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+             FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+             FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+             rb_size |
+             (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+             (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+       return 0;
+}
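+
+/*
+ * Illustrative note: the FH49_MEM_RCSR_CHNL0_CONFIG_REG write above packs
+ * several fields into a single 32-bit value: the DMA-enable and
+ * IRQ-to-host flags, the RB size selection (4K or 8K), the RB timeout
+ * (zero here) shifted to FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS, and log2
+ * of the RBD count (RX_QUEUE_SIZE_LOG, i.e. 256 descriptors) shifted to
+ * FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS.
+ */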
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+               if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+                       il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+                                              APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+                                              ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+       il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+                             APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+                             ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+       unsigned long flags;
+       struct il_rx_queue *rxq = &il->rxq;
+       int ret;
+
+       /* nic_init */
+       spin_lock_irqsave(&il->lock, flags);
+       il->cfg->ops->lib->apm_ops.init(il);
+
+       /* Set interrupt coalescing calibration timer to default (512 usecs) */
+       il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       il4965_set_pwr_vmain(il);
+
+       il->cfg->ops->lib->apm_ops.config(il);
+
+       /* Allocate the RX queue, or reset if it is already allocated */
+       if (!rxq->bd) {
+               ret = il_rx_queue_alloc(il);
+               if (ret) {
+                       IL_ERR("Unable to initialize Rx queue\n");
+                       return -ENOMEM;
+               }
+       } else
+               il4965_rx_queue_reset(il, rxq);
+
+       il4965_rx_replenish(il);
+
+       il4965_rx_init(il, rxq);
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       rxq->need_update = 1;
+       il_rx_queue_update_write_ptr(il, rxq);
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       /* Allocate or reset and init all Tx and Command queues */
+       if (!il->txq) {
+               ret = il4965_txq_ctx_alloc(il);
+               if (ret)
+                       return ret;
+       } else
+               il4965_txq_ctx_reset(il);
+
+       set_bit(S_INIT, &il->status);
+
+       return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+       return cpu_to_le32((u32) (dma_addr >> 8));
+}
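+
+/*
+ * Illustrative note: only the upper bits of the DMA address are kept, so
+ * a receive buffer mapped at, say, 0x12345600 is stored in the RBD as
+ * 0x00123456.  This is why il4965_rx_allocate() below insists that
+ * page_dma is 256-byte aligned (low 8 bits zero) and fits in 36 bits:
+ * the shifted value must round-trip exactly.
+ */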
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+       struct il_rx_queue *rxq = &il->rxq;
+       struct list_head *element;
+       struct il_rx_buf *rxb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rxq->lock, flags);
+       while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+               /* The overwritten rxb must be a used one */
+               rxb = rxq->queue[rxq->write];
+               BUG_ON(rxb && rxb->page);
+
+               /* Get next free Rx buffer, remove from free list */
+               element = rxq->rx_free.next;
+               rxb = list_entry(element, struct il_rx_buf, list);
+               list_del(element);
+
+               /* Point to Rx buffer via next RBD in circular buffer */
+               rxq->bd[rxq->write] =
+                   il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+               rxq->queue[rxq->write] = rxb;
+               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+               rxq->free_count--;
+       }
+       spin_unlock_irqrestore(&rxq->lock, flags);
+       /* If the pre-allocated buffer pool is dropping low, schedule to
+        * refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               queue_work(il->workqueue, &il->rx_replenish);
+
+       /* If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8. */
+       if (rxq->write_actual != (rxq->write & ~0x7)) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               rxq->need_update = 1;
+               spin_unlock_irqrestore(&rxq->lock, flags);
+               il_rx_queue_update_write_ptr(il, rxq);
+       }
+}
+
+/**
+ * il4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il4965_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+       struct il_rx_queue *rxq = &il->rxq;
+       struct list_head *element;
+       struct il_rx_buf *rxb;
+       struct page *page;
+       unsigned long flags;
+       gfp_t gfp_mask = priority;
+
+       while (1) {
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (il->hw_params.rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
+               /* Alloc a new receive buffer */
+               page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               D_INFO("alloc_pages failed, " "order: %d\n",
+                                      il->hw_params.rx_page_order);
+
+                       if (rxq->free_count <= RX_LOW_WATERMARK &&
+                           net_ratelimit())
+                               IL_ERR("Failed to alloc_pages with %s. "
+                                      "Only %u free buffers remaining.\n",
+                                      priority ==
+                                      GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+                                      rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
+                       return;
+               }
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               if (list_empty(&rxq->rx_used)) {
+                       spin_unlock_irqrestore(&rxq->lock, flags);
+                       __free_pages(page, il->hw_params.rx_page_order);
+                       return;
+               }
+               element = rxq->rx_used.next;
+               rxb = list_entry(element, struct il_rx_buf, list);
+               list_del(element);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               BUG_ON(rxb->page);
+               rxb->page = page;
+               /* Get physical address of the RB */
+               rxb->page_dma =
+                   pci_map_page(il->pci_dev, page, 0,
+                                PAGE_SIZE << il->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+               /* dma address must be no more than 36 bits */
+               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+               /* and also 256 byte aligned! */
+               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+               spin_lock_irqsave(&rxq->lock, flags);
+
+               list_add_tail(&rxb->list, &rxq->rx_free);
+               rxq->free_count++;
+               il->alloc_rxb_page++;
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+       }
+}
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+       unsigned long flags;
+
+       il4965_rx_allocate(il, GFP_KERNEL);
+
+       spin_lock_irqsave(&il->lock, flags);
+       il4965_rx_queue_restock(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+       il4965_rx_allocate(il, GFP_ATOMIC);
+
+       il4965_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached (handed off to an SKB), the pool entry
+ * needs to have its page pointer set to NULL.  This free routine walks
+ * the list of pool entries and, if a page is still attached, unmaps and
+ * frees it.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+       int i;
+       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+                                      PAGE_SIZE << il->hw_params.rx_page_order,
+                                      PCI_DMA_FROMDEVICE);
+                       __il_free_pages(il, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
+               }
+       }
+
+       dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+       dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+                         rxq->rb_stts, rxq->rb_stts_dma);
+       rxq->bd = NULL;
+       rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+
+       /* stop Rx DMA */
+       il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+       il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+                   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+       return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+       int idx = 0;
+       int band_offset = 0;
+
+       /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = (rate_n_flags & 0xff);
+               return idx;
+               /* Legacy rate format, search for match in table */
+       } else {
+               if (band == IEEE80211_BAND_5GHZ)
+                       band_offset = IL_FIRST_OFDM_RATE;
+               for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+                       if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+                               return idx - band_offset;
+       }
+
+       return -1;
+}
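+
+/*
+ * Illustrative note: for HT frames the low byte of rate_n_flags is the
+ * MCS number itself (e.g. 0x07 -> MCS 7), while legacy frames are matched
+ * by PLCP value in il_rates[] and reported relative to the band, so the
+ * first OFDM rate becomes idx 0 on 5 GHz.
+ */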
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+       /* Data from PHY/DSP regarding signal strength, etc.;
+        * the contents are always present, not configurable by the host. */
+       struct il4965_rx_non_cfg_phy *ncphy =
+           (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+       u32 agc =
+           (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+           IL49_AGC_DB_POS;
+
+       u32 valid_antennae =
+           (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+           >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+       u8 max_rssi = 0;
+       u32 i;
+
+       /* Find max rssi among 3 possible receivers.
+        * These values are measured by the digital signal processor (DSP).
+        * They should stay fairly constant even as the signal strength varies,
+        *   if the radio's automatic gain control (AGC) is working right.
+        * AGC value (see below) will provide the "interesting" info. */
+       for (i = 0; i < 3; i++)
+               if (valid_antennae & (1 << i))
+                       max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+       D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+               ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+               max_rssi, agc);
+
+       /* dBm = max_rssi dB - agc dB - constant.
+        * Higher AGC (higher radio gain) means lower signal. */
+       return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
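+
+/*
+ * Worked example (illustrative numbers only): with a strongest DSP
+ * reading of max_rssi = 30, agc = 50 and an offset of 44, the reported
+ * signal would be 30 - 50 - 44 = -64 dBm; a larger AGC value (more radio
+ * gain applied) therefore always maps to a weaker reported signal.
+ */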
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+       u32 decrypt_out = 0;
+
+       if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+           RX_RES_STATUS_STATION_FOUND)
+               decrypt_out |=
+                   (RX_RES_STATUS_STATION_FOUND |
+                    RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+       decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+       /* packet was not encrypted */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+           RX_RES_STATUS_SEC_TYPE_NONE)
+               return decrypt_out;
+
+       /* packet was encrypted with unknown alg */
+       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+           RX_RES_STATUS_SEC_TYPE_ERR)
+               return decrypt_out;
+
+       /* decryption was not done in HW */
+       if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+           RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+               return decrypt_out;
+
+       switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               /* alg is CCM: check MIC only */
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+                       /* Bad MIC */
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+               break;
+
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+                       /* Bad TTAK */
+                       decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+                       break;
+               }
+               /* fall through if TTAK OK */
+       default:
+               if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+               else
+                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+               break;
+       }
+
+       D_RX("decrypt_in:0x%x  decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+       return decrypt_out;
+}
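+
+/*
+ * Illustrative note: for a CCMP frame decrypted in hardware with a good
+ * MIC, the translation above keeps the SEC_TYPE bits and adds
+ * RX_RES_STATUS_DECRYPT_OK; a failed MIC yields RX_RES_STATUS_BAD_ICV_MIC
+ * instead, which il4965_pass_packet_to_mac80211() checks (via
+ * il_set_decrypted_flag()) before handing the frame to mac80211.
+ */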
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+                              u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+                              struct ieee80211_rx_status *stats)
+{
+       struct sk_buff *skb;
+       __le16 fc = hdr->frame_control;
+
+       /* We only process data packets if the interface is open */
+       if (unlikely(!il->is_open)) {
+               D_DROP("Dropping packet while interface is not open.\n");
+               return;
+       }
+
+       /* In case of HW accelerated crypto and bad decryption, drop */
+       if (!il->cfg->mod_params->sw_crypto &&
+           il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+               return;
+
+       skb = dev_alloc_skb(128);
+       if (!skb) {
+               IL_ERR("dev_alloc_skb failed\n");
+               return;
+       }
+
+       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+       il_update_stats(il, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(il->hw, skb);
+       il->alloc_rxb_page--;
+       rxb->page = NULL;
+}
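+
+/*
+ * Illustrative note: the 128-byte skb allocated above only provides the
+ * skb head; the received frame itself stays in the Rx page and is
+ * attached zero-copy with skb_add_rx_frag().  Ownership of the page
+ * passes to the skb, which is why rxb->page is cleared and the
+ * alloc_rxb_page count is decremented afterwards.
+ */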
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT high-throughput N frames). */
+void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct ieee80211_hdr *header;
+       struct ieee80211_rx_status rx_status;
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_rx_phy_res *phy_res;
+       __le32 rx_pkt_status;
+       struct il_rx_mpdu_res_start *amsdu;
+       u32 len;
+       u32 ampdu_status;
+       u32 rate_n_flags;
+
+       /*
+        * N_RX and N_RX_MPDU are handled differently.
+        *      N_RX: physical layer info is in this buffer
+        *      N_RX_MPDU: physical layer info was sent in separate
+        *              command and cached in il->last_phy_res
+        *
+        * Here we set up local variables depending on which command is
+        * received.
+        */
+       if (pkt->hdr.cmd == N_RX) {
+               phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+               header =
+                   (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+                                            phy_res->cfg_phy_cnt);
+
+               len = le16_to_cpu(phy_res->byte_count);
+               rx_pkt_status =
+                   *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+                                phy_res->cfg_phy_cnt + len);
+               ampdu_status = le32_to_cpu(rx_pkt_status);
+       } else {
+               if (!il->_4965.last_phy_res_valid) {
+                       IL_ERR("MPDU frame without cached PHY data\n");
+                       return;
+               }
+               phy_res = &il->_4965.last_phy_res;
+               amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+               len = le16_to_cpu(amsdu->byte_count);
+               rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+               ampdu_status =
+                   il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+       }
+
+       if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+               D_DROP("dsp size out of range [0,20]: %d\n",
+                      phy_res->cfg_phy_cnt);
+               return;
+       }
+
+       if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+           !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+               D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+               return;
+       }
+
+       /* This will be used in several places later */
+       rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+       /* rx_status carries information about the packet to mac80211 */
+       rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+       rx_status.band =
+           (phy_res->
+            phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+           IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+           ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+                                          rx_status.band);
+       rx_status.rate_idx =
+           il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+       rx_status.flag = 0;
+
+       /* TSF isn't reliable. In order to allow a smooth user experience,
+        * this workaround doesn't propagate it to mac80211. */
+       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+
+       il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+       rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+       il_dbg_log_rx_data_frame(il, len, header);
+       D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+               (unsigned long long)rx_status.mactime);
+
+       /*
+        * "antenna number"
+        *
+        * It seems that the antenna field in the phy flags value
+        * is actually a bit field. This is undefined by radiotap,
+        * it wants an actual antenna number but I always get "7"
+        * for most legacy frames I receive indicating that the
+        * same frame was received on all three RX chains.
+        *
+        * I think this field should be removed in favor of a
+        * new 802.11n radiotap field "RX chains" that is defined
+        * as a bitmask.
+        */
+       rx_status.antenna =
+           (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+           RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+       /* set the preamble flag if appropriate */
+       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+               rx_status.flag |= RX_FLAG_SHORTPRE;
+
+       /* Set up the HT phy flags */
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               rx_status.flag |= RX_FLAG_HT;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               rx_status.flag |= RX_FLAG_40MHZ;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               rx_status.flag |= RX_FLAG_SHORT_GI;
+
+       il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+                                      &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il_hdl_rx() for N_RX_MPDU. */
+void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       il->_4965.last_phy_res_valid = true;
+       memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+              sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+                            enum ieee80211_band band, u8 is_active,
+                            u8 n_probes, struct il_scan_channel *scan_ch)
+{
+       struct ieee80211_channel *chan;
+       const struct ieee80211_supported_band *sband;
+       const struct il_channel_info *ch_info;
+       u16 passive_dwell = 0;
+       u16 active_dwell = 0;
+       int added, i;
+       u16 channel;
+
+       sband = il_get_hw_mode(il, band);
+       if (!sband)
+               return 0;
+
+       active_dwell = il_get_active_dwell_time(il, band, n_probes);
+       passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
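+       /* Make sure the passive dwell is strictly longer than the active one */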
+       if (passive_dwell <= active_dwell)
+               passive_dwell = active_dwell + 1;
+
+       for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+               chan = il->scan_request->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               channel = chan->hw_value;
+               scan_ch->channel = cpu_to_le16(channel);
+
+               ch_info = il_get_channel_info(il, band, channel);
+               if (!il_is_channel_valid(ch_info)) {
+                       D_SCAN("Channel %d is INVALID for this band.\n",
+                              channel);
+                       continue;
+               }
+
+               if (!is_active || il_is_channel_passive(ch_info) ||
+                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+                       scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+               else
+                       scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+               if (n_probes)
+                       scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+               scan_ch->active_dwell = cpu_to_le16(active_dwell);
+               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+               /* Set txpower levels to defaults */
+               scan_ch->dsp_atten = 110;
+
+               /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+                * power level:
+                * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+                */
+               if (band == IEEE80211_BAND_5GHZ)
+                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+               else
+                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+               D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+                      le32_to_cpu(scan_ch->type),
+                      (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+                      "ACTIVE" : "PASSIVE",
+                      (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+                      active_dwell : passive_dwell);
+
+               scan_ch++;
+               added++;
+       }
+
+       D_SCAN("total channels to scan %d\n", added);
+       return added;
+}
+
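+/* Map an antenna index to its antenna-selection bit in rate_n_flags */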
+static inline u32
+il4965_ant_idx_to_flags(u8 ant_idx)
+{
+       return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+       struct il_host_cmd cmd = {
+               .id = C_SCAN,
+               .len = sizeof(struct il_scan_cmd),
+               .flags = CMD_SIZE_HUGE,
+       };
+       struct il_scan_cmd *scan;
+       struct il_rxon_context *ctx = &il->ctx;
+       u32 rate_flags = 0;
+       u16 cmd_len;
+       u16 rx_chain = 0;
+       enum ieee80211_band band;
+       u8 n_probes = 0;
+       u8 rx_ant = il->hw_params.valid_rx_ant;
+       u8 rate;
+       bool is_active = false;
+       int chan_mod;
+       u8 active_chains;
+       u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       ctx = il_rxon_ctx_from_vif(vif);
+
+       if (!il->scan_cmd) {
+               il->scan_cmd =
+                   kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+                           GFP_KERNEL);
+               if (!il->scan_cmd) {
+                       D_SCAN("fail to allocate memory for scan\n");
+                       return -ENOMEM;
+               }
+       }
+       scan = il->scan_cmd;
+       memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+       scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+       scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+       if (il_is_any_associated(il)) {
+               u16 interval;
+               u32 extra;
+               u32 suspend_time = 100;
+               u32 scan_suspend_time = 100;
+
+               D_INFO("Scanning while associated...\n");
+               interval = vif->bss_conf.beacon_int;
+
+               scan->suspend_time = 0;
+               scan->max_out_time = cpu_to_le32(200 * 1024);
+               if (!interval)
+                       interval = suspend_time;
+
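+               /* Pack suspend_time relative to the beacon interval: the
+                * number of whole intervals goes in the upper bits (<< 22),
+                * the remainder is scaled by 1024 (TU to usec) below it. */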
+               extra = (suspend_time / interval) << 22;
+               scan_suspend_time =
+                   (extra | ((suspend_time % interval) * 1024));
+               scan->suspend_time = cpu_to_le32(scan_suspend_time);
+               D_SCAN("suspend_time 0x%X beacon interval %d\n",
+                      scan_suspend_time, interval);
+       }
+
+       if (il->scan_request->n_ssids) {
+               int i, p = 0;
+               D_SCAN("Kicking off active scan\n");
+               for (i = 0; i < il->scan_request->n_ssids; i++) {
+                       /* always does wildcard anyway */
+                       if (!il->scan_request->ssids[i].ssid_len)
+                               continue;
+                       scan->direct_scan[p].id = WLAN_EID_SSID;
+                       scan->direct_scan[p].len =
+                           il->scan_request->ssids[i].ssid_len;
+                       memcpy(scan->direct_scan[p].ssid,
+                              il->scan_request->ssids[i].ssid,
+                              il->scan_request->ssids[i].ssid_len);
+                       n_probes++;
+                       p++;
+               }
+               is_active = true;
+       } else
+               D_SCAN("Start passive scan.\n");
+
+       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+       scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+       switch (il->scan_band) {
+       case IEEE80211_BAND_2GHZ:
+               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+               chan_mod =
+                   le32_to_cpu(il->ctx.active.flags &
+                               RXON_FLG_CHANNEL_MODE_MSK) >>
+                   RXON_FLG_CHANNEL_MODE_POS;
+               if (chan_mod == CHANNEL_MODE_PURE_40) {
+                       rate = RATE_6M_PLCP;
+               } else {
+                       rate = RATE_1M_PLCP;
+                       rate_flags = RATE_MCS_CCK_MSK;
+               }
+               break;
+       case IEEE80211_BAND_5GHZ:
+               rate = RATE_6M_PLCP;
+               break;
+       default:
+               IL_WARN("Invalid scan band\n");
+               return -EIO;
+       }
+
+       /*
+        * If active scanning is requested but a certain channel is
+        * marked passive, we can do active scanning if we detect
+        * transmissions.
+        *
+        * There is an issue with some firmware versions that triggers
+        * a sysassert on a "good CRC threshold" of zero (== disabled),
+        * on a radar channel even though this means that we should NOT
+        * send probes.
+        *
+        * The "good CRC threshold" is the number of frames that we
+        * need to receive during our dwell time on a channel before
+        * sending out probes -- setting this to a huge value will
+        * mean we never reach it, but at the same time work around
+        * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+        * here instead of IL_GOOD_CRC_TH_DISABLED.
+        */
+       scan->good_CRC_th =
+           is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+       band = il->scan_band;
+
+       if (il->cfg->scan_rx_antennas[band])
+               rx_ant = il->cfg->scan_rx_antennas[band];
+
+       il->scan_tx_ant[band] =
+           il4965_toggle_tx_ant(il, il->scan_tx_ant[band], scan_tx_antennas);
+       rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
+       scan->tx_cmd.rate_n_flags =
+           il4965_hw_set_rate_n_flags(rate, rate_flags);
+
+       /* In power save mode use one chain, otherwise use all chains */
+       if (test_bit(S_POWER_PMI, &il->status)) {
+               /* rx_ant has been set to all valid chains previously */
+               active_chains =
+                   rx_ant & ((u8) (il->chain_noise_data.active_chains));
+               if (!active_chains)
+                       active_chains = rx_ant;
+
+               D_SCAN("chain_noise_data.active_chains: %u\n",
+                      il->chain_noise_data.active_chains);
+
+               rx_ant = il4965_first_antenna(active_chains);
+       }
+
+       /* MIMO is not used here, but value is required */
+       rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+       rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+       scan->rx_chain = cpu_to_le16(rx_chain);
+
+       cmd_len =
+           il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+                             vif->addr, il->scan_request->ie,
+                             il->scan_request->ie_len,
+                             IL_MAX_SCAN_SIZE - sizeof(*scan));
+       scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+       scan->filter_flags |=
+           (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+       scan->channel_count =
+           il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+                                        (void *)&scan->data[cmd_len]);
+       if (scan->channel_count == 0) {
+               D_SCAN("channel count %d\n", scan->channel_count);
+               return -EIO;
+       }
+
+       cmd.len +=
+           le16_to_cpu(scan->tx_cmd.len) +
+           scan->channel_count * sizeof(struct il_scan_channel);
+       cmd.data = scan;
+       scan->len = cpu_to_le16(cmd.len);
+
+       set_bit(S_SCAN_HW, &il->status);
+
+       ret = il_send_cmd_sync(il, &cmd);
+       if (ret)
+               clear_bit(S_SCAN_HW, &il->status);
+
+       return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+                          bool add)
+{
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       if (add)
+               return il4965_add_bssid_station(il, vif_priv->ctx,
+                                               vif->bss_conf.bssid,
+                                               &vif_priv->ibss_bssid_sta_id);
+       return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+                                vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+       lockdep_assert_held(&il->sta_lock);
+
+       if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+               il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+       else {
+               D_TX("free more than tfds_in_queue (%u:%d)\n",
+                    il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+               il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+       }
+}
+
+#define IL_TX_QUEUE_MSK        0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+       return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+           il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE      3
+#define IL_NUM_RX_CHAINS_SINGLE        2
+#define IL_NUM_IDLE_CHAINS_DUAL        2
+#define IL_NUM_IDLE_CHAINS_SINGLE      1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity.  Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+       /* # of Rx chains to use when expecting MIMO. */
+       if (il4965_is_single_rx_stream(il))
+               return IL_NUM_RX_CHAINS_SINGLE;
+       else
+               return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+       /* # Rx chains when idling, depending on SMPS mode */
+       switch (il->current_ht_config.smps) {
+       case IEEE80211_SMPS_STATIC:
+       case IEEE80211_SMPS_DYNAMIC:
+               return IL_NUM_IDLE_CHAINS_SINGLE;
+       case IEEE80211_SMPS_OFF:
+               return active_cnt;
+       default:
+               WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+               return active_cnt;
+       }
+}
+
+/* up to 4 chains */
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+       u8 res;
+       res = (chain_bitmap & BIT(0)) >> 0;
+       res += (chain_bitmap & BIT(1)) >> 1;
+       res += (chain_bitmap & BIT(2)) >> 2;
+       res += (chain_bitmap & BIT(3)) >> 3;
+       return res;
+}
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command ... it puts data in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       bool is_single = il4965_is_single_rx_stream(il);
+       bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+       u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+       u32 active_chains;
+       u16 rx_chain;
+
+       /* Tell uCode which antennas are actually connected.
+        * Before first association, we assume all antennas are connected.
+        * Just after first association, il4965_chain_noise_calibration()
+        *    checks which antennas actually *are* connected. */
+       if (il->chain_noise_data.active_chains)
+               active_chains = il->chain_noise_data.active_chains;
+       else
+               active_chains = il->hw_params.valid_rx_ant;
+
+       rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+       /* How many receivers should we use? */
+       active_rx_cnt = il4965_get_active_rx_chain_count(il);
+       idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+       /* correct rx chain count according to hw settings
+        * and chain noise calibration
+        */
+       valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+       if (valid_rx_cnt < active_rx_cnt)
+               active_rx_cnt = valid_rx_cnt;
+
+       if (valid_rx_cnt < idle_rx_cnt)
+               idle_rx_cnt = valid_rx_cnt;
+
+       rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+       rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+       ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+       if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+               ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+       else
+               ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+       D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+               active_rx_cnt, idle_rx_cnt);
+
+       WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+               active_rx_cnt < idle_rx_cnt);
+}
+
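+/* Return the next antenna in @valid after @ant, wrapping around;
+ * if no other valid antenna is found, @ant is returned unchanged. */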
+u8
+il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
+{
+       int i;
+       u8 ind = ant;
+
+       for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+               ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+               if (valid & BIT(ind))
+                       return ind;
+       }
+       return ant;
+}
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
+       switch (cmd) {
+               IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+               IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+               IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+               IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+               IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+               IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+               IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+               IL_CMD(FH49_TSSR_TX_STATUS_REG);
+               IL_CMD(FH49_TSSR_TX_ERROR_REG);
+       default:
+               return "UNKNOWN";
+       }
+}
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+       int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+       int pos = 0;
+       size_t bufsz = 0;
+#endif
+       static const u32 fh_tbl[] = {
+               FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+               FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+               FH49_RSCSR_CHNL0_WPTR,
+               FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+               FH49_MEM_RSSR_SHARED_CTRL_REG,
+               FH49_MEM_RSSR_RX_STATUS_REG,
+               FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+               FH49_TSSR_TX_STATUS_REG,
+               FH49_TSSR_TX_ERROR_REG
+       };
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (display) {
+               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+               *buf = kmalloc(bufsz, GFP_KERNEL);
+               if (!*buf)
+                       return -ENOMEM;
+               pos +=
+                   scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+                       pos +=
+                           scnprintf(*buf + pos, bufsz - pos,
+                                     "  %34s: 0X%08x\n",
+                                     il4965_get_fh_string(fh_tbl[i]),
+                                     il_rd(il, fh_tbl[i]));
+               }
+               return pos;
+       }
+#endif
+       IL_ERR("FH register values:\n");
+       for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+               IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+                      il_rd(il, fh_tbl[i]));
+       }
+       return 0;
+}
+
+void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_missed_beacon_notif *missed_beacon;
+
+       missed_beacon = &pkt->u.missed_beacon;
+       if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+           il->missed_beacon_threshold) {
+               D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+                       le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+                       le32_to_cpu(missed_beacon->total_missed_becons),
+                       le32_to_cpu(missed_beacon->num_recvd_beacons),
+                       le32_to_cpu(missed_beacon->num_expected_beacons));
+               if (!test_bit(S_SCANNING, &il->status))
+                       il4965_init_sensitivity(il);
+       }
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ *   before the beacon arrives.  This measurement can be done only if we know
+ *   exactly when to expect beacons, therefore only when we're associated. */
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+       struct stats_rx_non_phy *rx_info;
+       int num_active_rx = 0;
+       int total_silence = 0;
+       int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+       int last_rx_noise;
+
+       rx_info = &(il->_4965.stats.rx.general);
+       bcn_silence_a =
+           le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+       bcn_silence_b =
+           le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+       bcn_silence_c =
+           le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+       if (bcn_silence_a) {
+               total_silence += bcn_silence_a;
+               num_active_rx++;
+       }
+       if (bcn_silence_b) {
+               total_silence += bcn_silence_b;
+               num_active_rx++;
+       }
+       if (bcn_silence_c) {
+               total_silence += bcn_silence_c;
+               num_active_rx++;
+       }
+
+       /* Average among active antennas */
+       if (num_active_rx)
+               last_rx_noise = (total_silence / num_active_rx) - 107;
+       else
+               last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+       D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+               bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ *  Based on the assumption that all stats counters are DWORDs.
+ *  FIXME: This function is for debugging only; it does not handle
+ *  counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+       int i, size;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+       struct stats_general_common *general, *accum_general;
+       struct stats_tx *tx, *accum_tx;
+
+       prev_stats = (__le32 *) &il->_4965.stats;
+       accum_stats = (u32 *) &il->_4965.accum_stats;
+       size = sizeof(struct il_notif_stats);
+       general = &il->_4965.stats.general.common;
+       accum_general = &il->_4965.accum_stats.general.common;
+       tx = &il->_4965.stats.tx;
+       accum_tx = &il->_4965.accum_stats.tx;
+       delta = (u32 *) &il->_4965.delta_stats;
+       max_delta = (u32 *) &il->_4965.max_delta;
+
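+       /* Treat the notification as an array of __le32 counters: whenever a
+        * counter increased, add the delta to the accumulated value and
+        * remember the largest delta seen so far. */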
+       for (i = sizeof(__le32); i < size;
+            i +=
+            sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+            accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta =
+                           (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
+               }
+       }
+
+       /* reset accumulative stats for "no-counter" type stats */
+       accum_general->temperature = general->temperature;
+       accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       int change;
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+       D_RX("Statistics notification received (%d vs %d).\n",
+            (int)sizeof(struct il_notif_stats),
+            le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
+       change =
+           ((il->_4965.stats.general.common.temperature !=
+             pkt->u.stats.general.common.temperature) ||
+            ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+             (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+       /* TODO: reading some of the stats is unneeded */
+       memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+       set_bit(S_STATS, &il->status);
+
+       /* Reschedule the stats timer to occur in
+        * REG_RECALIB_PERIOD seconds to ensure we get a
+        * thermal update even if the uCode doesn't give
+        * us one */
+       mod_timer(&il->stats_periodic,
+                 jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+       if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+           (pkt->hdr.cmd == N_STATS)) {
+               il4965_rx_calc_noise(il);
+               queue_work(il->workqueue, &il->run_time_calib_work);
+       }
+       if (il->cfg->ops->lib->temp_ops.temperature && change)
+               il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+               memset(&il->_4965.accum_stats, 0,
+                      sizeof(struct il_notif_stats));
+               memset(&il->_4965.delta_stats, 0,
+                      sizeof(struct il_notif_stats));
+               memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+               D_RX("Statistics have been cleared\n");
+       }
+       il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ *     VO      0
+ *     VI      1
+ *     BE      2
+ *     BK      3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+       IEEE80211_AC_BE,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BK,
+       IEEE80211_AC_BE,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VI,
+       IEEE80211_AC_VO,
+       IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+               return tid_to_ac[tid];
+
+       /* no support for TIDs 8-15 yet */
+       return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+               return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+       /* no support for TIDs 8-15 yet */
+       return -EINVAL;
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+                         struct il_tx_cmd *tx_cmd,
+                         struct ieee80211_tx_info *info,
+                         struct ieee80211_hdr *hdr, u8 std_id)
+{
+       __le16 fc = hdr->frame_control;
+       __le32 tx_flags = tx_cmd->tx_flags;
+
+       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+               tx_flags |= TX_CMD_FLG_ACK_MSK;
+               if (ieee80211_is_mgmt(fc))
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (ieee80211_is_probe_resp(fc) &&
+                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+                       tx_flags |= TX_CMD_FLG_TSF_MSK;
+       } else {
+               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       if (ieee80211_is_back_req(fc))
+               tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+       tx_cmd->sta_id = std_id;
+       if (ieee80211_has_morefrags(fc))
+               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+       if (ieee80211_is_data_qos(fc)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tx_cmd->tid_tspec = qc[0] & 0xf;
+               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+       } else {
+               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+       }
+
+       il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+               else
+                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+       } else {
+               tx_cmd->timeout.pm_frame_timeout = 0;
+       }
+
+       tx_cmd->driver_txop = 0;
+       tx_cmd->tx_flags = tx_flags;
+       tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT         60
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
+                        struct ieee80211_tx_info *info, __le16 fc)
+{
+       u32 rate_flags;
+       int rate_idx;
+       u8 rts_retry_limit;
+       u8 data_retry_limit;
+       u8 rate_plcp;
+
+       /* Set retry limit on DATA packets and Probe Responses */
+       if (ieee80211_is_probe_resp(fc))
+               data_retry_limit = 3;
+       else
+               data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+       tx_cmd->data_retry_limit = data_retry_limit;
+
+       /* Set retry limit on RTS packets */
+       rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+       if (data_retry_limit < rts_retry_limit)
+               rts_retry_limit = data_retry_limit;
+       tx_cmd->rts_retry_limit = rts_retry_limit;
+
+       /* DATA packets will use the uCode station table for rate/antenna
+        * selection */
+       if (ieee80211_is_data(fc)) {
+               tx_cmd->initial_rate_idx = 0;
+               tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+               return;
+       }
+
+       /**
+        * If the current TX rate stored in mac80211 has the MCS bit set, it's
+        * not really a TX rate.  Thus, we use the lowest supported rate for
+        * this band.  Also use the lowest supported rate if the stored rate
+        * idx is invalid.
+        */
+       rate_idx = info->control.rates[0].idx;
+       if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) ||
+           rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY)
+               rate_idx =
+                   rate_lowest_index(&il->bands[info->band],
+                                     info->control.sta);
+       /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+       if (info->band == IEEE80211_BAND_5GHZ)
+               rate_idx += IL_FIRST_OFDM_RATE;
+       /* Get PLCP rate for tx_cmd->rate_n_flags */
+       rate_plcp = il_rates[rate_idx].plcp;
+       /* Zero out flags for this packet */
+       rate_flags = 0;
+
+       /* Set CCK flag as needed */
+       if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       /* Set up antennas */
+       il->mgmt_tx_ant =
+           il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
+                                il->hw_params.valid_tx_ant);
+
+       rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
+
+       /* Set the rate in the TX cmd */
+       tx_cmd->rate_n_flags =
+           il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+                            struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+                            int sta_id)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+               if (info->flags & IEEE80211_TX_CTL_AMPDU)
+                       tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+               D_TX("tx_cmd with AES hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_TKIP:
+               tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+               ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+               D_TX("tx_cmd with tkip hwcrypto\n");
+               break;
+
+       case WLAN_CIPHER_SUITE_WEP104:
+               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_WEP40:
+               tx_cmd->sec_ctl |=
+                   (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+                    TX_CMD_SEC_SHIFT);
+
+               memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+               D_TX("Configuring packet for WEP encryption with key %d\n",
+                    keyconf->keyidx);
+               break;
+
+       default:
+               IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+               break;
+       }
+}
+
+/*
+ * start C_TX command process
+ */
+int
+il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_sta *sta = info->control.sta;
+       struct il_station_priv *sta_priv = NULL;
+       struct il_tx_queue *txq;
+       struct il_queue *q;
+       struct il_device_cmd *out_cmd;
+       struct il_cmd_meta *out_meta;
+       struct il_tx_cmd *tx_cmd;
+       struct il_rxon_context *ctx = &il->ctx;
+       int txq_id;
+       dma_addr_t phys_addr;
+       dma_addr_t txcmd_phys;
+       dma_addr_t scratch_phys;
+       u16 len, firstlen, secondlen;
+       u16 seq_number = 0;
+       __le16 fc;
+       u8 hdr_len;
+       u8 sta_id;
+       u8 wait_write_ptr = 0;
+       u8 tid = 0;
+       u8 *qc = NULL;
+       unsigned long flags;
+       bool is_agg = false;
+
+       if (info->control.vif)
+               ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+       spin_lock_irqsave(&il->lock, flags);
+       if (il_is_rfkill(il)) {
+               D_DROP("Dropping - RF KILL\n");
+               goto drop_unlock;
+       }
+
+       fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (ieee80211_is_auth(fc))
+               D_TX("Sending AUTH frame\n");
+       else if (ieee80211_is_assoc_req(fc))
+               D_TX("Sending ASSOC frame\n");
+       else if (ieee80211_is_reassoc_req(fc))
+               D_TX("Sending REASSOC frame\n");
+#endif
+
+       hdr_len = ieee80211_hdrlen(fc);
+
+       /* For management frames use the broadcast id, so as not to break aggregation */
+       if (!ieee80211_is_data(fc))
+               sta_id = ctx->bcast_sta_id;
+       else {
+               /* Find idx into station table for destination station */
+               sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+               if (sta_id == IL_INVALID_STATION) {
+                       D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+                       goto drop_unlock;
+               }
+       }
+
+       D_TX("station Id %d\n", sta_id);
+
+       if (sta)
+               sta_priv = (void *)sta->drv_priv;
+
+       if (sta_priv && sta_priv->asleep &&
+           (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+               /*
+                * This sends an asynchronous command to the device,
+                * but we can rely on it being processed before the
+                * next frame is processed -- and the next frame to
+                * this station is the one that will consume this
+                * counter.
+                * For now set the counter to just 1 since we do not
+                * support uAPSD yet.
+                */
+               il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+       }
+
+       /*
+        * Send this frame after DTIM -- there's a special queue
+        * reserved for this for contexts that support AP mode.
+        */
+       if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+               txq_id = ctx->mcast_queue;
+               /*
+                * The microcode will clear the more data
+                * bit in the last frame it transmits.
+                */
+               hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+       } else
+               txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+       /* irqs already disabled/saved above when locking il->lock */
+       spin_lock(&il->sta_lock);
+
+       if (ieee80211_is_data_qos(fc)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+               if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+                       spin_unlock(&il->sta_lock);
+                       goto drop_unlock;
+               }
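+               /* The low 4 bits of seq_ctrl hold the fragment number, so the
+                * per-<sta,tid> sequence counter advances in steps of 0x10. */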
+               seq_number = il->stations[sta_id].tid[tid].seq_number;
+               seq_number &= IEEE80211_SCTL_SEQ;
+               hdr->seq_ctrl =
+                   hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(seq_number);
+               seq_number += 0x10;
+               /* aggregation is on for this <sta,tid> */
+               if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+                   il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+                       txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+                       is_agg = true;
+               }
+       }
+
+       txq = &il->txq[txq_id];
+       q = &txq->q;
+
+       if (unlikely(il_queue_space(q) < q->high_mark)) {
+               spin_unlock(&il->sta_lock);
+               goto drop_unlock;
+       }
+
+       if (ieee80211_is_data_qos(fc)) {
+               il->stations[sta_id].tid[tid].tfds_in_queue++;
+               if (!ieee80211_has_morefrags(fc))
+                       il->stations[sta_id].tid[tid].seq_number = seq_number;
+       }
+
+       spin_unlock(&il->sta_lock);
+
+       /* Set up driver data for this TFD */
+       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+       txq->txb[q->write_ptr].skb = skb;
+       txq->txb[q->write_ptr].ctx = ctx;
+
+       /* Set up first empty entry in queue's array of Tx/cmd buffers */
+       out_cmd = txq->cmd[q->write_ptr];
+       out_meta = &txq->meta[q->write_ptr];
+       tx_cmd = &out_cmd->cmd.tx;
+       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+       memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+       /*
+        * Set up the Tx-command (not MAC!) header.
+        * Store the chosen Tx queue and TFD idx within the sequence field;
+        * after Tx, uCode's Tx response will return this value so driver can
+        * locate the frame within the tx queue and do post-tx processing.
+        */
+       out_cmd->hdr.cmd = C_TX;
+       out_cmd->hdr.sequence =
+           cpu_to_le16((u16)
+                       (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+       /* Total # bytes to be transmitted */
+       len = (u16) skb->len;
+       tx_cmd->len = cpu_to_le16(len);
+
+       if (info->control.hw_key)
+               il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+       /* TODO need this for burst mode later on */
+       il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+       il_dbg_log_tx_data_frame(il, len, hdr);
+
+       il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+       il_update_stats(il, true, fc, len);
+       /*
+        * Use the first empty entry in this queue's command buffer array
+        * to contain the Tx command and MAC header concatenated together
+        * (payload data will be in another buffer).
+        * Size of this varies, due to varying MAC header length.
+        * If end is not dword aligned, we'll have 2 extra bytes at the end
+        * of the MAC header (device reads on dword boundaries).
+        * We'll tell device about this padding later.
+        */
+       len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
+       firstlen = (len + 3) & ~3;
+
+       /* Tell NIC about any 2-byte padding after MAC header */
+       if (firstlen != len)
+               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+       /* Physical address of this Tx command's header (not MAC header!),
+        * within command buffer array. */
+       txcmd_phys =
+           pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+                          PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+       dma_unmap_len_set(out_meta, len, firstlen);
+       /* Add buffer containing Tx command and MAC(!) header to TFD's
+        * first entry */
+       il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
+                                                1, 0);
+
+       if (!ieee80211_has_morefrags(hdr->frame_control)) {
+               txq->need_update = 1;
+       } else {
+               wait_write_ptr = 1;
+               txq->need_update = 0;
+       }
+
+       /* Set up TFD's 2nd entry to point directly to remainder of skb,
+        * if any (802.11 null frames have no payload). */
+       secondlen = skb->len - hdr_len;
+       if (secondlen > 0) {
+               phys_addr =
+                   pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+                                  PCI_DMA_TODEVICE);
+               il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+                                                        secondlen, 0, 0);
+       }
+
+       scratch_phys =
+           txcmd_phys + sizeof(struct il_cmd_header) +
+           offsetof(struct il_tx_cmd, scratch);
+
+       /* take back ownership of DMA buffer to enable update */
+       pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+                                   PCI_DMA_BIDIRECTIONAL);
+       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+       tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+       D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+       D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+       il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+       il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+       /* Set up entry for this TFD in Tx byte-count array */
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+                                               le16_to_cpu(tx_cmd->len));
+
+       pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+                                      PCI_DMA_BIDIRECTIONAL);
+
+       /* Tell device the write idx *just past* this latest filled TFD */
+       q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+       il_txq_update_write_ptr(il, txq);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       /*
+        * At this point the frame is "transmitted" successfully
+        * and we will get a TX status notification eventually,
+        * regardless of the value of ret. "ret" only indicates
+        * whether or not we should update the write pointer.
+        */
+
+       /*
+        * Avoid atomic ops if it isn't an associated client.
+        * Also, if this is a packet for aggregation, don't
+        * increase the counter because the ucode will stop
+        * aggregation queues when their respective station
+        * goes to sleep.
+        */
+       if (sta_priv && sta_priv->client && !is_agg)
+               atomic_inc(&sta_priv->pending_frames);
+
+       if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+               if (wait_write_ptr) {
+                       spin_lock_irqsave(&il->lock, flags);
+                       txq->need_update = 1;
+                       il_txq_update_write_ptr(il, txq);
+                       spin_unlock_irqrestore(&il->lock, flags);
+               } else {
+                       il_stop_queue(il, txq);
+               }
+       }
+
+       return 0;
+
+drop_unlock:
+       spin_unlock_irqrestore(&il->lock, flags);
+       return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+       ptr->addr =
+           dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+       if (!ptr->addr)
+               return -ENOMEM;
+       ptr->size = size;
+       return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+       if (unlikely(!ptr->addr))
+               return;
+
+       dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+       memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+       int txq_id;
+
+       /* Tx queues */
+       if (il->txq) {
+               for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+                       if (txq_id == il->cmd_queue)
+                               il_cmd_queue_free(il);
+                       else
+                               il_tx_queue_free(il, txq_id);
+       }
+       il4965_free_dma_ptr(il, &il->kw);
+
+       il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+       /* free tx queue structure */
+       il_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param il
+ * @return error code
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+       int ret;
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       /* Free all tx/cmd queues and keep-warm buffer */
+       il4965_hw_txq_ctx_free(il);
+
+       ret =
+           il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+                                il->hw_params.scd_bc_tbls_size);
+       if (ret) {
+               IL_ERR("Scheduler BC Table allocation failed\n");
+               goto error_bc_tbls;
+       }
+       /* Alloc keep-warm buffer */
+       ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+       if (ret) {
+               IL_ERR("Keep Warm allocation failed\n");
+               goto error_kw;
+       }
+
+       /* allocate tx queue structure */
+       ret = il_alloc_txq_mem(il);
+       if (ret)
+               goto error;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       il4965_txq_set_sched(il, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+       for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+               slots_num = (txq_id == il->cmd_queue) ?
+                   TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+               if (ret) {
+                       IL_ERR("Tx %d queue init failed\n", txq_id);
+                       goto error;
+               }
+       }
+
+       return ret;
+
+error:
+       il4965_hw_txq_ctx_free(il);
+       il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+       il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+       return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+       int txq_id, slots_num;
+       unsigned long flags;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Turn off all Tx DMA fifos */
+       il4965_txq_set_sched(il, 0);
+
+       /* Tell NIC where to find the "keep warm" buffer */
+       il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       /* Alloc and init all Tx queues, including the command queue (#4) */
+       for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+               slots_num =
+                   txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+               il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+       }
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+       int ch, txq_id;
+       unsigned long flags;
+
+       /* Turn off all Tx DMA fifos */
+       spin_lock_irqsave(&il->lock, flags);
+
+       il4965_txq_set_sched(il, 0);
+
+       /* Stop each Tx DMA channel, and wait for it to be idle */
+       for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+               il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+               if (il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
+                               FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+                               1000))
+                       IL_ERR("Failing on timeout while stopping"
+                              " DMA channel %d [0x%08x]", ch,
+                              il_rd(il, FH49_TSSR_TX_STATUS_REG));
+       }
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       if (!il->txq)
+               return;
+
+       /* Unmap DMA from host system and free skb's */
+       for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+               if (txq_id == il->cmd_queue)
+                       il_cmd_queue_unmap(il);
+               else
+                       il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+       int txq_id;
+
+       for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+               if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+                       return txq_id;
+       return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+       /* Simply stop the queue, but don't change any configuration;
+        * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+       il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+                  (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                  (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+       u32 tbl_dw_addr;
+       u32 tbl_dw;
+       u16 scd_q2ratid;
+
+       scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+       tbl_dw_addr =
+           il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+       tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
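+       /* Each 32-bit translate-table entry covers two queues: odd txq_id
+        * in the upper halfword, even txq_id in the lower halfword. */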
+       if (txq_id & 0x1)
+               tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+       else
+               tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+       il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+       return 0;
+}
+
+/**
+ * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE:  txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
+ *        i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+                     int tid, u16 ssn_idx)
+{
+       unsigned long flags;
+       u16 ra_tid;
+       int ret;
+
+       if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+           (IL49_FIRST_AMPDU_QUEUE +
+            il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+               IL_WARN("queue number out of range: %d, must be %d to %d\n",
+                       txq_id, IL49_FIRST_AMPDU_QUEUE,
+                       IL49_FIRST_AMPDU_QUEUE +
+                       il->cfg->base_params->num_of_ampdu_queues - 1);
+               return -EINVAL;
+       }
+
+       ra_tid = BUILD_RAxTID(sta_id, tid);
+
+       /* Modify device's station table to Tx this TID */
+       ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Stop this Tx queue before configuring it */
+       il4965_tx_queue_stop_scheduler(il, txq_id);
+
+       /* Map receiver-address / traffic-ID to this queue */
+       il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+       /* Set this queue as a chain-building queue */
+       il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+       /* Place first TFD at idx corresponding to start sequence number.
+        * Assumes that ssn_idx is valid (!= 0xFFF) */
+       il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+       il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+       il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+       /* Set up Tx win size and frame limit for this queue */
+       il_write_targ_mem(il,
+                         il->scd_base_addr +
+                         IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+                         (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+                         & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+       il_write_targ_mem(il,
+                         il->scd_base_addr +
+                         IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+                         (SCD_FRAME_LIMIT <<
+                          IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+                         IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+       il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+       /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+       il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+                   struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+       int sta_id;
+       int tx_fifo;
+       int txq_id;
+       int ret;
+       unsigned long flags;
+       struct il_tid_data *tid_data;
+
+       tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+       if (unlikely(tx_fifo < 0))
+               return tx_fifo;
+
+       D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+       sta_id = il_sta_id(sta);
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Start AGG on invalid station\n");
+               return -ENXIO;
+       }
+       if (unlikely(tid >= MAX_TID_COUNT))
+               return -EINVAL;
+
+       if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+               IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+               return -ENXIO;
+       }
+
+       txq_id = il4965_txq_ctx_activate_free(il);
+       if (txq_id == -1) {
+               IL_ERR("No free aggregation queue available\n");
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       tid_data = &il->stations[sta_id].tid[tid];
+       *ssn = SEQ_TO_SN(tid_data->seq_number);
+       tid_data->agg.txq_id = txq_id;
+       il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       tid_data = &il->stations[sta_id].tid[tid];
+       if (tid_data->tfds_in_queue == 0) {
+               D_HT("HW queue is empty\n");
+               tid_data->agg.state = IL_AGG_ON;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+       } else {
+               D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+                    tid_data->tfds_in_queue);
+               tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+       }
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+       return ret;
+}
+
+/**
+ * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
+ * il->lock must be held by the caller
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+       if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+           (IL49_FIRST_AMPDU_QUEUE +
+            il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+               IL_WARN("queue number out of range: %d, must be %d to %d\n",
+                       txq_id, IL49_FIRST_AMPDU_QUEUE,
+                       IL49_FIRST_AMPDU_QUEUE +
+                       il->cfg->base_params->num_of_ampdu_queues - 1);
+               return -EINVAL;
+       }
+
+       il4965_tx_queue_stop_scheduler(il, txq_id);
+
+       il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+       il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+       il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+       /* supposes that ssn_idx is valid (!= 0xFFF) */
+       il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+       il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+       il_txq_ctx_deactivate(il, txq_id);
+       il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+       return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta, u16 tid)
+{
+       int tx_fifo_id, txq_id, sta_id, ssn;
+       struct il_tid_data *tid_data;
+       int write_ptr, read_ptr;
+       unsigned long flags;
+
+       tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+       if (unlikely(tx_fifo_id < 0))
+               return tx_fifo_id;
+
+       sta_id = il_sta_id(sta);
+
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Invalid station for AGG tid %d\n", tid);
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       tid_data = &il->stations[sta_id].tid[tid];
+       ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+       txq_id = tid_data->agg.txq_id;
+
+       switch (il->stations[sta_id].tid[tid].agg.state) {
+       case IL_EMPTYING_HW_QUEUE_ADDBA:
+               /*
+                * This can happen if the peer stops aggregation
+                * again before we've had a chance to drain the
+                * queue we selected previously, i.e. before the
+                * session was really started completely.
+                */
+               D_HT("AGG stop before setup done\n");
+               goto turn_off;
+       case IL_AGG_ON:
+               break;
+       default:
+               IL_WARN("Stopping AGG while state not ON or starting\n");
+       }
+
+       write_ptr = il->txq[txq_id].q.write_ptr;
+       read_ptr = il->txq[txq_id].q.read_ptr;
+
+       /* The queue is not empty */
+       if (write_ptr != read_ptr) {
+               D_HT("Stopping a non empty AGG HW QUEUE\n");
+               il->stations[sta_id].tid[tid].agg.state =
+                   IL_EMPTYING_HW_QUEUE_DELBA;
+               spin_unlock_irqrestore(&il->sta_lock, flags);
+               return 0;
+       }
+
+       D_HT("HW queue is empty\n");
+turn_off:
+       il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+       /* do not restore/save irqs; flags saved above are restored when
+        * il->lock is released below */
+       spin_unlock(&il->sta_lock);
+       spin_lock(&il->lock);
+
+       /*
+        * The only reason this call can fail is a queue number out of range,
+        * which can happen if the uCode is reloaded and all the station
+        * information is lost. If it is outside the range, there is no need
+        * to deactivate the uCode queue; just return "success" to allow
+        * mac80211 to clean up its own data.
+        */
+       il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+       return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+       struct il_queue *q = &il->txq[txq_id].q;
+       u8 *addr = il->stations[sta_id].sta.sta.addr;
+       struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+       struct il_rxon_context *ctx;
+
+       ctx = &il->ctx;
+
+       lockdep_assert_held(&il->sta_lock);
+
+       switch (il->stations[sta_id].tid[tid].agg.state) {
+       case IL_EMPTYING_HW_QUEUE_DELBA:
+               /* We are reclaiming the last packet of the
+                * aggregated HW queue */
+               if (txq_id == tid_data->agg.txq_id &&
+                   q->read_ptr == q->write_ptr) {
+                       u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+                       int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+                       D_HT("HW queue empty: continue DELBA flow\n");
+                       il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+                       tid_data->agg.state = IL_AGG_OFF;
+                       ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+               }
+               break;
+       case IL_EMPTYING_HW_QUEUE_ADDBA:
+               /* We are reclaiming the last packet of the queue */
+               if (tid_data->tfds_in_queue == 0) {
+                       D_HT("HW queue empty: continue ADDBA flow\n");
+                       tid_data->agg.state = IL_AGG_ON;
+                       ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
+                        const u8 *addr1)
+{
+       struct ieee80211_sta *sta;
+       struct il_station_priv *sta_priv;
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(ctx->vif, addr1);
+       if (sta) {
+               sta_priv = (void *)sta->drv_priv;
+               /* avoid atomic ops if this isn't a client */
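+               /* unblock the station once its last pending frame is done */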
+               if (sta_priv->client &&
+                   atomic_dec_return(&sta_priv->pending_frames) == 0)
+                       ieee80211_sta_block_awake(il->hw, sta, false);
+       }
+       rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+       if (!is_agg)
+               il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+       ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct il_queue *q = &txq->q;
+       struct il_tx_info *tx_info;
+       int nfreed = 0;
+       struct ieee80211_hdr *hdr;
+
+       if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+               IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+                      "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+                      q->write_ptr, q->read_ptr);
+               return 0;
+       }
+
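+       /* Reclaim entries from read_ptr up to and including idx: report
+        * tx status for each frame, free its TFD, and count the QoS data
+        * frames released. */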
+       for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+            q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               tx_info = &txq->txb[txq->q.read_ptr];
+
+               if (WARN_ON_ONCE(tx_info->skb == NULL))
+                       continue;
+
+               hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+               if (ieee80211_is_data_qos(hdr->frame_control))
+                       nfreed++;
+
+               il4965_tx_status(il, tx_info,
+                                txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+               tx_info->skb = NULL;
+
+               il->cfg->ops->lib->txq_free_tfd(il, txq);
+       }
+       return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+                                    struct il_compressed_ba_resp *ba_resp)
+{
+       int i, sh, ack;
+       u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+       int successes = 0;
+       struct ieee80211_tx_info *info;
+       u64 bitmap, sent_bitmap;
+
+       if (unlikely(!agg->wait_for_ba)) {
+               if (unlikely(ba_resp->bitmap))
+                       IL_ERR("Received BA when not expected\n");
+               return -EINVAL;
+       }
+
+       /* Mark that the expected block-ack response arrived */
+       agg->wait_for_ba = 0;
+       D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+       /* Calculate shift to align block-ack bits with our Tx win bits */
+       sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+       if (sh < 0)             /* tbw something is wrong with indices */
+               sh += 0x100;
+
+       if (agg->frame_count > (64 - sh)) {
+               D_TX_REPLY("more frames than bitmap size");
+               return -1;
+       }
+
+       /* don't use 64-bit values for now */
+       bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+       /* check for success or failure according to the
+        * transmitted bitmap and block-ack bitmap */
+       sent_bitmap = bitmap & agg->bitmap;
+
+       /* For each frame attempted in aggregation,
+        * update driver's record of tx frame's status. */
+       i = 0;
+       while (sent_bitmap) {
+               ack = sent_bitmap & 1ULL;
+               successes += ack;
+               D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+                          i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+               sent_bitmap >>= 1;
+               ++i;
+       }
+
+       D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+       info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+       memset(&info->status, 0, sizeof(info->status));
+       info->flags |= IEEE80211_TX_STAT_ACK;
+       info->flags |= IEEE80211_TX_STAT_AMPDU;
+       info->status.ampdu_ack_len = successes;
+       info->status.ampdu_len = agg->frame_count;
+       il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+       return 0;
+}
+
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+                           struct ieee80211_tx_info *info)
+{
+       struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+       info->antenna_sel_tx =
+           ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+       if (rate_n_flags & RATE_MCS_HT_MSK)
+               r->flags |= IEEE80211_TX_RC_MCS;
+       if (rate_n_flags & RATE_MCS_GF_MSK)
+               r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+       if (rate_n_flags & RATE_MCS_HT40_MSK)
+               r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+       if (rate_n_flags & RATE_MCS_DUP_MSK)
+               r->flags |= IEEE80211_TX_RC_DUP_DATA;
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               r->flags |= IEEE80211_TX_RC_SHORT_GI;
+       r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+       struct il_tx_queue *txq = NULL;
+       struct il_ht_agg *agg;
+       int idx;
+       int sta_id;
+       int tid;
+       unsigned long flags;
+
+       /* "flow" corresponds to Tx queue */
+       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+       /* "ssn" is start of block-ack Tx win, corresponds to idx
+        * (in Tx queue's circular buffer) of first TFD/frame in win */
+       u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+       if (scd_flow >= il->hw_params.max_txq_num) {
+               IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+               return;
+       }
+
+       txq = &il->txq[scd_flow];
+       sta_id = ba_resp->sta_id;
+       tid = ba_resp->tid;
+       agg = &il->stations[sta_id].tid[tid].agg;
+       if (unlikely(agg->txq_id != scd_flow)) {
+               /*
+                * FIXME: this is a uCode bug which needs to be addressed;
+                * log the information and return for now.
+                * Since it can happen rather often, and in order not to fill
+                * the syslog, don't enable the logging by default.
+                */
+               D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+                          scd_flow, agg->txq_id);
+               return;
+       }
+
+       /* Find idx just before block-ack win */
+       idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+                  agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+                  ba_resp->sta_id);
+       D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+                  "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+                  (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+                  ba_resp->scd_flow, ba_resp->scd_ssn);
+       D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+                  (unsigned long long)agg->bitmap);
+
+       /* Update driver's record of ACK vs. not for each frame in win */
+       il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+       /* Release all TFDs before the SSN, i.e. all TFDs in front of
+        * block-ack win (we assume that they've been successfully
+        * transmitted ... if not, it's too late anyway). */
+       if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+               /* calculate mac80211 ampdu sw queue to wake */
+               int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+               il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+               if (il_queue_space(&txq->q) > txq->q.low_mark &&
+                   il->mac80211_registered &&
+                   agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+                       il_wake_queue(il, txq);
+
+               il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+       }
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+       switch (status & TX_STATUS_MSK) {
+       case TX_STATUS_SUCCESS:
+               return "SUCCESS";
+               TX_STATUS_POSTPONE(DELAY);
+               TX_STATUS_POSTPONE(FEW_BYTES);
+               TX_STATUS_POSTPONE(QUIET_PERIOD);
+               TX_STATUS_POSTPONE(CALC_TTAK);
+               TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+               TX_STATUS_FAIL(SHORT_LIMIT);
+               TX_STATUS_FAIL(LONG_LIMIT);
+               TX_STATUS_FAIL(FIFO_UNDERRUN);
+               TX_STATUS_FAIL(DRAIN_FLOW);
+               TX_STATUS_FAIL(RFKILL_FLUSH);
+               TX_STATUS_FAIL(LIFE_EXPIRE);
+               TX_STATUS_FAIL(DEST_PS);
+               TX_STATUS_FAIL(HOST_ABORTED);
+               TX_STATUS_FAIL(BT_RETRY);
+               TX_STATUS_FAIL(STA_INVALID);
+               TX_STATUS_FAIL(FRAG_DROPPED);
+               TX_STATUS_FAIL(TID_DISABLE);
+               TX_STATUS_FAIL(FIFO_FLUSHED);
+               TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+               TX_STATUS_FAIL(PASSIVE_NO_RX);
+               TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+       }
+
+       return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+       int i, r;
+       struct il_link_quality_cmd *link_cmd;
+       u32 rate_flags = 0;
+       __le32 rate_n_flags;
+
+       link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+       if (!link_cmd) {
+               IL_ERR("Unable to allocate memory for LQ cmd.\n");
+               return NULL;
+       }
+       /* Set up the rate scaling to start at selected rate, fall back
+        * all the way down to 1M in IEEE order, and then spin on 1M */
+       if (il->band == IEEE80211_BAND_5GHZ)
+               r = RATE_6M_IDX;
+       else
+               r = RATE_1M_IDX;
+
+       if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       rate_flags |=
+           il4965_first_antenna(il->hw_params.
+                                valid_tx_ant) << RATE_MCS_ANT_POS;
+       rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp, rate_flags);
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+               link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+       link_cmd->general_params.single_stream_ant_msk =
+           il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+       link_cmd->general_params.dual_stream_ant_msk =
+           il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+                                                              valid_tx_ant);
+       if (!link_cmd->general_params.dual_stream_ant_msk) {
+               link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+       } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+               link_cmd->general_params.dual_stream_ant_msk =
+                   il->hw_params.valid_tx_ant;
+       }
+
+       link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+       link_cmd->agg_params.agg_time_limit =
+           cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+       link_cmd->sta_id = sta_id;
+
+       return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+                        const u8 *addr, u8 *sta_id_r)
+{
+       int ret;
+       u8 sta_id;
+       struct il_link_quality_cmd *link_cmd;
+       unsigned long flags;
+
+       if (sta_id_r)
+               *sta_id_r = IL_INVALID_STATION;
+
+       ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+       if (ret) {
+               IL_ERR("Unable to add station %pM\n", addr);
+               return ret;
+       }
+
+       if (sta_id_r)
+               *sta_id_r = sta_id;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].used |= IL_STA_LOCAL;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       /* Set up default rate scaling table in device's station table */
+       link_cmd = il4965_sta_alloc_lq(il, sta_id);
+       if (!link_cmd) {
+               IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+                      addr);
+               return -ENOMEM;
+       }
+
+       ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+       if (ret)
+               IL_ERR("Link quality command failed (%d)\n", ret);
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return 0;
+}
+
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+                        bool send_if_empty)
+{
+       int i, not_empty = 0;
+       u8 buff[sizeof(struct il_wep_cmd) +
+               sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+       struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+       size_t cmd_size = sizeof(struct il_wep_cmd);
+       struct il_host_cmd cmd = {
+               .id = ctx->wep_key_cmd,
+               .data = wep_cmd,
+               .flags = CMD_SYNC,
+       };
+
+       might_sleep();
+
+       memset(wep_cmd, 0,
+              cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+       for (i = 0; i < WEP_KEYS_MAX; i++) {
+               wep_cmd->key[i].key_idx = i;
+               if (ctx->wep_keys[i].key_size) {
+                       wep_cmd->key[i].key_offset = i;
+                       not_empty = 1;
+               } else {
+                       wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+               }
+
+               wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+               memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+                      ctx->wep_keys[i].key_size);
+       }
+
+       wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+       wep_cmd->num_keys = WEP_KEYS_MAX;
+
+       cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+
+       cmd.len = cmd_size;
+
+       if (not_empty || send_if_empty)
+               return il_send_cmd(il, &cmd);
+       else
+               return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       lockdep_assert_held(&il->mutex);
+
+       return il4965_static_wepkey_cmd(il, ctx, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+                             struct ieee80211_key_conf *keyconf)
+{
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+
+       memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+       if (il_is_rfkill(il)) {
+               D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+               /* but keys in device are clear anyway so return success */
+               return 0;
+       }
+       ret = il4965_static_wepkey_cmd(il, ctx, true);
+       D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+
+       return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+                          struct ieee80211_key_conf *keyconf)
+{
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       if (keyconf->keylen != WEP_KEY_LEN_128 &&
+           keyconf->keylen != WEP_KEY_LEN_64) {
+               D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+               return -EINVAL;
+       }
+
+       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->hw_key_idx = HW_KEY_DEFAULT;
+       il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+       ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+       memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+              keyconf->keylen);
+
+       ret = il4965_static_wepkey_cmd(il, ctx, false);
+       D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
+             keyconf->keyidx, ret);
+
+       return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+                               struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       struct il_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&il->mutex);
+
+       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+       key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (keyconf->keylen == WEP_KEY_LEN_128)
+               key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+       il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+       memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+       memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+              keyconf->keylen);
+
+       if ((il->stations[sta_id].sta.key.
+            key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+               il->stations[sta_id].sta.key.key_offset =
+                   il_get_free_ucode_key_idx(il);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+            "no space for a new key");
+
+       il->stations[sta_id].sta.key.key_flags = key_flags;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+                                struct il_rxon_context *ctx,
+                                struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       unsigned long flags;
+       __le16 key_flags = 0;
+       struct il_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&il->mutex);
+
+       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+       memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+       memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+       if ((il->stations[sta_id].sta.key.
+            key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+               il->stations[sta_id].sta.key.key_offset =
+                   il_get_free_ucode_key_idx(il);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+            "no space for a new key");
+
+       il->stations[sta_id].sta.key.key_flags = key_flags;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+                                struct il_rxon_context *ctx,
+                                struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       unsigned long flags;
+       int ret = 0;
+       __le16 key_flags = 0;
+
+       key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags &= ~STA_KEY_FLG_INVALID;
+
+       if (sta_id == ctx->bcast_sta_id)
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+       il->stations[sta_id].keyinfo.keylen = 16;
+
+       if ((il->stations[sta_id].sta.key.
+            key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+               il->stations[sta_id].sta.key.key_offset =
+                   il_get_free_ucode_key_idx(il);
+       /* else, we are overriding an existing key => no need to allocate room
+        * in uCode. */
+
+       WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+            "no space for a new key");
+
+       il->stations[sta_id].sta.key.key_flags = key_flags;
+
+       /* This copy is actually not needed: we get the key with each TX */
+       memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+       memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+                      struct ieee80211_key_conf *keyconf,
+                      struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+       u8 sta_id;
+       unsigned long flags;
+       int i;
+
+       if (il_scan_cancel(il)) {
+               /* cancel scan failed, just live w/ bad key and rely
+                * briefly on SW decryption */
+               return;
+       }
+
+       sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+       if (sta_id == IL_INVALID_STATION)
+               return;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+       for (i = 0; i < 5; i++)
+               il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+                   cpu_to_le16(phase1key[i]);
+
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+                         struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       unsigned long flags;
+       u16 key_flags;
+       u8 keyidx;
+       struct il_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&il->mutex);
+
+       ctx->key_mapping_keys--;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
+       keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+       D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+       if (keyconf->keyidx != keyidx) {
+               /* We need to remove a key with an idx different from the one
+                * in the uCode. This means the key we need to remove has
+                * been replaced by another one with a different idx.
+                * Don't do anything and return ok.
+                */
+               spin_unlock_irqrestore(&il->sta_lock, flags);
+               return 0;
+       }
+
+       if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+               IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+                       key_flags);
+               spin_unlock_irqrestore(&il->sta_lock, flags);
+               return 0;
+       }
+
+       if (!test_and_clear_bit
+           (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
+               IL_ERR("idx %d not used in uCode key table.\n",
+                      il->stations[sta_id].sta.key.key_offset);
+       memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+       memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+       il->stations[sta_id].sta.key.key_flags =
+           STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+       il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+       if (il_is_rfkill(il)) {
+               D_WEP
+                   ("Not sending C_ADD_STA command because RFKILL enabled.\n");
+               spin_unlock_irqrestore(&il->sta_lock, flags);
+               return 0;
+       }
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+                      struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       ctx->key_mapping_keys++;
+       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+       switch (keyconf->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               ret =
+                   il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               ret =
+                   il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+               break;
+       default:
+               IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+                      keyconf->cipher);
+               ret = -EINVAL;
+       }
+
+       D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+             keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+       return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       struct il_link_quality_cmd *link_cmd;
+       unsigned long flags;
+       u8 sta_id;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Unable to prepare broadcast station\n");
+               spin_unlock_irqrestore(&il->sta_lock, flags);
+
+               return -EINVAL;
+       }
+
+       il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+       il->stations[sta_id].used |= IL_STA_BCAST;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       link_cmd = il4965_sta_alloc_lq(il, sta_id);
+       if (!link_cmd) {
+               IL_ERR
+                   ("Unable to initialize rate scaling for bcast station.\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       unsigned long flags;
+       struct il_link_quality_cmd *link_cmd;
+       u8 sta_id = ctx->bcast_sta_id;
+
+       link_cmd = il4965_sta_alloc_lq(il, sta_id);
+       if (!link_cmd) {
+               IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+               return -ENOMEM;
+       }
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       if (il->stations[sta_id].lq)
+               kfree(il->stations[sta_id].lq);
+       else
+               D_INFO("Bcast sta rate scaling has not been initialized.\n");
+       il->stations[sta_id].lq = link_cmd;
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+       return il4965_update_bcast_station(il, &il->ctx);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+       unsigned long flags;
+       struct il_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&il->mutex);
+
+       /* Remove "disable" flag, to enable Tx for this TID */
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+       il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+                       u16 ssn)
+{
+       unsigned long flags;
+       int sta_id;
+       struct il_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&il->mutex);
+
+       sta_id = il_sta_id(sta);
+       if (sta_id == IL_INVALID_STATION)
+               return -ENXIO;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].sta.station_flags_msk = 0;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+       il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+       il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+       unsigned long flags;
+       int sta_id;
+       struct il_addsta_cmd sta_cmd;
+
+       lockdep_assert_held(&il->mutex);
+
+       sta_id = il_sta_id(sta);
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Invalid station for AGG tid %d\n", tid);
+               return -ENXIO;
+       }
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].sta.station_flags_msk = 0;
+       il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+       il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+       unsigned long flags;
+
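+       /* mark the station as asleep and tell the uCode how many frames
+        * may still be sent to it */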
+       spin_lock_irqsave(&il->sta_lock, flags);
+       il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+       il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+       il->stations[sta_id].sta.sta.modify_mask =
+           STA_MODIFY_SLEEP_TX_COUNT_MSK;
+       il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+       il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+       if (il->cfg->ops->hcmd->set_rxon_chain) {
+               il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+               if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
+                       il_commit_rxon(il, &il->ctx);
+       }
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+       struct list_head *element;
+
+       D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+       while (!list_empty(&il->free_frames)) {
+               element = il->free_frames.next;
+               list_del(element);
+               kfree(list_entry(element, struct il_frame, list));
+               il->frames_count--;
+       }
+
+       if (il->frames_count) {
+               IL_WARN("%d frames still in use.  Did we lose one?\n",
+                       il->frames_count);
+               il->frames_count = 0;
+       }
+}
+
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+       struct il_frame *frame;
+       struct list_head *element;
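+       /* reuse a frame from the free list if possible, otherwise
+        * allocate a new one and account for it in frames_count */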
+       if (list_empty(&il->free_frames)) {
+               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+               if (!frame) {
+                       IL_ERR("Could not allocate frame!\n");
+                       return NULL;
+               }
+
+               il->frames_count++;
+               return frame;
+       }
+
+       element = il->free_frames.next;
+       list_del(element);
+       return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+       memset(frame, 0, sizeof(*frame));
+       list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+                        int left)
+{
+       lockdep_assert_held(&il->mutex);
+
+       if (!il->beacon_skb)
+               return 0;
+
+       if (il->beacon_skb->len > left)
+               return 0;
+
+       memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+       return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+                     struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
+                     u32 frame_size)
+{
+       u16 tim_idx;
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+       /*
+        * The idx is relative to frame start but we start looking at the
+        * variable-length part of the beacon.
+        */
+       tim_idx = mgmt->u.beacon.variable - beacon;
+
+       /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+       while ((tim_idx < (frame_size - 2)) &&
+              (beacon[tim_idx] != WLAN_EID_TIM))
+               tim_idx += beacon[tim_idx + 1] + 2;
+
+       /* If TIM field was found, set variables */
+       if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+               tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+               tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+       } else
+               IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+       struct il_tx_beacon_cmd *tx_beacon_cmd;
+       u32 frame_size;
+       u32 rate_flags;
+       u32 rate;
+       /*
+        * We have to set up the TX command, the TX Beacon command, and the
+        * beacon contents.
+        */
+
+       lockdep_assert_held(&il->mutex);
+
+       if (!il->beacon_ctx) {
+               IL_ERR("trying to build beacon w/o beacon context!\n");
+               return 0;
+       }
+
+       /* Initialize memory */
+       tx_beacon_cmd = &frame->u.beacon;
+       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+       /* Set up TX beacon contents */
+       frame_size =
+           il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+                                    sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+       if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+               return 0;
+       if (!frame_size)
+               return 0;
+
+       /* Set up TX command fields */
+       tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+       tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+       tx_beacon_cmd->tx.tx_flags =
+           TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+           TX_CMD_FLG_STA_RATE_MSK;
+
+       /* Set up TX beacon command fields */
+       il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+                             frame_size);
+
+       /* Set up packet rate and flags */
+       rate = il_get_lowest_plcp(il, il->beacon_ctx);
+       il->mgmt_tx_ant =
+           il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
+                                il->hw_params.valid_tx_ant);
+       rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
+       if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+       tx_beacon_cmd->tx.rate_n_flags =
+           il4965_hw_set_rate_n_flags(rate, rate_flags);
+
+       return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+       struct il_frame *frame;
+       unsigned int frame_size;
+       int rc;
+
+       frame = il4965_get_free_frame(il);
+       if (!frame) {
+               IL_ERR("Could not obtain free frame buffer for beacon "
+                      "command.\n");
+               return -ENOMEM;
+       }
+
+       frame_size = il4965_hw_get_beacon_cmd(il, frame);
+       if (!frame_size) {
+               IL_ERR("Error configuring the beacon command\n");
+               il4965_free_frame(il, frame);
+               return -EINVAL;
+       }
+
+       rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+       il4965_free_frame(il, frame);
+
+       return rc;
+}
+
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+       struct il_tfd_tb *tb = &tfd->tbs[idx];
+
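+       /* a TFD TB holds a 36-bit DMA address: low 32 bits in tb->lo,
+        * bits 35:32 in the low nibble of tb->hi_n_len */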
+       dma_addr_t addr = get_unaligned_le32(&tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               addr |=
+                   ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+                   16;
+
+       return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+       struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+       return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+       struct il_tfd_tb *tb = &tfd->tbs[idx];
+       u16 hi_n_len = len << 4;
+
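+       /* hi_n_len packs the length in bits 15:4 and DMA address
+        * bits 35:32 in bits 3:0 */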
+       put_unaligned_le32(addr, &tb->lo);
+       if (sizeof(dma_addr_t) > sizeof(u32))
+               hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+       tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+       return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write idxes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+       struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+       struct il_tfd *tfd;
+       struct pci_dev *dev = il->pci_dev;
+       int idx = txq->q.read_ptr;
+       int i;
+       int num_tbs;
+
+       tfd = &tfd_tmp[idx];
+
+       /* Sanity check on number of chunks */
+       num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+       if (num_tbs >= IL_NUM_OF_TBS) {
+               IL_ERR("Too many chunks: %i\n", num_tbs);
+               /* @todo issue fatal error, it is quite a serious situation */
+               return;
+       }
+
+       /* Unmap tx_cmd */
+       if (num_tbs)
+               pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+                                dma_unmap_len(&txq->meta[idx], len),
+                                PCI_DMA_BIDIRECTIONAL);
+
+       /* Unmap chunks, if any. */
+       for (i = 1; i < num_tbs; i++)
+               pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+                                il4965_tfd_tb_get_len(tfd, i),
+                                PCI_DMA_TODEVICE);
+
+       /* free SKB */
+       if (txq->txb) {
+               struct sk_buff *skb;
+
+               skb = txq->txb[txq->q.read_ptr].skb;
+
+               /* can be called from irqs-disabled context */
+               if (skb) {
+                       dev_kfree_skb_any(skb);
+                       txq->txb[txq->q.read_ptr].skb = NULL;
+               }
+       }
+}
+
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                               dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+       struct il_queue *q;
+       struct il_tfd *tfd, *tfd_tmp;
+       u32 num_tbs;
+
+       q = &txq->q;
+       tfd_tmp = (struct il_tfd *)txq->tfds;
+       tfd = &tfd_tmp[q->write_ptr];
+
+       if (reset)
+               memset(tfd, 0, sizeof(*tfd));
+
+       num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+       /* Each TFD can point to a maximum of 20 Tx buffers */
+       if (num_tbs >= IL_NUM_OF_TBS) {
+               IL_ERR("Error can not send more than %d chunks\n",
+                      IL_NUM_OF_TBS);
+               return -EINVAL;
+       }
+
+       BUG_ON(addr & ~DMA_BIT_MASK(36));
+       if (unlikely(addr & ~IL_TX_DMA_MASK))
+               IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+       il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+       return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+       int txq_id = txq->q.id;
+
+       /* Circular buffer (TFD queue in DRAM) physical base address */
+       il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+       return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_alive_resp *palive;
+       struct delayed_work *pwork;
+
+       palive = &pkt->u.alive_frame;
+
+       D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+              palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+               D_INFO("Initialization Alive received.\n");
+               memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+                      sizeof(struct il_init_alive_resp));
+               pwork = &il->init_alive_start;
+       } else {
+               D_INFO("Runtime Alive received.\n");
+               memcpy(&il->card_alive, &pkt->u.alive_frame,
+                      sizeof(struct il_alive_resp));
+               pwork = &il->alive_start;
+       }
+
+       /* We delay the ALIVE response by 5ms to
+        * give the HW RF Kill time to activate... */
+       if (palive->is_valid == UCODE_VALID_OK)
+               queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+       else
+               IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last N_STATS
+ * was received.  We need to ensure we receive the stats in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(unsigned long data)
+{
+       struct il_priv *il = (struct il_priv *)data;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       /* don't send host commands if rf-kill is on */
+       if (!il_is_ready_rf(il))
+               return;
+
+       il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il4965_beacon_notif *beacon =
+           (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+       u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+            le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+            beacon->beacon_notify_hdr.failure_frame,
+            le32_to_cpu(beacon->ibss_mgr_status),
+            le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+       il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+       unsigned long flags;
+
+       D_POWER("Stop all queues\n");
+
+       if (il->mac80211_registered)
+               ieee80211_stop_queues(il->hw);
+
+       _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+              CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+       _il_rd(il, CSR_UCODE_DRV_GP1);
+
+       spin_lock_irqsave(&il->reg_lock, flags);
+       if (!_il_grab_nic_access(il))
+               _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
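+       /* snapshot the old status bits so an HW rf-kill transition can be
+        * detected at the end of this handler */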
+       unsigned long status = il->status;
+
+       D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+                 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+                 (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+                 (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+       if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+               _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+                      CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+               il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+               if (!(flags & RXON_CARD_DISABLED)) {
+                       _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+                              CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+                       il_wr(il, HBUS_TARG_MBX_C,
+                             HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+               }
+       }
+
+       if (flags & CT_CARD_DISABLED)
+               il4965_perform_ct_kill_task(il);
+
+       if (flags & HW_CARD_DISABLED)
+               set_bit(S_RF_KILL_HW, &il->status);
+       else
+               clear_bit(S_RF_KILL_HW, &il->status);
+
+       if (!(flags & RXON_CARD_DISABLED))
+               il_scan_cancel(il);
+
+       if ((test_bit(S_RF_KILL_HW, &status) !=
+            test_bit(S_RF_KILL_HW, &il->status)))
+               wiphy_rfkill_set_hw_state(il->hw->wiphy,
+                                         test_bit(S_RF_KILL_HW, &il->status));
+       else
+               wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+       il->handlers[N_ALIVE] = il4965_hdl_alive;
+       il->handlers[N_ERROR] = il_hdl_error;
+       il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+       il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+       il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+       il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+       il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+       /*
+        * The same handler is used for both the REPLY to a discrete
+        * stats request from the host as well as for the periodic
+        * stats notifications (after received beacons) from the uCode.
+        */
+       il->handlers[C_STATS] = il4965_hdl_c_stats;
+       il->handlers[N_STATS] = il4965_hdl_stats;
+
+       il_setup_rx_scan_handlers(il);
+
+       /* status change handler */
+       il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+       il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+       /* Rx handlers */
+       il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+       il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+       /* block ack */
+       il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+       /* Set up hardware specific Rx handlers */
+       il->cfg->ops->lib->handler_setup(il);
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+       struct il_rx_buf *rxb;
+       struct il_rx_pkt *pkt;
+       struct il_rx_queue *rxq = &il->rxq;
+       u32 r, i;
+       int reclaim;
+       unsigned long flags;
+       u8 fill_rx = 0;
+       u32 count = 8;
+       int total_empty;
+
+       /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+        * buffer that the driver may process (last buffer filled by ucode). */
+       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+       i = rxq->read;
+
+       /* Rx interrupt, but nothing sent from uCode */
+       if (i == r)
+               D_RX("r = %d, i = %d\n", r, i);
+
+       /* calculate the total number of frames to restock after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
+
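+       /* if more than half of the RX queue needs refilling, restock it
+        * every 8 handled packets inside the loop below */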
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+
+       while (i != r) {
+               int len;
+
+               rxb = rxq->queue[i];
+
+               /* If an RXB doesn't have a Rx queue slot associated with it,
+                * then a bug has been introduced in the queue refilling
+                * routines -- catch it here */
+               BUG_ON(rxb == NULL);
+
+               rxq->queue[i] = NULL;
+
+               pci_unmap_page(il->pci_dev, rxb->page_dma,
+                              PAGE_SIZE << il->hw_params.rx_page_order,
+                              PCI_DMA_FROMDEVICE);
+               pkt = rxb_addr(rxb);
+
+               len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+               len += sizeof(u32);     /* account for status word */
+
+               /* Reclaim a command buffer only if this packet is a response
+                *   to a (driver-originated) command.
+                * If the packet (e.g. Rx frame) originated from uCode,
+                *   there is no command buffer to reclaim.
+                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+                *   but apparently a few don't get set; catch them here. */
+               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+                   (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
+                   (pkt->hdr.cmd != N_RX_MPDU) &&
+                   (pkt->hdr.cmd != N_COMPRESSED_BA) &&
+                   (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+
+               /* Based on type of command response or notification,
+                *   handle those that need handling via function in
+                *   handlers table.  See il4965_setup_handlers() */
+               if (il->handlers[pkt->hdr.cmd]) {
+                       D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+                            il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+                       il->isr_stats.handlers[pkt->hdr.cmd]++;
+                       il->handlers[pkt->hdr.cmd] (il, rxb);
+               } else {
+                       /* No handling needed */
+                       D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+                            i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+               }
+
+               /*
+                * XXX: From this point on, always check rxb->page
+                * against NULL before touching it or its virtual
+                * memory (pkt), because some handler might have
+                * already taken or freed the pages.
+                */
+
+               if (reclaim) {
+                       /* Invoke any callbacks, transfer the buffer to caller,
+                        * and fire off the (possibly) blocking il_send_cmd()
+                        * as we reclaim the driver command queue */
+                       if (rxb->page)
+                               il_tx_cmd_complete(il, rxb);
+                       else
+                               IL_WARN("Claim null rxb?\n");
+               }
+
+               /* Reuse the page if possible. For notification packets and
+                * SKBs that fail to Rx correctly, add them back into the
+                * rx_free list for reuse later. */
+               spin_lock_irqsave(&rxq->lock, flags);
+               if (rxb->page != NULL) {
+                       rxb->page_dma =
+                           pci_map_page(il->pci_dev, rxb->page, 0,
+                                        PAGE_SIZE << il->hw_params.
+                                        rx_page_order, PCI_DMA_FROMDEVICE);
+                       list_add_tail(&rxb->list, &rxq->rx_free);
+                       rxq->free_count++;
+               } else
+                       list_add_tail(&rxb->list, &rxq->rx_used);
+
+               spin_unlock_irqrestore(&rxq->lock, flags);
+
+               i = (i + 1) & RX_QUEUE_MASK;
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so the ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               il4965_rx_replenish_now(il);
+                               count = 0;
+                       }
+               }
+       }
+
+       /* Update the driver's Rx queue read index */
+       rxq->read = i;
+       if (fill_rx)
+               il4965_rx_replenish_now(il);
+       else
+               il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+       /* wait to make sure we flush the pending tasklet */
+       synchronize_irq(il->pci_dev->irq);
+       tasklet_kill(&il->irq_tasklet);
+}
+
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+       u32 inta, handled = 0;
+       u32 inta_fh;
+       unsigned long flags;
+       u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+       u32 inta_mask;
+#endif
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Ack/clear/reset pending uCode interrupts.
+        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+       inta = _il_rd(il, CSR_INT);
+       _il_wr(il, CSR_INT, inta);
+
+       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+        * Any new interrupts that happen after this, either while we're
+        * in this tasklet, or later, will show up in next ISR/tasklet. */
+       inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+       _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & IL_DL_ISR) {
+               /* just for debug */
+               inta_mask = _il_rd(il, CSR_INT_MASK);
+               D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+                     inta_mask, inta_fh);
+       }
+#endif
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+        * atomic, make sure that inta covers all the interrupts that
+        * we've discovered, even if FH interrupt came in just after
+        * reading CSR_INT. */
+       if (inta_fh & CSR49_FH_INT_RX_MASK)
+               inta |= CSR_INT_BIT_FH_RX;
+       if (inta_fh & CSR49_FH_INT_TX_MASK)
+               inta |= CSR_INT_BIT_FH_TX;
+
+       /* Now service all interrupt bits discovered above. */
+       if (inta & CSR_INT_BIT_HW_ERR) {
+               IL_ERR("Hardware error detected.  Restarting.\n");
+
+               /* Tell the device to stop sending interrupts */
+               il_disable_interrupts(il);
+
+               il->isr_stats.hw++;
+               il_irq_handle_error(il);
+
+               handled |= CSR_INT_BIT_HW_ERR;
+
+               return;
+       }
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & (IL_DL_ISR)) {
+               /* NIC fires this, but we don't use it, redundant with WAKEUP */
+               if (inta & CSR_INT_BIT_SCD) {
+                       D_ISR("Scheduler finished transmitting "
+                             "the frame(s).\n");
+                       il->isr_stats.sch++;
+               }
+
+               /* Alive notification via Rx interrupt will do the real work */
+               if (inta & CSR_INT_BIT_ALIVE) {
+                       D_ISR("Alive interrupt\n");
+                       il->isr_stats.alive++;
+               }
+       }
+#endif
+       /* Safely ignore these bits for debug checks below */
+       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+       /* HW RF KILL switch toggled */
+       if (inta & CSR_INT_BIT_RF_KILL) {
+               int hw_rf_kill = 0;
+               if (!(_il_rd(il, CSR_GP_CNTRL) &
+                     CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+                       hw_rf_kill = 1;
+
+               IL_WARN("RF_KILL bit toggled to %s.\n",
+                       hw_rf_kill ? "disable radio" : "enable radio");
+
+               il->isr_stats.rfkill++;
+
+               /* The driver only loads the ucode when the interface is
+                * brought up, and it allows loading the ucode even if the
+                * radio is killed. Hence update the killswitch state here;
+                * the rfkill handler will take care of restarting if needed.
+                */
+               if (!test_bit(S_ALIVE, &il->status)) {
+                       if (hw_rf_kill)
+                               set_bit(S_RF_KILL_HW, &il->status);
+                       else
+                               clear_bit(S_RF_KILL_HW, &il->status);
+                       wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+               }
+
+               handled |= CSR_INT_BIT_RF_KILL;
+       }
+
+       /* Chip got too hot and stopped itself */
+       if (inta & CSR_INT_BIT_CT_KILL) {
+               IL_ERR("Microcode CT kill error detected.\n");
+               il->isr_stats.ctkill++;
+               handled |= CSR_INT_BIT_CT_KILL;
+       }
+
+       /* Error detected by uCode */
+       if (inta & CSR_INT_BIT_SW_ERR) {
+               IL_ERR("Microcode SW error detected. Restarting 0x%X.\n",
+                      inta);
+               il->isr_stats.sw++;
+               il_irq_handle_error(il);
+               handled |= CSR_INT_BIT_SW_ERR;
+       }
+
+       /*
+        * uCode wakes up after power-down sleep.
+        * Tell device about any new tx or host commands enqueued,
+        * and about any Rx buffers made available while asleep.
+        */
+       if (inta & CSR_INT_BIT_WAKEUP) {
+               D_ISR("Wakeup interrupt\n");
+               il_rx_queue_update_write_ptr(il, &il->rxq);
+               for (i = 0; i < il->hw_params.max_txq_num; i++)
+                       il_txq_update_write_ptr(il, &il->txq[i]);
+               il->isr_stats.wakeup++;
+               handled |= CSR_INT_BIT_WAKEUP;
+       }
+
+       /* All uCode command responses, including Tx command responses,
+        * Rx "responses" (frame-received notification), and other
+        * notifications from uCode come through here */
+       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+               il4965_rx_handle(il);
+               il->isr_stats.rx++;
+               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+       }
+
+       /* This "Tx" DMA channel is used only for loading uCode */
+       if (inta & CSR_INT_BIT_FH_TX) {
+               D_ISR("uCode load interrupt\n");
+               il->isr_stats.tx++;
+               handled |= CSR_INT_BIT_FH_TX;
+               /* Wake up uCode load routine, now that load is complete */
+               il->ucode_write_complete = 1;
+               wake_up(&il->wait_command_queue);
+       }
+
+       if (inta & ~handled) {
+               IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+               il->isr_stats.unhandled++;
+       }
+
+       if (inta & ~(il->inta_mask)) {
+               IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+                       inta & ~il->inta_mask);
+               IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
+       }
+
+       /* Re-enable all interrupts */
+       /* only Re-enable if disabled by irq */
+       if (test_bit(S_INT_ENABLED, &il->status))
+               il_enable_interrupts(il);
+       /* Re-enable RF_KILL if it occurred */
+       else if (handled & CSR_INT_BIT_RF_KILL)
+               il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & (IL_DL_ISR)) {
+               inta = _il_rd(il, CSR_INT);
+               inta_mask = _il_rd(il, CSR_INT_MASK);
+               inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+               D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+                     "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+       }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via sysfs below is a per-device debug level;
+ * when set, it is used instead of the global debug level.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+                       char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret)
+               IL_ERR("%s is not in hex or decimal form.\n", buf);
+       else {
+               il->debug_level = val;
+               if (il_alloc_traffic_mem(il))
+                       IL_ERR("Not enough memory to generate traffic log\n");
+       }
+       return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
+                  il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+                       char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+
+       if (!il_is_alive(il))
+               return -EAGAIN;
+
+       return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+
+       if (!il_is_ready_rf(il))
+               return sprintf(buf, "off\n");
+       else
+               return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+                     const char *buf, size_t count)
+{
+       struct il_priv *il = dev_get_drvdata(d);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret)
+               IL_INFO("%s is not in decimal form.\n", buf);
+       else {
+               ret = il_set_tx_power(il, val, false);
+               if (ret)
+                       IL_ERR("failed setting tx power (%d).\n", ret);
+               else
+                       ret = count;
+       }
+       return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
+                  il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+       &dev_attr_temperature.attr,
+       &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+       &dev_attr_debug_level.attr,
+#endif
+       NULL
+};
+
+static struct attribute_group il_attribute_group = {
+       .name = NULL,           /* put in device directory */
+       .attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+       il_free_fw_desc(il->pci_dev, &il->ucode_code);
+       il_free_fw_desc(il->pci_dev, &il->ucode_data);
+       il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+       il_free_fw_desc(il->pci_dev, &il->ucode_init);
+       il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+       il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+       /* Remove all resets to allow NIC to operate */
+       _il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+                                 void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+       const char *name_pre = il->cfg->fw_name_pre;
+       char tag[8];
+
+       if (first) {
+               il->fw_idx = il->cfg->ucode_api_max;
+               sprintf(tag, "%d", il->fw_idx);
+       } else {
+               il->fw_idx--;
+               sprintf(tag, "%d", il->fw_idx);
+       }
+
+       if (il->fw_idx < il->cfg->ucode_api_min) {
+               IL_ERR("no suitable firmware found!\n");
+               return -ENOENT;
+       }
+
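+       /* Request "<fw_name_pre><api_index>.ucode"; each retry decrements
+        * the API index until ucode_api_min is reached */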
+       sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+       D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+       return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+                                      &il->pci_dev->dev, GFP_KERNEL, il,
+                                      il4965_ucode_callback);
+}
+
+struct il4965_firmware_pieces {
+       const void *inst, *data, *init, *init_data, *boot;
+       size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+                    struct il4965_firmware_pieces *pieces)
+{
+       struct il_ucode_header *ucode = (void *)ucode_raw->data;
+       u32 api_ver, hdr_size;
+       const u8 *src;
+
+       il->ucode_ver = le32_to_cpu(ucode->ver);
+       api_ver = IL_UCODE_API(il->ucode_ver);
+
+       switch (api_ver) {
+       default:
+       case 0:
+       case 1:
+       case 2:
+               hdr_size = 24;
+               if (ucode_raw->size < hdr_size) {
+                       IL_ERR("File size too small!\n");
+                       return -EINVAL;
+               }
+               pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+               pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+               pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+               pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+               pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+               src = ucode->v1.data;
+               break;
+       }
+
+       /* Verify size of file vs. image size info in file's header */
+       if (ucode_raw->size !=
+           hdr_size + pieces->inst_size + pieces->data_size +
+           pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+               IL_ERR("uCode file size %d does not match expected size\n",
+                      (int)ucode_raw->size);
+               return -EINVAL;
+       }
+
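+       /* Image layout in the file: header, then runtime inst, runtime data,
+        * init inst, init data and boot code, in that order */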
+       pieces->inst = src;
+       src += pieces->inst_size;
+       pieces->data = src;
+       src += pieces->data_size;
+       pieces->init = src;
+       src += pieces->init_size;
+       pieces->init_data = src;
+       src += pieces->init_data_size;
+       pieces->boot = src;
+       src += pieces->boot_size;
+
+       return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+       struct il_priv *il = context;
+       struct il_ucode_header *ucode;
+       int err;
+       struct il4965_firmware_pieces pieces;
+       const unsigned int api_max = il->cfg->ucode_api_max;
+       const unsigned int api_min = il->cfg->ucode_api_min;
+       u32 api_ver;
+
+       u32 max_probe_length = 200;
+       u32 standard_phy_calibration_size =
+           IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+       memset(&pieces, 0, sizeof(pieces));
+
+       if (!ucode_raw) {
+               if (il->fw_idx <= il->cfg->ucode_api_max)
+                       IL_ERR("request for firmware file '%s' failed.\n",
+                              il->firmware_name);
+               goto try_again;
+       }
+
+       D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+              ucode_raw->size);
+
+       /* Make sure that we got at least the API version number */
+       if (ucode_raw->size < 4) {
+               IL_ERR("File size way too small!\n");
+               goto try_again;
+       }
+
+       /* Data from ucode file:  header followed by uCode images */
+       ucode = (struct il_ucode_header *)ucode_raw->data;
+
+       err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+       if (err)
+               goto try_again;
+
+       api_ver = IL_UCODE_API(il->ucode_ver);
+
+       /*
+        * api_ver should match the api version forming part of the
+        * firmware filename ... but we don't check for that and only rely
+        * on the API version read from firmware header from here on forward
+        */
+       if (api_ver < api_min || api_ver > api_max) {
+               IL_ERR("Driver unable to support your firmware API. "
+                      "Driver supports v%u, firmware is v%u.\n", api_max,
+                      api_ver);
+               goto try_again;
+       }
+
+       if (api_ver != api_max)
+               IL_ERR("Firmware has old API version. Expected v%u, "
+                      "got v%u. New firmware can be obtained "
+                      "from http://www.intellinuxwireless.org.\n", api_max,
+                      api_ver);
+
+       IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+               IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+               IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+       snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+                "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+                IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+                IL_UCODE_SERIAL(il->ucode_ver));
+
+       /*
+        * For any of the failures below (before allocating pci memory)
+        * we will try to load a version with a smaller API -- maybe the
+        * user just got a corrupted version of the latest API.
+        */
+
+       D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+       D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
+       D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
+       D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
+       D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
+       D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
+
+       /* Verify that uCode images will fit in card's SRAM */
+       if (pieces.inst_size > il->hw_params.max_inst_size) {
+               IL_ERR("uCode instr len %Zd too large to fit in\n",
+                      pieces.inst_size);
+               goto try_again;
+       }
+
+       if (pieces.data_size > il->hw_params.max_data_size) {
+               IL_ERR("uCode data len %Zd too large to fit in\n",
+                      pieces.data_size);
+               goto try_again;
+       }
+
+       if (pieces.init_size > il->hw_params.max_inst_size) {
+               IL_ERR("uCode init instr len %Zd too large to fit in\n",
+                      pieces.init_size);
+               goto try_again;
+       }
+
+       if (pieces.init_data_size > il->hw_params.max_data_size) {
+               IL_ERR("uCode init data len %Zd too large to fit in\n",
+                      pieces.init_data_size);
+               goto try_again;
+       }
+
+       if (pieces.boot_size > il->hw_params.max_bsm_size) {
+               IL_ERR("uCode boot instr len %Zd too large to fit in\n",
+                      pieces.boot_size);
+               goto try_again;
+       }
+
+       /* Allocate ucode buffers for card's bus-master loading ... */
+
+       /* Runtime instructions and 2 copies of data:
+        * 1) unmodified from disk
+        * 2) backup cache for save/restore during power-downs */
+       il->ucode_code.len = pieces.inst_size;
+       il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+       il->ucode_data.len = pieces.data_size;
+       il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+       il->ucode_data_backup.len = pieces.data_size;
+       il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+       if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+           !il->ucode_data_backup.v_addr)
+               goto err_pci_alloc;
+
+       /* Initialization instructions and data */
+       if (pieces.init_size && pieces.init_data_size) {
+               il->ucode_init.len = pieces.init_size;
+               il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+               il->ucode_init_data.len = pieces.init_data_size;
+               il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+               if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Bootstrap (instructions only, no data) */
+       if (pieces.boot_size) {
+               il->ucode_boot.len = pieces.boot_size;
+               il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+               if (!il->ucode_boot.v_addr)
+                       goto err_pci_alloc;
+       }
+
+       /* Now that we can no longer fail, copy information */
+
+       il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+       /* Copy images into buffers for card's bus-master reads ... */
+
+       /* Runtime instructions (first block of data in file) */
+       D_INFO("Copying (but not loading) uCode instr len %Zd\n",
+              pieces.inst_size);
+       memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+       D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+              il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+       /*
+        * Runtime data
+        * NOTE:  Copy into backup buffer will be done in il_up()
+        */
+       D_INFO("Copying (but not loading) uCode data len %Zd\n",
+              pieces.data_size);
+       memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+       memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+       /* Initialization instructions */
+       if (pieces.init_size) {
+               D_INFO("Copying (but not loading) init instr len %Zd\n",
+                      pieces.init_size);
+               memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+       }
+
+       /* Initialization data */
+       if (pieces.init_data_size) {
+               D_INFO("Copying (but not loading) init data len %Zd\n",
+                      pieces.init_data_size);
+               memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+                      pieces.init_data_size);
+       }
+
+       /* Bootstrap instructions */
+       D_INFO("Copying (but not loading) boot instr len %Zd\n",
+              pieces.boot_size);
+       memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+       /*
+        * figure out the offsets of the chain noise reset and gain commands
+        * based on the size of the standard phy calibration commands table
+        */
+       il->_4965.phy_calib_chain_noise_reset_cmd =
+           standard_phy_calibration_size;
+       il->_4965.phy_calib_chain_noise_gain_cmd =
+           standard_phy_calibration_size + 1;
+
+       /**************************************************
+        * This is still part of probe() in a sense...
+        *
+        * 9. Setup and register with mac80211 and debugfs
+        **************************************************/
+       err = il4965_mac_setup_register(il, max_probe_length);
+       if (err)
+               goto out_unbind;
+
+       err = il_dbgfs_register(il, DRV_NAME);
+       if (err)
+               IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+                      err);
+
+       err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+       if (err) {
+               IL_ERR("failed to create sysfs device attributes\n");
+               goto out_unbind;
+       }
+
+       /* We have our copies now, allow OS release its copies */
+       release_firmware(ucode_raw);
+       complete(&il->_4965.firmware_loading_complete);
+       return;
+
+try_again:
+       /* try next, if any */
+       if (il4965_request_firmware(il, false))
+               goto out_unbind;
+       release_firmware(ucode_raw);
+       return;
+
+err_pci_alloc:
+       IL_ERR("failed to allocate pci memory\n");
+       il4965_dealloc_ucode_pci(il);
+out_unbind:
+       complete(&il->_4965.firmware_loading_complete);
+       device_release_driver(&il->pci_dev->dev);
+       release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+       "OK",
+       "FAIL",
+       "BAD_PARAM",
+       "BAD_CHECKSUM",
+       "NMI_INTERRUPT_WDG",
+       "SYSASSERT",
+       "FATAL_ERROR",
+       "BAD_COMMAND",
+       "HW_ERROR_TUNE_LOCK",
+       "HW_ERROR_TEMPERATURE",
+       "ILLEGAL_CHAN_FREQ",
+       "VCC_NOT_STBL",
+       "FH49_ERROR",
+       "NMI_INTERRUPT_HOST",
+       "NMI_INTERRUPT_ACTION_PT",
+       "NMI_INTERRUPT_UNKNOWN",
+       "UCODE_VERSION_MISMATCH",
+       "HW_ERROR_ABS_LOCK",
+       "HW_ERROR_CAL_LOCK_FAIL",
+       "NMI_INTERRUPT_INST_ACTION_PT",
+       "NMI_INTERRUPT_DATA_ACTION_PT",
+       "NMI_TRM_HW_ER",
+       "NMI_INTERRUPT_TRM",
+       "NMI_INTERRUPT_BREAK_POINT",
+       "DEBUG_0",
+       "DEBUG_1",
+       "DEBUG_2",
+       "DEBUG_3",
+};
+
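+/*
+ * Error descriptions reported by newer uCode images. Unlike
+ * desc_lookup_text above, these are matched by numeric error code
+ * rather than by array index (see il4965_desc_lookup() below).
+ */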
+static struct {
+       char *name;
+       u8 num;
+} advanced_lookup[] = {
+       {"NMI_INTERRUPT_WDG", 0x34},
+       {"SYSASSERT", 0x35},
+       {"UCODE_VERSION_MISMATCH", 0x37},
+       {"BAD_COMMAND", 0x38},
+       {"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C},
+       {"FATAL_ERROR", 0x3D},
+       {"NMI_TRM_HW_ERR", 0x46},
+       {"NMI_INTERRUPT_TRM", 0x4C},
+       {"NMI_INTERRUPT_BREAK_POINT", 0x54},
+       {"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C},
+       {"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64},
+       {"NMI_INTERRUPT_HOST", 0x66},
+       {"NMI_INTERRUPT_ACTION_PT", 0x7C},
+       {"NMI_INTERRUPT_UNKNOWN", 0x84},
+       {"NMI_INTERRUPT_INST_ACTION_PT", 0x86},
+       {"ADVANCED_SYSASSERT", 0},
+};
+
+static const char *
+il4965_desc_lookup(u32 num)
+{
+       int i;
+       int max = ARRAY_SIZE(desc_lookup_text);
+
+       if (num < max)
+               return desc_lookup_text[num];
+
+       max = ARRAY_SIZE(advanced_lookup) - 1;
+       for (i = 0; i < max; i++) {
+               if (advanced_lookup[i].num == num)
+                       break;
+       }
+       return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+       u32 data2, line;
+       u32 desc, time, count, base, data1;
+       u32 blink1, blink2, ilink1, ilink2;
+       u32 pc, hcmd;
+
+       if (il->ucode_type == UCODE_INIT)
+               base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+       else
+               base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+       if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+               IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
+                      base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+               return;
+       }
+
+       count = il_read_targ_mem(il, base);
+
+       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+               IL_ERR("Start IWL Error Log Dump:\n");
+               IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+       }
+
+       desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+       il->isr_stats.err_code = desc;
+       pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+       blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+       blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+       ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+       ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+       data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+       data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+       line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+       time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+       hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+       IL_ERR("Desc                                  Time       "
+              "data1      data2      line\n");
+       IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+              il4965_desc_lookup(desc), desc, time, data1, data2, line);
+       IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
+       IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+              blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+       struct il_ct_kill_config cmd;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&il->lock, flags);
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+              CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       cmd.critical_temperature_R =
+           cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+       ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+       if (ret)
+               IL_ERR("C_CT_KILL_CONFIG failed\n");
+       else
+               D_INFO("C_CT_KILL_CONFIG succeeded, "
+                      "critical temperature is %d\n",
+                      il->hw_params.ct_kill_threshold);
+}
+
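+/*
+ * Default mapping of Tx queues 0-6 to hardware FIFOs: the four EDCA
+ * access class FIFOs, the command FIFO, and two unused entries.
+ */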
+static const s8 default_queue_to_tx_fifo[] = {
+       IL_TX_FIFO_VO,
+       IL_TX_FIFO_VI,
+       IL_TX_FIFO_BE,
+       IL_TX_FIFO_BK,
+       IL49_CMD_FIFO_NUM,
+       IL_TX_FIFO_UNUSED,
+       IL_TX_FIFO_UNUSED,
+};
+
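+/* Build a bitmask covering bits lo..hi inclusive */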
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+       u32 a;
+       unsigned long flags;
+       int i, chan;
+       u32 reg_val;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Clear 4965's internal Tx Scheduler data base */
+       il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+       a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+       for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+               il_write_targ_mem(il, a, 0);
+       for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+               il_write_targ_mem(il, a, 0);
+       for (;
+            a <
+            il->scd_base_addr +
+            IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+            a += 4)
+               il_write_targ_mem(il, a, 0);
+
+       /* Tell 4965 where to find Tx byte count tables */
+       il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
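+       /* (the register takes the byte count table address in 1024-byte
+        * units, hence the >> 10 above) */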
+
+       /* Enable DMA channel */
+       for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+               il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+                     FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+                     FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+       /* Update FH chicken bits */
+       reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+       il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+             reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+       /* Disable chain mode for all queues */
+       il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+       /* Initialize each Tx queue (including the command queue) */
+       for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+               /* TFD circular buffer read/write indexes */
+               il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+               il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+               /* Max Tx Window size for Scheduler-ACK mode */
+               il_write_targ_mem(il,
+                                 il->scd_base_addr +
+                                 IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+                                 (SCD_WIN_SIZE <<
+                                  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+                                 IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+               /* Frame limit */
+               il_write_targ_mem(il,
+                                 il->scd_base_addr +
+                                 IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+                                 sizeof(u32),
+                                 (SCD_FRAME_LIMIT <<
+                                  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+                                 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+       }
+       il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+                  (1 << il->hw_params.max_txq_num) - 1);
+
+       /* Activate all Tx DMA/FIFO channels */
+       il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+       il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+       /* make sure all queues are not stopped */
+       memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+       for (i = 0; i < 4; i++)
+               atomic_set(&il->queue_stop_count[i], 0);
+
+       /* reset to 0 to enable all the queues first */
+       il->txq_ctx_active_msk = 0;
+       /* Map each Tx/cmd queue to its corresponding fifo */
+       BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+       for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+               int ac = default_queue_to_tx_fifo[i];
+
+               il_txq_ctx_activate(il, i);
+
+               if (ac == IL_TX_FIFO_UNUSED)
+                       continue;
+
+               il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+       }
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       return 0;
+}
+
+/**
+ * il4965_alive_start - called after the N_ALIVE notification is received
+ *                   from the protocol/runtime uCode (the initialization
+ *                   uCode's Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+       int ret = 0;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       D_INFO("Runtime Alive received.\n");
+
+       if (il->card_alive.is_valid != UCODE_VALID_OK) {
+               /* We had an error bringing up the hardware, so take it
+                * all the way back down so we can try again */
+               D_INFO("Alive failed.\n");
+               goto restart;
+       }
+
+       /* The "initialize" uCode has loaded the runtime uCode ... verify the
+        * inst image.  This is a paranoid check, because we would not have
+        * gotten the "runtime" alive if the code weren't properly loaded.  */
+       if (il4965_verify_ucode(il)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               D_INFO("Bad runtime uCode load.\n");
+               goto restart;
+       }
+
+       ret = il4965_alive_notify(il);
+       if (ret) {
+               IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+               goto restart;
+       }
+
+       /* After the ALIVE response, we can send host commands to the uCode */
+       set_bit(S_ALIVE, &il->status);
+
+       /* Enable watchdog to monitor the driver tx queues */
+       il_setup_watchdog(il);
+
+       if (il_is_rfkill(il))
+               return;
+
+       ieee80211_wake_queues(il->hw);
+
+       il->active_rate = RATES_MASK;
+
+       if (il_is_associated_ctx(ctx)) {
+               struct il_rxon_cmd *active_rxon =
+                   (struct il_rxon_cmd *)&ctx->active;
+               /* apply any changes in staging */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       } else {
+               /* Initialize our rx_config data */
+               il_connection_init_rx_config(il, &il->ctx);
+
+               if (il->cfg->ops->hcmd->set_rxon_chain)
+                       il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+       }
+
+       /* Configure bluetooth coexistence if enabled */
+       il_send_bt_config(il);
+
+       il4965_reset_run_time_calib(il);
+
+       set_bit(S_READY, &il->status);
+
+       /* Configure the adapter for unassociated operation */
+       il_commit_rxon(il, ctx);
+
+       /* At this point, the NIC is initialized and operational */
+       il4965_rf_kill_ct_config(il);
+
+       D_INFO("ALIVE processing complete.\n");
+       wake_up(&il->wait_command_queue);
+
+       il_power_update_mode(il, true);
+       D_INFO("Updated power mode\n");
+
+       return;
+
+restart:
+       queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il4965_down(struct il_priv *il)
+{
+       unsigned long flags;
+       int exit_pending;
+
+       D_INFO(DRV_NAME " is going down\n");
+
+       il_scan_cancel_timeout(il, 200);
+
+       exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+       /* Stop the TX queue watchdog. The S_EXIT_PENDING bit must be set
+        * to prevent the timer from being rearmed. */
+       del_timer_sync(&il->watchdog);
+
+       il_clear_ucode_stations(il, NULL);
+       il_dealloc_bcast_stations(il);
+       il_clear_driver_stations(il);
+
+       /* Unblock any waiting calls */
+       wake_up_all(&il->wait_command_queue);
+
+       /* Wipe out the EXIT_PENDING status bit if we are not actually
+        * exiting the module */
+       if (!exit_pending)
+               clear_bit(S_EXIT_PENDING, &il->status);
+
+       /* stop and reset the on-board processor */
+       _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       /* tell the device to stop sending interrupts */
+       spin_lock_irqsave(&il->lock, flags);
+       il_disable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+       il4965_synchronize_irq(il);
+
+       if (il->mac80211_registered)
+               ieee80211_stop_queues(il->hw);
+
+       /* If we have not previously called il_init() then
+        * clear all bits but the RF Kill bit and return */
+       if (!il_is_init(il)) {
+               il->status =
+                   test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+                   test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+                   test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+               goto exit;
+       }
+
+       /* ...otherwise clear out all the status bits but the RF Kill
+        * bit and continue taking the NIC down. */
+       il->status &=
+           test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+           test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+           test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+           test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+       il4965_txq_ctx_stop(il);
+       il4965_rxq_stop(il);
+
+       /* Power-down device's busmaster DMA clocks */
+       il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(5);
+
+       /* Make sure (redundant) we've released our request to stay awake */
+       il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /* Stop the device, and put it in low power state */
+       il_apm_stop(il);
+
+exit:
+       memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+       dev_kfree_skb(il->beacon_skb);
+       il->beacon_skb = NULL;
+
+       /* clear out any free frames */
+       il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+       mutex_lock(&il->mutex);
+       __il4965_down(il);
+       mutex_unlock(&il->mutex);
+
+       il4965_cancel_deferred_work(il);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int
+il4965_set_hw_ready(struct il_priv *il)
+{
+       int ret = 0;
+
+       il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                  CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+       /* See if we got it */
+       ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                          CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+                          HW_READY_TIMEOUT);
+       il->hw_ready = (ret != -ETIMEDOUT);
+
+       D_INFO("hardware %s\n", il->hw_ready ? "ready" : "not ready");
+       return ret;
+}
+
+static int
+il4965_prepare_card_hw(struct il_priv *il)
+{
+       int ret = 0;
+
+       D_INFO("il4965_prepare_card_hw enter\n");
+
+       ret = il4965_set_hw_ready(il);
+       if (il->hw_ready)
+               return ret;
+
+       /* If HW is not ready, prepare the conditions to check again */
+       il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+       ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+                          ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+                          CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+       /* HW should be ready by now, check again. */
+       if (ret != -ETIMEDOUT)
+               il4965_set_hw_ready(il);
+
+       return ret;
+}
+
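+/* How many times __il4965_up() retries loading the bootstrap uCode before giving up */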
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+       int i;
+       int ret;
+
+       if (test_bit(S_EXIT_PENDING, &il->status)) {
+               IL_WARN("Exit pending; will not bring the NIC up\n");
+               return -EIO;
+       }
+
+       if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+               IL_ERR("ucode not available for device bringup\n");
+               return -EIO;
+       }
+
+       ret = il4965_alloc_bcast_station(il, &il->ctx);
+       if (ret) {
+               il_dealloc_bcast_stations(il);
+               return ret;
+       }
+
+       il4965_prepare_card_hw(il);
+
+       if (!il->hw_ready) {
+               IL_WARN("HW not ready, exiting\n");
+               return -EIO;
+       }
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(S_RF_KILL_HW, &il->status);
+       else
+               set_bit(S_RF_KILL_HW, &il->status);
+
+       if (il_is_rfkill(il)) {
+               wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+               il_enable_interrupts(il);
+               IL_WARN("Radio disabled by HW RF Kill switch\n");
+               return 0;
+       }
+
+       _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+       /* must be initialised before il_hw_nic_init */
+       il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+       ret = il4965_hw_nic_init(il);
+       if (ret) {
+               IL_ERR("Unable to init nic\n");
+               return ret;
+       }
+
+       /* make sure rfkill handshake bits are cleared */
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+       /* clear (again), then enable host interrupts */
+       _il_wr(il, CSR_INT, 0xFFFFFFFF);
+       il_enable_interrupts(il);
+
+       /* really make sure rfkill handshake bits are cleared */
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+       _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+       /* Copy original ucode data image from disk into backup cache.
+        * This will be used to initialize the on-board processor's
+        * data SRAM for a clean start when the runtime program first loads. */
+       memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+              il->ucode_data.len);
+
+       for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+               /* load bootstrap state machine,
+                * load bootstrap program into processor's memory,
+                * prepare to load the "initialize" uCode */
+               ret = il->cfg->ops->lib->load_ucode(il);
+
+               if (ret) {
+                       IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+                       continue;
+               }
+
+               /* start card; "initialize" will load runtime ucode */
+               il4965_nic_start(il);
+
+               D_INFO(DRV_NAME " is coming up\n");
+
+               return 0;
+       }
+
+       set_bit(S_EXIT_PENDING, &il->status);
+       __il4965_down(il);
+       clear_bit(S_EXIT_PENDING, &il->status);
+
+       /* tried to restart and configure the device for as long as our
+        * patience could withstand */
+       IL_ERR("Unable to initialize device after %d attempts.\n", i);
+       return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+       struct il_priv *il =
+           container_of(data, struct il_priv, init_alive_start.work);
+
+       mutex_lock(&il->mutex);
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               goto out;
+
+       il->cfg->ops->lib->init_alive_start(il);
+out:
+       mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+       struct il_priv *il =
+           container_of(data, struct il_priv, alive_start.work);
+
+       mutex_lock(&il->mutex);
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               goto out;
+
+       il4965_alive_start(il);
+out:
+       mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+       struct il_priv *il = container_of(work, struct il_priv,
+                                         run_time_calib_work);
+
+       mutex_lock(&il->mutex);
+
+       if (test_bit(S_EXIT_PENDING, &il->status) ||
+           test_bit(S_SCANNING, &il->status)) {
+               mutex_unlock(&il->mutex);
+               return;
+       }
+
+       if (il->start_calib) {
+               il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+               il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+       }
+
+       mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+       struct il_priv *il = container_of(data, struct il_priv, restart);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+               mutex_lock(&il->mutex);
+               il->ctx.vif = NULL;
+               il->is_open = 0;
+
+               __il4965_down(il);
+
+               mutex_unlock(&il->mutex);
+               il4965_cancel_deferred_work(il);
+               ieee80211_restart_hw(il->hw);
+       } else {
+               il4965_down(il);
+
+               mutex_lock(&il->mutex);
+               if (test_bit(S_EXIT_PENDING, &il->status)) {
+                       mutex_unlock(&il->mutex);
+                       return;
+               }
+
+               __il4965_up(il);
+               mutex_unlock(&il->mutex);
+       }
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+       struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       mutex_lock(&il->mutex);
+       il4965_rx_replenish(il);
+       mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
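+/* How long il4965_mac_start() waits for the runtime uCode's ALIVE, in jiffies */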
+#define UCODE_READY_TIMEOUT    (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+       int ret;
+       struct ieee80211_hw *hw = il->hw;
+
+       hw->rate_control_algorithm = "iwl-4965-rs";
+
+       /* Tell mac80211 our characteristics */
+       hw->flags =
+           IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+           IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+           IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+       if (il->cfg->sku & IL_SKU_N)
+               hw->flags |=
+                   IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+                   IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+       hw->sta_data_size = sizeof(struct il_station_priv);
+       hw->vif_data_size = sizeof(struct il_vif_priv);
+
+       hw->wiphy->interface_modes |= il->ctx.interface_modes;
+       hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+
+       hw->wiphy->flags |=
+           WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+       /*
+        * For now, disable PS by default because it affects
+        * RX performance significantly.
+        */
+       hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+       /* we create the 802.11 header and a zero-length SSID element */
+       hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+       /* Default value; 4 EDCA QOS priorities */
+       hw->queues = 4;
+
+       hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+       if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+               il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+                   &il->bands[IEEE80211_BAND_2GHZ];
+       if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+               il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+                   &il->bands[IEEE80211_BAND_5GHZ];
+
+       il_leds_init(il);
+
+       ret = ieee80211_register_hw(il->hw);
+       if (ret) {
+               IL_ERR("Failed to register hw (error %d)\n", ret);
+               return ret;
+       }
+       il->mac80211_registered = 1;
+
+       return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+       struct il_priv *il = hw->priv;
+       int ret;
+
+       D_MAC80211("enter\n");
+
+       /* we should be verifying the device is ready to be opened */
+       mutex_lock(&il->mutex);
+       ret = __il4965_up(il);
+       mutex_unlock(&il->mutex);
+
+       if (ret)
+               return ret;
+
+       if (il_is_rfkill(il))
+               goto out;
+
+       D_INFO("Start UP work done.\n");
+
+       /* Wait for START_ALIVE from the runtime ucode. Otherwise callbacks
+        * from mac80211 will not run successfully. */
+       ret = wait_event_timeout(il->wait_command_queue,
+                                test_bit(S_READY, &il->status),
+                                UCODE_READY_TIMEOUT);
+       if (!ret) {
+               if (!test_bit(S_READY, &il->status)) {
+                       IL_ERR("START_ALIVE timeout after %dms.\n",
+                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
+                       return -ETIMEDOUT;
+               }
+       }
+
+       il4965_led_enable(il);
+
+out:
+       il->is_open = 1;
+       D_MAC80211("leave\n");
+       return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+       struct il_priv *il = hw->priv;
+
+       D_MAC80211("enter\n");
+
+       if (!il->is_open)
+               return;
+
+       il->is_open = 0;
+
+       il4965_down(il);
+
+       flush_workqueue(il->workqueue);
+
+       /* User space software may expect to get rfkill changes
+        * even if the interface is down */
+       _il_wr(il, CSR_INT, 0xFFFFFFFF);
+       il_enable_rfkill_int(il);
+
+       D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct il_priv *il = hw->priv;
+
+       D_MACDUMP("enter\n");
+
+       D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+            ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+       if (il4965_tx_skb(il, skb))
+               dev_kfree_skb_any(skb);
+
+       D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                          struct ieee80211_key_conf *keyconf,
+                          struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+       struct il_priv *il = hw->priv;
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       D_MAC80211("enter\n");
+
+       il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
+                              phase1key);
+
+       D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key)
+{
+       struct il_priv *il = hw->priv;
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct il_rxon_context *ctx = vif_priv->ctx;
+       int ret;
+       u8 sta_id;
+       bool is_default_wep_key = false;
+
+       D_MAC80211("enter\n");
+
+       if (il->cfg->mod_params->sw_crypto) {
+               D_MAC80211("leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+       if (sta_id == IL_INVALID_STATION)
+               return -EINVAL;
+
+       mutex_lock(&il->mutex);
+       il_scan_cancel_timeout(il, 100);
+
+       /*
+        * If we are getting a WEP group key and we did not receive any key
+        * mapping so far, we are in legacy WEP mode (group key only);
+        * otherwise we are in 1X mode.
+        * In legacy WEP mode, we use another host command to the uCode.
+        */
+       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+            key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+               if (cmd == SET_KEY)
+                       is_default_wep_key = !ctx->key_mapping_keys;
+               else
+                       is_default_wep_key =
+                           (key->hw_key_idx == HW_KEY_DEFAULT);
+       }
+
+       switch (cmd) {
+       case SET_KEY:
+               if (is_default_wep_key)
+                       ret =
+                           il4965_set_default_wep_key(il, vif_priv->ctx, key);
+               else
+                       ret =
+                           il4965_set_dynamic_key(il, vif_priv->ctx, key,
+                                                  sta_id);
+
+               D_MAC80211("enable hwcrypto key\n");
+               break;
+       case DISABLE_KEY:
+               if (is_default_wep_key)
+                       ret = il4965_remove_default_wep_key(il, ctx, key);
+               else
+                       ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+
+               D_MAC80211("disable hwcrypto key\n");
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&il->mutex);
+       D_MAC80211("leave\n");
+
+       return ret;
+}
+
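+/*
+ * mac80211 A-MPDU callback: dispatch RX/TX aggregation start/stop
+ * requests to the 4965 station helpers.  Aggregation is refused with
+ * -EACCES when the SKU is not HT capable (IL_SKU_N not set).
+ */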
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       enum ieee80211_ampdu_mlme_action action,
+                       struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+                       u8 buf_size)
+{
+       struct il_priv *il = hw->priv;
+       int ret = -EINVAL;
+
+       D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+       if (!(il->cfg->sku & IL_SKU_N))
+               return -EACCES;
+
+       mutex_lock(&il->mutex);
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               D_HT("start Rx\n");
+               ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               D_HT("stop Rx\n");
+               ret = il4965_sta_rx_agg_stop(il, sta, tid);
+               if (test_bit(S_EXIT_PENDING, &il->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               D_HT("start Tx\n");
+               ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+               break;
+       case IEEE80211_AMPDU_TX_STOP:
+               D_HT("stop Tx\n");
+               ret = il4965_tx_agg_stop(il, vif, sta, tid);
+               if (test_bit(S_EXIT_PENDING, &il->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ret = 0;
+               break;
+       }
+       mutex_unlock(&il->mutex);
+
+       return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta)
+{
+       struct il_priv *il = hw->priv;
+       struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+       int ret;
+       u8 sta_id;
+
+       D_INFO("received request to add station %pM\n", sta->addr);
+       mutex_lock(&il->mutex);
+       D_INFO("proceeding to add station %pM\n", sta->addr);
+       sta_priv->common.sta_id = IL_INVALID_STATION;
+
+       atomic_set(&sta_priv->pending_frames, 0);
+
+       ret =
+           il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
+                                 &sta_id);
+       if (ret) {
+               IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+               /* Should we return success if return code is EEXIST ? */
+               mutex_unlock(&il->mutex);
+               return ret;
+       }
+
+       sta_priv->common.sta_id = sta_id;
+
+       /* Initialize rate scaling */
+       D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+       il4965_rs_rate_init(il, sta, sta_id);
+       mutex_unlock(&il->mutex);
+
+       return 0;
+}
+
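+/*
+ * mac80211 channel-switch callback: after checking that we are
+ * associated, not scanning and not already switching, rebuild the
+ * staging RXON and HT configuration for the new channel and hand the
+ * switch to the device-specific set_channel_switch op.  The
+ * S_CHANNEL_SWITCH_PENDING bit is cleared again if that op fails.
+ */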
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw,
+                         struct ieee80211_channel_switch *ch_switch)
+{
+       struct il_priv *il = hw->priv;
+       const struct il_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = ch_switch->channel;
+       struct il_ht_config *ht_conf = &il->current_ht_config;
+
+       struct il_rxon_context *ctx = &il->ctx;
+       u16 ch;
+
+       D_MAC80211("enter\n");
+
+       mutex_lock(&il->mutex);
+
+       if (il_is_rfkill(il))
+               goto out;
+
+       if (test_bit(S_EXIT_PENDING, &il->status) ||
+           test_bit(S_SCANNING, &il->status) ||
+           test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+               goto out;
+
+       if (!il_is_associated_ctx(ctx))
+               goto out;
+
+       if (!il->cfg->ops->lib->set_channel_switch)
+               goto out;
+
+       ch = channel->hw_value;
+       if (le16_to_cpu(ctx->active.channel) == ch)
+               goto out;
+
+       ch_info = il_get_channel_info(il, channel->band, ch);
+       if (!il_is_channel_valid(ch_info)) {
+               D_MAC80211("invalid channel\n");
+               goto out;
+       }
+
+       spin_lock_irq(&il->lock);
+
+       il->current_ht_config.smps = conf->smps_mode;
+
+       /* Configure HT40 channels */
+       ctx->ht.enabled = conf_is_ht(conf);
+       if (ctx->ht.enabled) {
+               if (conf_is_ht40_minus(conf)) {
+                       ctx->ht.extension_chan_offset =
+                           IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                       ctx->ht.is_40mhz = true;
+               } else if (conf_is_ht40_plus(conf)) {
+                       ctx->ht.extension_chan_offset =
+                           IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                       ctx->ht.is_40mhz = true;
+               } else {
+                       ctx->ht.extension_chan_offset =
+                           IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                       ctx->ht.is_40mhz = false;
+               }
+       } else
+               ctx->ht.is_40mhz = false;
+
+       if ((le16_to_cpu(ctx->staging.channel) != ch))
+               ctx->staging.flags = 0;
+
+       il_set_rxon_channel(il, channel, ctx);
+       il_set_rxon_ht(il, ht_conf);
+       il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+       spin_unlock_irq(&il->lock);
+
+       il_set_rate(il);
+       /*
+        * at this point, staging_rxon has the
+        * configuration for channel switch
+        */
+       set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+       il->switch_channel = cpu_to_le16(ch);
+       if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+               clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+               il->switch_channel = 0;
+               ieee80211_chswitch_done(ctx->vif, false);
+       }
+
+out:
+       mutex_unlock(&il->mutex);
+       D_MAC80211("leave\n");
+}
+
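+/*
+ * mac80211 filter callback: the CHK macro below translates FIF_*
+ * filter flags into RXON filter bits, collecting bits to set in
+ * filter_or and bits to clear in filter_nand, which are then applied
+ * to the staging RXON.  The change is committed later since the
+ * hardware may currently be scanning.
+ */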
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+                       unsigned int *total_flags, u64 multicast)
+{
+       struct il_priv *il = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+                  *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&il->mutex);
+
+       il->ctx.staging.filter_flags &= ~filter_nand;
+       il->ctx.staging.filter_flags |= filter_or;
+
+       /*
+        * Not committing directly because hardware can perform a scan,
+        * but we'll eventually commit the filter flags change anyway.
+        */
+
+       mutex_unlock(&il->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in il_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &=
+           FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+           FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+       struct il_priv *il = container_of(work, struct il_priv,
+                                         txpower_work);
+
+       mutex_lock(&il->mutex);
+
+       /* If a scan happened to start before we got here
+        * then just return; the stats notification will
+        * kick off another scheduled work to compensate for
+        * any temperature delta we missed here. */
+       if (test_bit(S_EXIT_PENDING, &il->status) ||
+           test_bit(S_SCANNING, &il->status))
+               goto out;
+
+       /* Regardless of whether we are associated, we must reconfigure the
+        * TX power since frames can be sent on non-radar channels while
+        * not associated */
+       il->cfg->ops->lib->send_tx_power(il);
+
+       /* Update last_temperature to keep is_calib_needed from running
+        * when it isn't needed... */
+       il->last_temperature = il->temperature;
+out:
+       mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+       il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+       init_waitqueue_head(&il->wait_command_queue);
+
+       INIT_WORK(&il->restart, il4965_bg_restart);
+       INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+       INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+       INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+       INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+       il_setup_scan_deferred_work(il);
+
+       INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+       init_timer(&il->stats_periodic);
+       il->stats_periodic.data = (unsigned long)il;
+       il->stats_periodic.function = il4965_bg_stats_periodic;
+
+       init_timer(&il->watchdog);
+       il->watchdog.data = (unsigned long)il;
+       il->watchdog.function = il_bg_watchdog;
+
+       tasklet_init(&il->irq_tasklet,
+                    (void (*)(unsigned long))il4965_irq_tasklet,
+                    (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+       cancel_work_sync(&il->txpower_work);
+       cancel_delayed_work_sync(&il->init_alive_start);
+       cancel_delayed_work(&il->alive_start);
+       cancel_work_sync(&il->run_time_calib_work);
+
+       il_cancel_scan_deferred_work(il);
+
+       del_timer_sync(&il->stats_periodic);
+}
+
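+/*
+ * Fill the mac80211 rate table from il_rates[].  il_rates[].ieee is in
+ * 500 kb/s units while mac80211 expects 100 kb/s units, hence the
+ * multiplication by five; CCK rates other than 1M also get the
+ * short-preamble flag.
+ */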
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+       int i;
+
+       for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+               rates[i].bitrate = il_rates[i].ieee * 5;
+               rates[i].hw_value = i;  /* Rate scaling will work on idxes */
+               rates[i].hw_value_short = i;
+               rates[i].flags = 0;
+               if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+                       /*
+                        * If CCK != 1M then set short preamble rate flag.
+                        */
+                       rates[i].flags |=
+                           (il_rates[i].plcp ==
+                            RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+               }
+       }
+}
+
+/*
+ * Acquire il->lock before calling this function!
+ */
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+       il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+       il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
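+/*
+ * Program the scheduler status register for one Tx queue: whether the
+ * queue is active, which Tx FIFO it feeds, and whether it runs in
+ * scheduler-retry (aggregation) mode.
+ */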
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+                          int tx_fifo_id, int scd_retry)
+{
+       int txq_id = txq->q.id;
+
+       /* Find out whether to activate Tx queue */
+       int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+       /* Set up and activate */
+       il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+                  (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+                  (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+                  (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+                  (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+                  IL49_SCD_QUEUE_STTS_REG_MSK);
+
+       txq->sched_retry = scd_retry;
+
+       D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+              scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+       int ret;
+
+       spin_lock_init(&il->sta_lock);
+       spin_lock_init(&il->hcmd_lock);
+
+       INIT_LIST_HEAD(&il->free_frames);
+
+       mutex_init(&il->mutex);
+
+       il->ieee_channels = NULL;
+       il->ieee_rates = NULL;
+       il->band = IEEE80211_BAND_2GHZ;
+
+       il->iw_mode = NL80211_IFTYPE_STATION;
+       il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+       il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+       /* initialize force reset */
+       il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+       /* Choose which receivers/antennas to use */
+       if (il->cfg->ops->hcmd->set_rxon_chain)
+               il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+
+       il_init_scan_params(il);
+
+       ret = il_init_channel_map(il);
+       if (ret) {
+               IL_ERR("initializing regulatory failed: %d\n", ret);
+               goto err;
+       }
+
+       ret = il_init_geos(il);
+       if (ret) {
+               IL_ERR("initializing geos failed: %d\n", ret);
+               goto err_free_channel_map;
+       }
+       il4965_init_hw_rates(il, il->ieee_rates);
+
+       return 0;
+
+err_free_channel_map:
+       il_free_channel_map(il);
+err:
+       return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+       il4965_calib_free_results(il);
+       il_free_geos(il);
+       il_free_channel_map(il);
+       kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+       il->hw_rev = _il_rd(il, CSR_HW_REV);
+       il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+       il->rev_id = il->pci_dev->revision;
+       D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static int
+il4965_set_hw_params(struct il_priv *il)
+{
+       il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+       il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+       if (il->cfg->mod_params->amsdu_size_8K)
+               il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+       else
+               il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+       il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+       if (il->cfg->mod_params->disable_11n)
+               il->cfg->sku &= ~IL_SKU_N;
+
+       /* Device-specific setup */
+       return il->cfg->ops->lib->set_hw_params(il);
+}
+
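+/*
+ * Map mac80211 access categories (0=VO, 1=VI, 2=BE, 3=BK) to the
+ * 4965 Tx FIFOs and to Tx queues 0-3 for the BSS context.
+ */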
+static const u8 il4965_bss_ac_to_fifo[] = {
+       IL_TX_FIFO_VO,
+       IL_TX_FIFO_VI,
+       IL_TX_FIFO_BE,
+       IL_TX_FIFO_BK,
+};
+
+static const u8 il4965_bss_ac_to_queue[] = {
+       0, 1, 2, 3,
+};
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       int err = 0;
+       struct il_priv *il;
+       struct ieee80211_hw *hw;
+       struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+       unsigned long flags;
+       u16 pci_cmd;
+
+       /************************
+        * 1. Allocating HW data
+        ************************/
+
+       hw = il_alloc_all(cfg);
+       if (!hw) {
+               err = -ENOMEM;
+               goto out;
+       }
+       il = hw->priv;
+       /* At this point both hw and il are allocated. */
+
+       il->ctx.ctxid = 0;
+
+       il->ctx.always_active = true;
+       il->ctx.is_active = true;
+       il->ctx.rxon_cmd = C_RXON;
+       il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+       il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+       il->ctx.qos_cmd = C_QOS_PARAM;
+       il->ctx.ap_sta_id = IL_AP_ID;
+       il->ctx.wep_key_cmd = C_WEPKEY;
+       il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
+       il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
+       il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
+       il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
+       il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
+       il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+       il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+       il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+       SET_IEEE80211_DEV(hw, &pdev->dev);
+
+       D_INFO("*** LOAD DRIVER ***\n");
+       il->cfg = cfg;
+       il->pci_dev = pdev;
+       il->inta_mask = CSR_INI_SET_MASK;
+
+       if (il_alloc_traffic_mem(il))
+               IL_ERR("Not enough memory to generate traffic log\n");
+
+       /**************************
+        * 2. Initializing PCI bus
+        **************************/
+       pci_disable_link_state(pdev,
+                              PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                              PCIE_LINK_STATE_CLKPM);
+
+       if (pci_enable_device(pdev)) {
+               err = -ENODEV;
+               goto out_ieee80211_free_hw;
+       }
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (err) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!err)
+                       err =
+                           pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               /* both attempts failed: */
+               if (err) {
+                       IL_WARN("No suitable DMA available.\n");
+                       goto out_pci_disable_device;
+               }
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err)
+               goto out_pci_disable_device;
+
+       pci_set_drvdata(pdev, il);
+
+       /***********************
+        * 3. Read REV register
+        ***********************/
+       il->hw_base = pci_iomap(pdev, 0, 0);
+       if (!il->hw_base) {
+               err = -ENODEV;
+               goto out_pci_release_regions;
+       }
+
+       D_INFO("pci_resource_len = 0x%08llx\n",
+              (unsigned long long)pci_resource_len(pdev, 0));
+       D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+       /* These spin locks are used in apm_ops.init and EEPROM access,
+        * so initialize them now.
+        */
+       spin_lock_init(&il->reg_lock);
+       spin_lock_init(&il->lock);
+
+       /*
+        * stop and reset the on-board processor just in case it is in a
+        * strange state ... like being left stranded by a primary kernel
+        * and this is now the kdump kernel trying to start up
+        */
+       _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+       il4965_hw_detect(il);
+       IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+       /* We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state */
+       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+       il4965_prepare_card_hw(il);
+       if (!il->hw_ready) {
+               IL_WARN("Failed, HW not ready\n");
+               goto out_iounmap;
+       }
+
+       /*****************
+        * 4. Read EEPROM
+        *****************/
+       /* Read the EEPROM */
+       err = il_eeprom_init(il);
+       if (err) {
+               IL_ERR("Unable to init EEPROM\n");
+               goto out_iounmap;
+       }
+       err = il4965_eeprom_check_version(il);
+       if (err)
+               goto out_free_eeprom;
+
+       /* extract MAC Address */
+       il4965_eeprom_get_mac(il, il->addresses[0].addr);
+       D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+       il->hw->wiphy->addresses = il->addresses;
+       il->hw->wiphy->n_addresses = 1;
+
+       /************************
+        * 5. Setup HW constants
+        ************************/
+       if (il4965_set_hw_params(il)) {
+               IL_ERR("failed to set hw parameters\n");
+               goto out_free_eeprom;
+       }
+
+       /*******************
+        * 6. Setup il
+        *******************/
+
+       err = il4965_init_drv(il);
+       if (err)
+               goto out_free_eeprom;
+       /* At this point both hw and il are initialized. */
+
+       /********************
+        * 7. Setup services
+        ********************/
+       spin_lock_irqsave(&il->lock, flags);
+       il_disable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       pci_enable_msi(il->pci_dev);
+
+       err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+       if (err) {
+               IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+               goto out_disable_msi;
+       }
+
+       il4965_setup_deferred_work(il);
+       il4965_setup_handlers(il);
+
+       /*********************************************
+        * 8. Enable interrupts and read RFKILL state
+        *********************************************/
+
+       /* enable rfkill interrupt: hw bug w/a */
+       pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+       if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+               pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+               pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+       }
+
+       il_enable_rfkill_int(il);
+
+       /* If platform's RF_KILL switch is NOT set to KILL */
+       if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+               clear_bit(S_RF_KILL_HW, &il->status);
+       else
+               set_bit(S_RF_KILL_HW, &il->status);
+
+       wiphy_rfkill_set_hw_state(il->hw->wiphy,
+                                 test_bit(S_RF_KILL_HW, &il->status));
+
+       il_power_initialize(il);
+
+       init_completion(&il->_4965.firmware_loading_complete);
+
+       err = il4965_request_firmware(il, true);
+       if (err)
+               goto out_destroy_workqueue;
+
+       return 0;
+
+out_destroy_workqueue:
+       destroy_workqueue(il->workqueue);
+       il->workqueue = NULL;
+       free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+       pci_disable_msi(il->pci_dev);
+       il4965_uninit_drv(il);
+out_free_eeprom:
+       il_eeprom_free(il);
+out_iounmap:
+       pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+       pci_set_drvdata(pdev, NULL);
+       pci_release_regions(pdev);
+out_pci_disable_device:
+       pci_disable_device(pdev);
+out_ieee80211_free_hw:
+       il_free_traffic_mem(il);
+       ieee80211_free_hw(il->hw);
+out:
+       return err;
+}
+
+static void __devexit
+il4965_pci_remove(struct pci_dev *pdev)
+{
+       struct il_priv *il = pci_get_drvdata(pdev);
+       unsigned long flags;
+
+       if (!il)
+               return;
+
+       wait_for_completion(&il->_4965.firmware_loading_complete);
+
+       D_INFO("*** UNLOAD DRIVER ***\n");
+
+       il_dbgfs_unregister(il);
+       sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+       /* The ieee80211_unregister_hw call will cause il_mac_stop and
+        * hence il4965_down to be called; since we are removing the device
+        * we need to set the S_EXIT_PENDING bit.
+        */
+       set_bit(S_EXIT_PENDING, &il->status);
+
+       il_leds_exit(il);
+
+       if (il->mac80211_registered) {
+               ieee80211_unregister_hw(il->hw);
+               il->mac80211_registered = 0;
+       } else {
+               il4965_down(il);
+       }
+
+       /*
+        * Make sure device is reset to low power before unloading driver.
+        * This may be redundant with il4965_down(), but there are paths to
+        * run il4965_down() without calling apm_ops.stop(), and there are
+        * paths to avoid running il4965_down() at all before leaving driver.
+        * This (inexpensive) call *makes sure* device is reset.
+        */
+       il_apm_stop(il);
+
+       /* make sure we flush any pending irq or
+        * tasklet for the driver
+        */
+       spin_lock_irqsave(&il->lock, flags);
+       il_disable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       il4965_synchronize_irq(il);
+
+       il4965_dealloc_ucode_pci(il);
+
+       if (il->rxq.bd)
+               il4965_rx_queue_free(il, &il->rxq);
+       il4965_hw_txq_ctx_free(il);
+
+       il_eeprom_free(il);
+
+       /*netif_stop_queue(dev); */
+       flush_workqueue(il->workqueue);
+
+       /* ieee80211_unregister_hw calls il_mac_stop, which flushes
+        * il->workqueue... so we can't take down the workqueue
+        * until now... */
+       destroy_workqueue(il->workqueue);
+       il->workqueue = NULL;
+       il_free_traffic_mem(il);
+
+       free_irq(il->pci_dev->irq, il);
+       pci_disable_msi(il->pci_dev);
+       pci_iounmap(pdev, il->hw_base);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       il4965_uninit_drv(il);
+
+       dev_kfree_skb(il->beacon_skb);
+
+       ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
+ * must be called with il->lock held and MAC access enabled
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+       il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
+       {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+       {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+       {0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+       .name = DRV_NAME,
+       .id_table = il4965_hw_card_ids,
+       .probe = il4965_pci_probe,
+       .remove = __devexit_p(il4965_pci_remove),
+       .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+       int ret;
+
+       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+       pr_info(DRV_COPYRIGHT "\n");
+
+       ret = il4965_rate_control_register();
+       if (ret) {
+               pr_err("Unable to register rate control algorithm: %d\n", ret);
+               return ret;
+       }
+
+       ret = pci_register_driver(&il4965_driver);
+       if (ret) {
+               pr_err("Unable to initialize PCI module\n");
+               goto error_register;
+       }
+
+       return ret;
+
+error_register:
+       il4965_rate_control_unregister();
+       return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+       pci_unregister_driver(&il4965_driver);
+       il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
+                  S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644 (file)
index 0000000..467d0cb
--- /dev/null
@@ -0,0 +1,2860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY      1
+#define IL_HT_NUMBER_TRY   3
+
+#define RATE_MAX_WINDOW                62      /* # tx in history win */
+#define RATE_MIN_FAILURE_TH            6       /* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH            8       /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX             15
+/* max time to accumulate rate history before a flush */
+#define RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+       RATE_6M_IDX, RATE_6M_IDX,
+       RATE_6M_IDX, RATE_6M_IDX,
+       RATE_6M_IDX,
+       RATE_6M_IDX, RATE_9M_IDX,
+       RATE_12M_IDX, RATE_18M_IDX,
+       RATE_24M_IDX, RATE_36M_IDX,
+       RATE_48M_IDX, RATE_54M_IDX
+};
+
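+/*
+ * Indexed by the current antenna bitmask; the value is the next
+ * antenna configuration to try when toggling antennas.
+ */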
+static const u8 ant_toggle_lookup[] = {
+       /*ANT_NONE -> */ ANT_NONE,
+       /*ANT_A    -> */ ANT_B,
+       /*ANT_B    -> */ ANT_C,
+       /*ANT_AB   -> */ ANT_BC,
+       /*ANT_C    -> */ ANT_A,
+       /*ANT_AC   -> */ ANT_AB,
+       /*ANT_BC   -> */ ANT_AC,
+       /*ANT_ABC  -> */ ANT_ABC,
+};
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+       [RATE_##r##M_IDX] = { RATE_##r##M_PLCP,      \
+                                   RATE_SISO_##s##M_PLCP, \
+                                   RATE_MIMO2_##s##M_PLCP,\
+                                   RATE_##r##M_IEEE,      \
+                                   RATE_##ip##M_IDX,    \
+                                   RATE_##in##M_IDX,    \
+                                   RATE_##rp##M_IDX,    \
+                                   RATE_##rn##M_IDX,    \
+                                   RATE_##pp##M_IDX,    \
+                                   RATE_##np##M_IDX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il_rate_info il_rates[RATE_COUNT] = {
+       IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),   /*  1mbps */
+       IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),         /*  2mbps */
+       IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),       /*5.5mbps */
+       IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),     /* 11mbps */
+       IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),         /*  6mbps */
+       IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),        /*  9mbps */
+       IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
+       IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
+       IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
+       IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
+       IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
+       IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+       IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
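+/*
+ * Convert a uCode rate_n_flags value into an index into il_rates[]:
+ * HT rates are derived from the MCS (skipping 9M, which has no HT
+ * equivalent), legacy rates are found by matching the PLCP value.
+ * Returns -1 if no match is found.
+ */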
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+       int idx = 0;
+
+       /* HT rate format */
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               idx = (rate_n_flags & 0xff);
+
+               if (idx >= RATE_MIMO2_6M_PLCP)
+                       idx = idx - RATE_MIMO2_6M_PLCP;
+
+               idx += IL_FIRST_OFDM_RATE;
+               /* skip 9M not supported in ht */
+               if (idx >= RATE_9M_IDX)
+                       idx += 1;
+               if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+                       return idx;
+
+               /* legacy rate format, search for match in table */
+       } else {
+               for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+                       if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+                               return idx;
+       }
+
+       return -1;
+}
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+                                        struct sk_buff *skb,
+                                        struct ieee80211_sta *sta,
+                                        struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+                                   struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+                                   bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+                                   u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *     1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+       {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202},  /* Norm */
+       {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210},  /* SGI */
+       {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381},  /* AGG */
+       {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+       {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+       {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+       {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+       {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683},        /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+       {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+       {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+       {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+       {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660},        /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+       {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289},        /* Norm */
+       {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293},        /* SGI */
+       {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922},        /* AGG */
+       {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966},        /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+       {"1", "BPSK DSSS"},
+       {"2", "QPSK DSSS"},
+       {"5.5", "BPSK CCK"},
+       {"11", "QPSK CCK"},
+       {"6", "BPSK 1/2"},
+       {"9", "BPSK 1/2"},
+       {"12", "QPSK 1/2"},
+       {"18", "QPSK 3/4"},
+       {"24", "16QAM 1/2"},
+       {"36", "16QAM 3/4"},
+       {"48", "64QAM 2/3"},
+       {"54", "64QAM 3/4"},
+       {"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM     (8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+       return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+       win->data = 0;
+       win->success_counter = 0;
+       win->success_ratio = IL_INVALID_VALUE;
+       win->counter = 0;
+       win->average_tpt = IL_INVALID_VALUE;
+       win->stamp = 0;
+}
+
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+       return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ *     Remove old data from the stats: all data older than
+ *     TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+       /* The oldest age we want to keep */
+       u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+       while (tl->queue_count && tl->time_stamp < oldest_time) {
+               tl->total -= tl->packet_count[tl->head];
+               tl->packet_count[tl->head] = 0;
+               tl->time_stamp += TID_QUEUE_CELL_SPACING;
+               tl->queue_count--;
+               tl->head++;
+               if (tl->head >= TID_QUEUE_MAX_SIZE)
+                       tl->head = 0;
+       }
+}
+
+/*
+ *     Increment the traffic load value for this tid and also remove
+ *     any old values that have aged past the tracking period.
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 idx;
+       struct il_traffic_load *tl = NULL;
+       u8 tid;
+
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       } else
+               return MAX_TID_COUNT;
+
+       if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+               return MAX_TID_COUNT;
+
+       tl = &lq_data->load[tid];
+
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       /* Happens only for the first packet. Initialize the data */
+       if (!(tl->queue_count)) {
+               tl->total = 1;
+               tl->time_stamp = curr_time;
+               tl->queue_count = 1;
+               tl->head = 0;
+               tl->packet_count[0] = 1;
+               return MAX_TID_COUNT;
+       }
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data that is older than */
+       /* TID_MAX_TIME_DIFF */
+       if (idx >= TID_QUEUE_MAX_SIZE)
+               il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+       idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+       tl->packet_count[idx] = tl->packet_count[idx] + 1;
+       tl->total = tl->total + 1;
+
+       if ((idx + 1) > tl->queue_count)
+               tl->queue_count = idx + 1;
+
+       return tid;
+}
+
+/*
+ *     Get the traffic load value for this tid.
+ */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+       u32 curr_time = jiffies_to_msecs(jiffies);
+       u32 time_diff;
+       s32 idx;
+       struct il_traffic_load *tl = NULL;
+
+       if (tid >= TID_MAX_LOAD_COUNT)
+               return 0;
+
+       tl = &(lq_data->load[tid]);
+
+       curr_time -= curr_time % TID_ROUND_VALUE;
+
+       if (!(tl->queue_count))
+               return 0;
+
+       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+       idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+       /* The history is too long: remove data that is older than */
+       /* TID_MAX_TIME_DIFF */
+       if (idx >= TID_QUEUE_MAX_SIZE)
+               il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+       return tl->total;
+}
+
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+                                u8 tid, struct ieee80211_sta *sta)
+{
+       int ret = -EAGAIN;
+       u32 load;
+
+       load = il4965_rs_tl_get_load(lq_data, tid);
+
+       if (load > IL_AGG_LOAD_THRESHOLD) {
+               D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+               ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+               if (ret == -EAGAIN) {
+                       /*
+                        * The driver and mac80211 are out of sync;
+                        * this might be caused by reloading the firmware.
+                        * Stop the Tx BA session here.
+                        */
+                       IL_ERR("Fail start Tx agg on tid: %d\n", tid);
+                       ieee80211_stop_tx_ba_session(sta, tid);
+               }
+       } else
+               D_HT("Aggregation not enabled for tid %d because load = %u\n",
+                    tid, load);
+
+       return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+                        struct ieee80211_sta *sta)
+{
+       if (tid < TID_MAX_LOAD_COUNT)
+               il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+       else
+               IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+                      TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+       return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+           !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+           !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an il_scale_tbl_info entry,
+ * wrapping a NULL pointer check on the throughput table.
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+       if (tbl->expected_tpt)
+               return tbl->expected_tpt[rs_idx];
+       return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate.  win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+                         int attempts, int successes)
+{
+       struct il_rate_scale_data *win = NULL;
+       static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
+       s32 fail_count, tpt;
+
+       if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+               return -EINVAL;
+
+       /* Select win for current tx bit rate */
+       win = &(tbl->win[scale_idx]);
+
+       /* Get expected throughput */
+       tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+       /*
+        * Keep track of only the latest 62 tx frame attempts in this rate's
+        * history win; anything older isn't really relevant any more.
+        * If we have filled up the sliding win, drop the oldest attempt;
+        * if the oldest attempt (highest bit in bitmap) shows "success",
+        * subtract "1" from the success counter (this is the main reason
+        * we keep these bitmaps!).
+        */
+       while (attempts > 0) {
+               if (win->counter >= RATE_MAX_WINDOW) {
+
+                       /* remove earliest */
+                       win->counter = RATE_MAX_WINDOW - 1;
+
+                       if (win->data & mask) {
+                               win->data &= ~mask;
+                               win->success_counter--;
+                       }
+               }
+
+               /* Increment frames-attempted counter */
+               win->counter++;
+
+               /* Shift bitmap by one frame to throw away oldest history */
+               win->data <<= 1;
+
+               /* Mark the most recent #successes attempts as successful */
+               if (successes > 0) {
+                       win->success_counter++;
+                       win->data |= 0x1;
+                       successes--;
+               }
+
+               attempts--;
+       }
+
+       /* Calculate current success ratio, avoid divide-by-0! */
+       if (win->counter > 0)
+               win->success_ratio =
+                   128 * (100 * win->success_counter) / win->counter;
+       else
+               win->success_ratio = IL_INVALID_VALUE;
+
+       fail_count = win->counter - win->success_counter;
+
+       /* Calculate average throughput, if we have enough history. */
+       if (fail_count >= RATE_MIN_FAILURE_TH ||
+           win->success_counter >= RATE_MIN_SUCCESS_TH)
+               win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+       else
+               win->average_tpt = IL_INVALID_VALUE;
+
+       /* Tag this win as having been updated */
+       win->stamp = jiffies;
+
+       return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+                            int idx, u8 use_green)
+{
+       u32 rate_n_flags = 0;
+
+       if (is_legacy(tbl->lq_type)) {
+               rate_n_flags = il_rates[idx].plcp;
+               if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+                       rate_n_flags |= RATE_MCS_CCK_MSK;
+
+       } else if (is_Ht(tbl->lq_type)) {
+               if (idx > IL_LAST_OFDM_RATE) {
+                       IL_ERR("Invalid HT rate idx %d\n", idx);
+                       idx = IL_LAST_OFDM_RATE;
+               }
+               rate_n_flags = RATE_MCS_HT_MSK;
+
+               if (is_siso(tbl->lq_type))
+                       rate_n_flags |= il_rates[idx].plcp_siso;
+               else
+                       rate_n_flags |= il_rates[idx].plcp_mimo2;
+       } else {
+               IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+       }
+
+       rate_n_flags |=
+           ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+       if (is_Ht(tbl->lq_type)) {
+               if (tbl->is_ht40) {
+                       if (tbl->is_dup)
+                               rate_n_flags |= RATE_MCS_DUP_MSK;
+                       else
+                               rate_n_flags |= RATE_MCS_HT40_MSK;
+               }
+               if (tbl->is_SGI)
+                       rate_n_flags |= RATE_MCS_SGI_MSK;
+
+               if (use_green) {
+                       rate_n_flags |= RATE_MCS_GF_MSK;
+                       if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+                               rate_n_flags &= ~RATE_MCS_SGI_MSK;
+                               IL_ERR("GF was set with SGI:SISO\n");
+                       }
+               }
+       }
+       return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+                               enum ieee80211_band band,
+                               struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+       u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+       u8 il4965_num_of_ant =
+           il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+       u8 mcs;
+
+       memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+       *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+       if (*rate_idx == RATE_INVALID) {
+               *rate_idx = -1;
+               return -EINVAL;
+       }
+       tbl->is_SGI = 0;        /* default legacy setup */
+       tbl->is_ht40 = 0;
+       tbl->is_dup = 0;
+       tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+       tbl->lq_type = LQ_NONE;
+       tbl->max_search = IL_MAX_SEARCH;
+
+       /* legacy rate format */
+       if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+               if (il4965_num_of_ant == 1) {
+                       if (band == IEEE80211_BAND_5GHZ)
+                               tbl->lq_type = LQ_A;
+                       else
+                               tbl->lq_type = LQ_G;
+               }
+               /* HT rate format */
+       } else {
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       tbl->is_SGI = 1;
+
+               if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+                   (rate_n_flags & RATE_MCS_DUP_MSK))
+                       tbl->is_ht40 = 1;
+
+               if (rate_n_flags & RATE_MCS_DUP_MSK)
+                       tbl->is_dup = 1;
+
+               mcs = il4965_rs_extract_rate(rate_n_flags);
+
+               /* SISO */
+               if (mcs <= RATE_SISO_60M_PLCP) {
+                       if (il4965_num_of_ant == 1)
+                               tbl->lq_type = LQ_SISO; /*else NONE */
+                       /* MIMO2 */
+               } else {
+                       if (il4965_num_of_ant == 2)
+                               tbl->lq_type = LQ_MIMO2;
+               }
+       }
+       return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+                        struct il_scale_tbl_info *tbl)
+{
+       u8 new_ant_type;
+
+       if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+               return 0;
+
+       if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+               return 0;
+
+       new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+       while (new_ant_type != tbl->ant_type &&
+              !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+               new_ant_type = ant_toggle_lookup[new_ant_type];
+
+       if (new_ant_type == tbl->ant_type)
+               return 0;
+
+       tbl->ant_type = new_ant_type;
+       *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+       *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+       return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct ieee80211_sta *sta)
+{
+       struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+       return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+           !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic
+ * available rates are returned.
+ *
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+                             struct ieee80211_hdr *hdr,
+                             enum il_table_type rate_type)
+{
+       if (is_legacy(rate_type)) {
+               return lq_sta->active_legacy_rate;
+       } else {
+               if (is_siso(rate_type))
+                       return lq_sta->active_siso_rate;
+               else
+                       return lq_sta->active_mimo2_rate;
+       }
+}
+
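+/*
+ * Find the next lower and next higher usable rates around idx within
+ * rate_mask.  The result packs both indices as (high << 8) | low,
+ * with RATE_INVALID marking a missing neighbour.  802.11a and HT walk
+ * the rate table literally; 2.4 GHz legacy follows the prev_rs/next_rs
+ * links in il_rates[].
+ */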
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+                           int rate_type)
+{
+       u8 high = RATE_INVALID;
+       u8 low = RATE_INVALID;
+
+       /* For 802.11a or HT, walk to the next literally adjacent rate in
+        * the rate table */
+       if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+               int i;
+               u32 mask;
+
+               /* Find the previous rate that is in the rate mask */
+               i = idx - 1;
+               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+                       if (rate_mask & mask) {
+                               low = i;
+                               break;
+                       }
+               }
+
+               /* Find the next rate that is in the rate mask */
+               i = idx + 1;
+               for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+                       if (rate_mask & mask) {
+                               high = i;
+                               break;
+                       }
+               }
+
+               return (high << 8) | low;
+       }
+
+       low = idx;
+       while (low != RATE_INVALID) {
+               low = il_rates[low].prev_rs;
+               if (low == RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << low))
+                       break;
+               D_RATE("Skipping masked lower rate: %d\n", low);
+       }
+
+       high = idx;
+       while (high != RATE_INVALID) {
+               high = il_rates[high].next_rs;
+               if (high == RATE_INVALID)
+                       break;
+               if (rate_mask & (1 << high))
+                       break;
+               D_RATE("Skipping masked higher rate: %d\n", high);
+       }
+
+       return (high << 8) | low;
+}
+
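+/*
+ * Compute the rate_n_flags value for the next lower rate to fall back
+ * to, switching from an HT table to the legacy table when HT is no
+ * longer possible and masking the candidates against the rates the
+ * station supports.
+ */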
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+                        struct il_scale_tbl_info *tbl, u8 scale_idx,
+                        u8 ht_possible)
+{
+       s32 low;
+       u16 rate_mask;
+       u16 high_low;
+       u8 switch_to_legacy = 0;
+       u8 is_green = lq_sta->is_green;
+       struct il_priv *il = lq_sta->drv;
+
+       /* check if we need to switch from HT to legacy rates.
+        * assumption is that mandatory rates (1Mbps or 6Mbps)
+        * are always supported (spec demand) */
+       if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+               switch_to_legacy = 1;
+               scale_idx = rs_ht_to_legacy[scale_idx];
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       tbl->lq_type = LQ_A;
+               else
+                       tbl->lq_type = LQ_G;
+
+               if (il4965_num_of_ant(tbl->ant_type) > 1)
+                       tbl->ant_type =
+                           il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+               tbl->is_ht40 = 0;
+               tbl->is_SGI = 0;
+               tbl->max_search = IL_MAX_SEARCH;
+       }
+
+       rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+       /* Mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               /* supp_rates has no CCK bits in A mode */
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       rate_mask =
+                           (u16) (rate_mask &
+                                  (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+               else
+                       rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+       }
+
+       /* If we switched from HT to legacy, check current rate */
+       if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+               low = scale_idx;
+               goto out;
+       }
+
+       high_low =
+           il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+
+       if (low == RATE_INVALID)
+               low = scale_idx;
+
+out:
+       return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+                         struct il_scale_tbl_info *b)
+{
+       return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+               a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+                   struct ieee80211_sta *sta, void *il_sta,
+                   struct sk_buff *skb)
+{
+       int legacy_success;
+       int retries;
+       int rs_idx, mac_idx, i;
+       struct il_lq_sta *lq_sta = il_sta;
+       struct il_link_quality_cmd *table;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct il_priv *il = (struct il_priv *)il_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       enum mac80211_rate_control_flags mac_flags;
+       u32 tx_rate;
+       struct il_scale_tbl_info tbl_type;
+       struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+       struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+       D_RATE("get frame ack response, update rate scale win\n");
+
+       /* Treat uninitialized rate scaling data the same as non-existent. */
+       if (!lq_sta) {
+               D_RATE("Station rate scaling not created yet.\n");
+               return;
+       } else if (!lq_sta->drv) {
+               D_RATE("Rate scaling not initialized yet.\n");
+               return;
+       }
+
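+       /* Only acked data frames contribute to the rate scaling statistics. */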
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           (info->flags & IEEE80211_TX_CTL_NO_ACK))
+               return;
+
+       /* This packet was aggregated but doesn't carry status info */
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+           !(info->flags & IEEE80211_TX_STAT_AMPDU))
+               return;
+
+       /*
+        * Ignore this Tx frame response if its initial rate doesn't match
+        * that of latest Link Quality command.  There may be stragglers
+        * from a previous Link Quality command, but we're no longer interested
+        * in those; they're either from the "active" mode while we're trying
+        * to check "search" mode, or a prior "search" mode after we've moved
+        * to a new "search" mode (which might become the new "active" mode).
+        */
+       table = &lq_sta->lq;
+       tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+       il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
+       if (il->band == IEEE80211_BAND_5GHZ)
+               rs_idx -= IL_FIRST_OFDM_RATE;
+       mac_flags = info->status.rates[0].flags;
+       mac_idx = info->status.rates[0].idx;
+       /* For HT packets, map MCS to PLCP */
+       if (mac_flags & IEEE80211_TX_RC_MCS) {
+               mac_idx &= RATE_MCS_CODE_MSK;   /* Remove # of streams */
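+               /* The PLCP rate table has a 9 Mbps entry with no HT MCS
+                * equivalent, so indices at or past that slot shift by one. */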
+               if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+                       mac_idx++;
+               /*
+                * mac80211 HT idx is always zero-idxed; we need to move
+                * HT OFDM rates after CCK rates in 2.4 GHz band
+                */
+               if (il->band == IEEE80211_BAND_2GHZ)
+                       mac_idx += IL_FIRST_OFDM_RATE;
+       }
+       /* Here we actually compare this rate to the latest LQ command */
+       if (mac_idx < 0 ||
+           tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+           tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+           tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+           tbl_type.ant_type != info->antenna_sel_tx ||
+           !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
+           || !!(tx_rate & RATE_MCS_GF_MSK) !=
+           !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
+               D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+                      rs_idx, tx_rate);
+               /*
+                * Since rates mis-match, the last LQ command may have failed.
+                * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
+                * ... driver.
+                */
+               lq_sta->missed_rate_counter++;
+               if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+                       lq_sta->missed_rate_counter = 0;
+                       il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+               }
+               /* Regardless, ignore this status info for outdated rate */
+               return;
+       } else
+               /* Rate did match, so reset the missed_rate_counter */
+               lq_sta->missed_rate_counter = 0;
+
+       /* Figure out if rate scale algorithm is in active or search table */
+       if (il4965_table_type_matches
+           (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+               curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+       } else if (il4965_table_type_matches
+                  (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+               curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       } else {
+               D_RATE("Neither active nor search matches tx rate\n");
+               tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+                      tmp_tbl->ant_type, tmp_tbl->is_SGI);
+               tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+               D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+                      tmp_tbl->ant_type, tmp_tbl->is_SGI);
+               D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+                      tbl_type.ant_type, tbl_type.is_SGI);
+               /*
+                * no matching table found, let's by-pass the data collection
+                * and continue to perform rate scale to find the rate table
+                */
+               il4965_rs_stay_in_table(lq_sta, true);
+               goto done;
+       }
+
+       /*
+        * Updating the frame history depends on whether packets were
+        * aggregated.
+        *
+        * For aggregation, all packets were transmitted at the same rate, the
+        * first idx into rate scale table.
+        */
+       if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+               tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+               il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+                                               &rs_idx);
+               il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+                                         info->status.ampdu_len,
+                                         info->status.ampdu_ack_len);
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += info->status.ampdu_ack_len;
+                       lq_sta->total_failed +=
+                           (info->status.ampdu_len -
+                            info->status.ampdu_ack_len);
+               }
+       } else {
+               /*
+                * For legacy, update frame history for each Tx retry.
+                */
+               retries = info->status.rates[0].count - 1;
+               /* HW doesn't send more than 15 retries */
+               retries = min(retries, 15);
+
+               /* The last transmission may have been successful */
+               legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+               /* Collect data for each rate used during failed TX attempts */
+               for (i = 0; i <= retries; ++i) {
+                       tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+                       il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+                                                       &tbl_type, &rs_idx);
+                       /*
+                        * Only collect stats if retried rate is in the same RS
+                        * table as active/search.
+                        */
+                       if (il4965_table_type_matches(&tbl_type, curr_tbl))
+                               tmp_tbl = curr_tbl;
+                       else if (il4965_table_type_matches
+                                (&tbl_type, other_tbl))
+                               tmp_tbl = other_tbl;
+                       else
+                               continue;
+                       il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+                                                 i < retries ? 0 :
+                                                 legacy_success);
+               }
+
+               /* Update success/fail counts if not searching for new mode */
+               if (lq_sta->stay_in_tbl) {
+                       lq_sta->total_success += legacy_success;
+                       lq_sta->total_failed += retries + (1 - legacy_success);
+               }
+       }
+       /* The last TX rate is cached in lq_sta; it's set in if/else above */
+       lq_sta->last_rate_n_flags = tx_rate;
+done:
+       /* See if there's a better rate or modulation mode to try. */
+       if (sta->supp_rates[sband->band])
+               il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+                           struct il_lq_sta *lq_sta)
+{
+       D_RATE("we are staying in the same table\n");
+       lq_sta->stay_in_tbl = 1;        /* only place this gets set */
+       if (is_legacy) {
+               lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+               lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+       } else {
+               lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+               lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+               lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+       }
+       lq_sta->table_count = 0;
+       lq_sta->total_failed = 0;
+       lq_sta->total_success = 0;
+       lq_sta->flush_timer = jiffies;
+       lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+                                struct il_scale_tbl_info *tbl)
+{
+       /* Used to choose among HT tables */
+       s32(*ht_tbl_pointer)[RATE_COUNT];
+
+       /* Check for invalid LQ type */
+       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Legacy rates have only one table */
+       if (is_legacy(tbl->lq_type)) {
+               tbl->expected_tpt = expected_tpt_legacy;
+               return;
+       }
+
+       /* Choose among many HT tables depending on number of streams
+        * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+        * status */
+       if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+               ht_tbl_pointer = expected_tpt_siso20MHz;
+       else if (is_siso(tbl->lq_type))
+               ht_tbl_pointer = expected_tpt_siso40MHz;
+       else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+               ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+       else                    /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+               ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+       if (!tbl->is_SGI && !lq_sta->is_agg)    /* Normal */
+               tbl->expected_tpt = ht_tbl_pointer[0];
+       else if (tbl->is_SGI && !lq_sta->is_agg)        /* SGI */
+               tbl->expected_tpt = ht_tbl_pointer[1];
+       else if (!tbl->is_SGI && lq_sta->is_agg)        /* AGG */
+               tbl->expected_tpt = ht_tbl_pointer[2];
+       else                    /* AGG+SGI */
+               tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+                       struct il_scale_tbl_info *tbl,  /* "search" */
+                       u16 rate_mask, s8 idx)
+{
+       /* "active" values */
+       struct il_scale_tbl_info *active_tbl =
+           &(lq_sta->lq_info[lq_sta->active_tbl]);
+       s32 active_sr = active_tbl->win[idx].success_ratio;
+       s32 active_tpt = active_tbl->expected_tpt[idx];
+
+       /* expected "search" throughput */
+       s32 *tpt_tbl = tbl->expected_tpt;
+
+       s32 new_rate, high, low, start_hi;
+       u16 high_low;
+       s8 rate = idx;
+
+       new_rate = high = low = start_hi = RATE_INVALID;
+
+       for (;;) {
+               high_low =
+                   il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+                                               tbl->lq_type);
+
+               low = high_low & 0xff;
+               high = (high_low >> 8) & 0xff;
+
+               /*
+                * Lower the "search" bit rate, to give new "search" mode
+                * approximately the same throughput as "active" if:
+                *
+                * 1) "Active" mode has been working modestly well (but not
+                *    great), and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above the actual
+                *    measured "active" throughput (but less than expected
+                *    "active" throughput under perfect conditions).
+                * OR
+                * 2) "Active" mode has been working perfectly or very well
+                *    and expected "search" throughput (under perfect
+                *    conditions) at candidate rate is above expected
+                *    "active" throughput (under perfect conditions).
+                */
+               if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+                    (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+                     && tpt_tbl[rate] <= active_tpt)) ||
+                   (active_sr >= RATE_SCALE_SWITCH &&
+                    tpt_tbl[rate] > active_tpt)) {
+
+                       /* (2nd or later pass)
+                        * If we've already tried to raise the rate, and are
+                        * now trying to lower it, use the higher rate. */
+                       if (start_hi != RATE_INVALID) {
+                               new_rate = start_hi;
+                               break;
+                       }
+
+                       new_rate = rate;
+
+                       /* Loop again with lower rate */
+                       if (low != RATE_INVALID)
+                               rate = low;
+
+                       /* Lower rate not available, use the original */
+                       else
+                               break;
+
+                       /* Else try to raise the "search" rate to match "active" */
+               } else {
+                       /* (2nd or later pass)
+                        * If we've already tried to lower the rate, and are
+                        * now trying to raise it, use the lower rate. */
+                       if (new_rate != RATE_INVALID)
+                               break;
+
+                       /* Loop again with higher rate */
+                       else if (high != RATE_INVALID) {
+                               start_hi = high;
+                               rate = high;
+
+                               /* Higher rate not available, use the original */
+                       } else {
+                               new_rate = rate;
+                               break;
+                       }
+               }
+       }
+
+       return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+                         struct ieee80211_conf *conf,
+                         struct ieee80211_sta *sta,
+                         struct il_scale_tbl_info *tbl, int idx)
+{
+       u16 rate_mask;
+       s32 rate;
+       s8 is_green = lq_sta->is_green;
+       struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+               return -1;
+
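+       /* A peer in static SM power save keeps only one receive chain
+        * active, so sending it two spatial streams would be wasted. */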
+       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
+           WLAN_HT_CAP_SM_PS_STATIC)
+               return -1;
+
+       /* Need both Tx chains/antennas to support MIMO */
+       if (il->hw_params.tx_chains_num < 2)
+               return -1;
+
+       D_RATE("LQ: try to switch to MIMO2\n");
+
+       tbl->lq_type = LQ_MIMO2;
+       tbl->is_dup = lq_sta->is_dup;
+       tbl->action = 0;
+       tbl->max_search = IL_MAX_SEARCH;
+       rate_mask = lq_sta->active_mimo2_rate;
+
+       if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+               tbl->is_ht40 = 1;
+       else
+               tbl->is_ht40 = 0;
+
+       il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+       rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+       D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+       if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+               D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+                      rate_mask);
+               return -1;
+       }
+       tbl->current_rate =
+           il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+       D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+              is_green);
+       return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+                        struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+                        struct il_scale_tbl_info *tbl, int idx)
+{
+       u16 rate_mask;
+       u8 is_green = lq_sta->is_green;
+       s32 rate;
+       struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+               return -1;
+
+       D_RATE("LQ: try to switch to SISO\n");
+
+       tbl->is_dup = lq_sta->is_dup;
+       tbl->lq_type = LQ_SISO;
+       tbl->action = 0;
+       tbl->max_search = IL_MAX_SEARCH;
+       rate_mask = lq_sta->active_siso_rate;
+
+       if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+               tbl->is_ht40 = 1;
+       else
+               tbl->is_ht40 = 0;
+
+       if (is_green)
+               tbl->is_SGI = 0;        /*11n spec: no SGI in SISO+Greenfield */
+
+       il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+       rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+       D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+       if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+               D_RATE("can not switch with idx %d rate mask %x\n", rate,
+                      rate_mask);
+               return -1;
+       }
+       tbl->current_rate =
+           il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+       D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+              is_green);
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+                           struct ieee80211_conf *conf,
+                           struct ieee80211_sta *sta, int idx)
+{
+       struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct il_scale_tbl_info *search_tbl =
+           &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct il_rate_scale_data *win = &(tbl->win[idx]);
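+       /* Size for the memcpy()s below: copy the mode/config fields of
+        * il_scale_tbl_info but not the per-rate history windows at its end. */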
+       u32 sz =
+           (sizeof(struct il_scale_tbl_info) -
+            (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+       u8 tx_chains_num = il->hw_params.tx_chains_num;
+       int ret = 0;
+       u8 update_search_tbl_counter = 0;
+
+       tbl->action = IL_LEGACY_SWITCH_SISO;
+
+       start_action = tbl->action;
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IL_LEGACY_SWITCH_ANTENNA1:
+               case IL_LEGACY_SWITCH_ANTENNA2:
+                       D_RATE("LQ: Legacy toggle Antenna\n");
+
+                       if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+                            tx_chains_num <= 1) ||
+                           (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+                            tx_chains_num <= 2))
+                               break;
+
+                       /* Don't change antenna if success has been great */
+                       if (win->success_ratio >= IL_RS_GOOD_RATIO)
+                               break;
+
+                       /* Set up search table to try other antenna */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (il4965_rs_toggle_antenna
+                           (valid_tx_ant, &search_tbl->current_rate,
+                            search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               il4965_rs_set_expected_tpt_table(lq_sta,
+                                                                search_tbl);
+                               goto out;
+                       }
+                       break;
+               case IL_LEGACY_SWITCH_SISO:
+                       D_RATE("LQ: Legacy switch to SISO\n");
+
+                       /* Set up search table to try SISO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+                       ret =
+                           il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+                                                    search_tbl, idx);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+
+                       break;
+               case IL_LEGACY_SWITCH_MIMO2_AB:
+               case IL_LEGACY_SWITCH_MIMO2_AC:
+               case IL_LEGACY_SWITCH_MIMO2_BC:
+                       D_RATE("LQ: Legacy switch to MIMO2\n");
+
+                       /* Set up search table to try MIMO */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!il4965_rs_is_valid_ant
+                           (valid_tx_ant, search_tbl->ant_type))
+                               break;
+
+                       ret =
+                           il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+                                                     search_tbl, idx);
+                       if (!ret) {
+                               lq_sta->action_counter = 0;
+                               goto out;
+                       }
+                       break;
+               }
+               tbl->action++;
+               if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+                       tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+               tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+       return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+                            struct ieee80211_conf *conf,
+                            struct ieee80211_sta *sta, int idx)
+{
+       u8 is_green = lq_sta->is_green;
+       struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct il_scale_tbl_info *search_tbl =
+           &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct il_rate_scale_data *win = &(tbl->win[idx]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       u32 sz =
+           (sizeof(struct il_scale_tbl_info) -
+            (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+       u8 tx_chains_num = il->hw_params.tx_chains_num;
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       start_action = tbl->action;
+
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IL_SISO_SWITCH_ANTENNA1:
+               case IL_SISO_SWITCH_ANTENNA2:
+                       D_RATE("LQ: SISO toggle Antenna\n");
+                       if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+                            tx_chains_num <= 1) ||
+                           (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+                            tx_chains_num <= 2))
+                               break;
+
+                       if (win->success_ratio >= IL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (il4965_rs_toggle_antenna
+                           (valid_tx_ant, &search_tbl->current_rate,
+                            search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IL_SISO_SWITCH_MIMO2_AB:
+               case IL_SISO_SWITCH_MIMO2_AC:
+               case IL_SISO_SWITCH_MIMO2_BC:
+                       D_RATE("LQ: SISO switch to MIMO2\n");
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = 0;
+
+                       if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+                               search_tbl->ant_type = ANT_AB;
+                       else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+                               search_tbl->ant_type = ANT_AC;
+                       else
+                               search_tbl->ant_type = ANT_BC;
+
+                       if (!il4965_rs_is_valid_ant
+                           (valid_tx_ant, search_tbl->ant_type))
+                               break;
+
+                       ret =
+                           il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+                                                     search_tbl, idx);
+                       if (!ret)
+                               goto out;
+                       break;
+               case IL_SISO_SWITCH_GI:
+                       if (!tbl->is_ht40 &&
+                           !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 &&
+                           !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (is_green) {
+                               if (!tbl->is_SGI)
+                                       break;
+                               else
+                                       IL_ERR("SGI was set in GF+SISO\n");
+                       }
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[idx])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                           il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+                                                        is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+               }
+               tbl->action++;
+               if (tbl->action > IL_SISO_SWITCH_GI)
+                       tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+
+out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IL_SISO_SWITCH_GI)
+               tbl->action = IL_SISO_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+                             struct ieee80211_conf *conf,
+                             struct ieee80211_sta *sta, int idx)
+{
+       s8 is_green = lq_sta->is_green;
+       struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+       struct il_scale_tbl_info *search_tbl =
+           &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+       struct il_rate_scale_data *win = &(tbl->win[idx]);
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       u32 sz =
+           (sizeof(struct il_scale_tbl_info) -
+            (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+       u8 start_action;
+       u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+       u8 tx_chains_num = il->hw_params.tx_chains_num;
+       u8 update_search_tbl_counter = 0;
+       int ret;
+
+       start_action = tbl->action;
+       for (;;) {
+               lq_sta->action_counter++;
+               switch (tbl->action) {
+               case IL_MIMO2_SWITCH_ANTENNA1:
+               case IL_MIMO2_SWITCH_ANTENNA2:
+                       D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+                       if (tx_chains_num <= 2)
+                               break;
+
+                       if (win->success_ratio >= IL_RS_GOOD_RATIO)
+                               break;
+
+                       memcpy(search_tbl, tbl, sz);
+                       if (il4965_rs_toggle_antenna
+                           (valid_tx_ant, &search_tbl->current_rate,
+                            search_tbl)) {
+                               update_search_tbl_counter = 1;
+                               goto out;
+                       }
+                       break;
+               case IL_MIMO2_SWITCH_SISO_A:
+               case IL_MIMO2_SWITCH_SISO_B:
+               case IL_MIMO2_SWITCH_SISO_C:
+                       D_RATE("LQ: MIMO2 switch to SISO\n");
+
+                       /* Set up new search table for SISO */
+                       memcpy(search_tbl, tbl, sz);
+
+                       if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+                               search_tbl->ant_type = ANT_A;
+                       else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+                               search_tbl->ant_type = ANT_B;
+                       else
+                               search_tbl->ant_type = ANT_C;
+
+                       if (!il4965_rs_is_valid_ant
+                           (valid_tx_ant, search_tbl->ant_type))
+                               break;
+
+                       ret =
+                           il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+                                                    search_tbl, idx);
+                       if (!ret)
+                               goto out;
+
+                       break;
+
+               case IL_MIMO2_SWITCH_GI:
+                       if (!tbl->is_ht40 &&
+                           !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+                               break;
+                       if (tbl->is_ht40 &&
+                           !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+                               break;
+
+                       D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+                       /* Set up new search table for MIMO2 */
+                       memcpy(search_tbl, tbl, sz);
+                       search_tbl->is_SGI = !tbl->is_SGI;
+                       il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+                       /*
+                        * If active table already uses the fastest possible
+                        * modulation (dual stream with short guard interval),
+                        * and it's working well, there's no need to look
+                        * for a better type of modulation!
+                        */
+                       if (tbl->is_SGI) {
+                               s32 tpt = lq_sta->last_tpt / 100;
+                               if (tpt >= search_tbl->expected_tpt[idx])
+                                       break;
+                       }
+                       search_tbl->current_rate =
+                           il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+                                                        is_green);
+                       update_search_tbl_counter = 1;
+                       goto out;
+
+               }
+               tbl->action++;
+               if (tbl->action > IL_MIMO2_SWITCH_GI)
+                       tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+               if (tbl->action == start_action)
+                       break;
+       }
+       search_tbl->lq_type = LQ_NONE;
+       return 0;
+out:
+       lq_sta->search_better_tbl = 1;
+       tbl->action++;
+       if (tbl->action > IL_MIMO2_SWITCH_GI)
+               tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+       if (update_search_tbl_counter)
+               search_tbl->action = tbl->action;
+
+       return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+       struct il_scale_tbl_info *tbl;
+       int i;
+       int active_tbl;
+       int flush_interval_passed = 0;
+       struct il_priv *il;
+
+       il = lq_sta->drv;
+       active_tbl = lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       /* If we've been disallowing search, see if we should now allow it */
+       if (lq_sta->stay_in_tbl) {
+
+               /* Elapsed time using current modulation mode */
+               if (lq_sta->flush_timer)
+                       flush_interval_passed =
+                           time_after(jiffies,
+                                      (unsigned long)(lq_sta->flush_timer +
+                                                      RATE_SCALE_FLUSH_INTVL));
+
+               /*
+                * Check if we should allow search for new modulation mode.
+                * If many frames have failed or succeeded, or we've used
+                * this same modulation for a long time, allow search, and
+                * reset history stats that keep track of whether we should
+                * allow a new search.  Also (below) reset all bitmaps and
+                * stats in active history.
+                */
+               if (force_search ||
+                   lq_sta->total_failed > lq_sta->max_failure_limit ||
+                   lq_sta->total_success > lq_sta->max_success_limit ||
+                   (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+                    flush_interval_passed)) {
+                       D_RATE("LQ: stay is expired %d %d %d\n",
+                              lq_sta->total_failed, lq_sta->total_success,
+                              flush_interval_passed);
+
+                       /* Allow search for new mode */
+                       lq_sta->stay_in_tbl = 0;        /* only place reset */
+                       lq_sta->total_failed = 0;
+                       lq_sta->total_success = 0;
+                       lq_sta->flush_timer = 0;
+
+                       /*
+                        * Else if we've used this modulation mode for enough repetitions
+                        * (regardless of elapsed time or success/failure), reset
+                        * history bitmaps and rate-specific stats for all rates in
+                        * active table.
+                        */
+               } else {
+                       lq_sta->table_count++;
+                       if (lq_sta->table_count >= lq_sta->table_count_limit) {
+                               lq_sta->table_count = 0;
+
+                               D_RATE("LQ: stay in table clear win\n");
+                               for (i = 0; i < RATE_COUNT; i++)
+                                       il4965_rs_rate_scale_clear_win(
+                                               &(tbl->win[i]));
+                       }
+               }
+
+               /* If transitioning to allow "search", reset all history
+                * bitmaps and stats in active table (this will become the new
+                * "search" table). */
+               if (!lq_sta->stay_in_tbl) {
+                       for (i = 0; i < RATE_COUNT; i++)
+                               il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+               }
+       }
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
+                         struct il_lq_sta *lq_sta,
+                         struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+       u32 rate;
+
+       /* Update uCode's rate table. */
+       rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+       il4965_rs_fill_link_cmd(il, lq_sta, rate);
+       il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+                            struct ieee80211_sta *sta,
+                            struct il_lq_sta *lq_sta)
+{
+       struct ieee80211_hw *hw = il->hw;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int low = RATE_INVALID;
+       int high = RATE_INVALID;
+       int idx;
+       int i;
+       struct il_rate_scale_data *win = NULL;
+       int current_tpt = IL_INVALID_VALUE;
+       int low_tpt = IL_INVALID_VALUE;
+       int high_tpt = IL_INVALID_VALUE;
+       u32 fail_count;
+       s8 scale_action = 0;
+       u16 rate_mask;
+       u8 update_lq = 0;
+       struct il_scale_tbl_info *tbl, *tbl1;
+       u16 rate_scale_idx_msk = 0;
+       u8 is_green = 0;
+       u8 active_tbl = 0;
+       u8 done_search = 0;
+       u16 high_low;
+       s32 sr;
+       u8 tid = MAX_TID_COUNT;
+       struct il_tid_data *tid_data;
+       struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+       D_RATE("rate scale calculate new rate for skb\n");
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       /* TODO: this could probably be improved.. */
+       if (!ieee80211_is_data(hdr->frame_control) ||
+           (info->flags & IEEE80211_TX_CTL_NO_ACK))
+               return;
+
+       lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+       tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+       if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+               tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+               if (tid_data->agg.state == IL_AGG_OFF)
+                       lq_sta->is_agg = 0;
+               else
+                       lq_sta->is_agg = 1;
+       } else
+               lq_sta->is_agg = 0;
+
+       /*
+        * Select rate-scale / modulation-mode table to work with in
+        * the rest of this function:  "search" if searching for better
+        * modulation mode, or "active" if doing rate scaling within a mode.
+        */
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+       if (is_legacy(tbl->lq_type))
+               lq_sta->is_green = 0;
+       else
+               lq_sta->is_green = il4965_rs_use_green(sta);
+       is_green = lq_sta->is_green;
+
+       /* current tx rate */
+       idx = lq_sta->last_txrate_idx;
+
+       D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+       /* rates available for this association, and for modulation mode */
+       rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+       D_RATE("mask 0x%04X\n", rate_mask);
+
+       /* mask with station rate restriction */
+       if (is_legacy(tbl->lq_type)) {
+               if (lq_sta->band == IEEE80211_BAND_5GHZ)
+                       /* supp_rates has no CCK bits in A mode */
+                       rate_scale_idx_msk =
+                           (u16) (rate_mask &
+                                  (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+               else
+                       rate_scale_idx_msk =
+                           (u16) (rate_mask & lq_sta->supp_rates);
+
+       } else
+               rate_scale_idx_msk = rate_mask;
+
+       if (!rate_scale_idx_msk)
+               rate_scale_idx_msk = rate_mask;
+
+       if (!((1 << idx) & rate_scale_idx_msk)) {
+               IL_ERR("Current Rate is not valid\n");
+               if (lq_sta->search_better_tbl) {
+                       /* revert to active table if search table is not valid */
+                       tbl->lq_type = LQ_NONE;
+                       lq_sta->search_better_tbl = 0;
+                       tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+                       /* get "active" rate info */
+                       idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+                       il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+                                                     is_green);
+               }
+               return;
+       }
+
+       /* Get expected throughput table and history win for current rate */
+       if (!tbl->expected_tpt) {
+               IL_ERR("tbl->expected_tpt is NULL\n");
+               return;
+       }
+
+       /* force user max rate if set by user */
+       if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+               idx = lq_sta->max_rate_idx;
+               update_lq = 1;
+               win = &(tbl->win[idx]);
+               goto lq_update;
+       }
+
+       win = &(tbl->win[idx]);
+
+       /*
+        * If there is not enough history to calculate actual average
+        * throughput, keep analyzing results of more tx frames, without
+        * changing rate or mode (bypass most of the rest of this function).
+        * Set up new rate table in uCode only if old rate is not supported
+        * in current association (use new rate found above).
+        */
+       fail_count = win->counter - win->success_counter;
+       if (fail_count < RATE_MIN_FAILURE_TH &&
+           win->success_counter < RATE_MIN_SUCCESS_TH) {
+               D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
+                      win->success_counter, win->counter, idx);
+
+               /* Can't calculate this yet; not enough history */
+               win->average_tpt = IL_INVALID_VALUE;
+
+               /* Should we stay with this modulation mode,
+                * or search for a new one? */
+               il4965_rs_stay_in_table(lq_sta, false);
+
+               goto out;
+       }
+       /* Else we have enough samples; calculate estimate of
+        * actual average throughput */
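+       /* win->success_ratio is kept scaled by 128, so this computes the
+        * expected throughput weighted by the measured success ratio
+        * (the +64 rounds the division). */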
+       if (win->average_tpt !=
+           ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+               IL_ERR("expected_tpt should have been calculated by now\n");
+               win->average_tpt =
+                   ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+       }
+
+       /* If we are searching for better modulation mode, check success. */
+       if (lq_sta->search_better_tbl) {
+               /* If good success, continue using the "search" mode;
+                * no need to send new link quality command, since we're
+                * continuing to use the setup that we've been trying. */
+               if (win->average_tpt > lq_sta->last_tpt) {
+
+                       D_RATE("LQ: SWITCHING TO NEW TBL "
+                              "suc=%d cur-tpt=%d old-tpt=%d\n",
+                              win->success_ratio, win->average_tpt,
+                              lq_sta->last_tpt);
+
+                       if (!is_legacy(tbl->lq_type))
+                               lq_sta->enable_counter = 1;
+
+                       /* Swap tables; "search" becomes "active" */
+                       lq_sta->active_tbl = active_tbl;
+                       current_tpt = win->average_tpt;
+
+                       /* Else poor success; go back to mode in "active" table */
+               } else {
+
+                       D_RATE("LQ: GOING BACK TO THE OLD TBL "
+                              "suc=%d cur-tpt=%d old-tpt=%d\n",
+                              win->success_ratio, win->average_tpt,
+                              lq_sta->last_tpt);
+
+                       /* Nullify "search" table */
+                       tbl->lq_type = LQ_NONE;
+
+                       /* Revert to "active" table */
+                       active_tbl = lq_sta->active_tbl;
+                       tbl = &(lq_sta->lq_info[active_tbl]);
+
+                       /* Revert to "active" rate and throughput info */
+                       idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+                       current_tpt = lq_sta->last_tpt;
+
+                       /* Need to set up a new rate table in uCode */
+                       update_lq = 1;
+               }
+
+               /* Either way, we've made a decision; modulation mode
+                * search is done, allow rate adjustment next time. */
+               lq_sta->search_better_tbl = 0;
+               done_search = 1;        /* Don't switch modes below! */
+               goto lq_update;
+       }
+
+       /* (Else) not in search of better modulation mode, try for better
+        * starting rate, while staying in this mode. */
+       high_low =
+           il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+                                       tbl->lq_type);
+       low = high_low & 0xff;
+       high = (high_low >> 8) & 0xff;
+
+       /* If the user set a max rate, don't allow anything above that constraint */
+       if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+               high = RATE_INVALID;
+
+       sr = win->success_ratio;
+
+       /* Collect measured throughputs for current and adjacent rates */
+       current_tpt = win->average_tpt;
+       if (low != RATE_INVALID)
+               low_tpt = tbl->win[low].average_tpt;
+       if (high != RATE_INVALID)
+               high_tpt = tbl->win[high].average_tpt;
+
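+       /* Decide whether to change rate: scale_action is -1 to step down one
+        * rate, 0 to stay, +1 to step up (applied in the switch below). */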
+       scale_action = 0;
+
+       /* Too many failures, decrease rate */
+       if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+               D_RATE("decrease rate because of low success_ratio\n");
+               scale_action = -1;
+
+               /* No throughput measured yet for adjacent rates; try increase. */
+       } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+               if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+                       scale_action = 1;
+               else if (low != RATE_INVALID)
+                       scale_action = 0;
+       }
+
+       /* Both adjacent throughputs are measured, but neither one has better
+        * throughput; we're using the best rate, don't change it! */
+       else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+                low_tpt < current_tpt && high_tpt < current_tpt)
+               scale_action = 0;
+
+       /* At least one adjacent rate's throughput is measured,
+        * and may have better performance. */
+       else {
+               /* Higher adjacent rate's throughput is measured */
+               if (high_tpt != IL_INVALID_VALUE) {
+                       /* Higher rate has better throughput */
+                       if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+                               scale_action = 1;
+                       else
+                               scale_action = 0;
+
+                       /* Lower adjacent rate's throughput is measured */
+               } else if (low_tpt != IL_INVALID_VALUE) {
+                       /* Lower rate has better throughput */
+                       if (low_tpt > current_tpt) {
+                               D_RATE("decrease rate because of low tpt\n");
+                               scale_action = -1;
+                       } else if (sr >= RATE_INCREASE_TH) {
+                               scale_action = 1;
+                       }
+               }
+       }
+
+       /* Sanity check; asked for decrease, but success rate or throughput
+        * has been good at old rate.  Don't change it. */
+       if (scale_action == -1 && low != RATE_INVALID &&
+           (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+               scale_action = 0;
+
+       switch (scale_action) {
+       case -1:
+               /* Decrease starting rate, update uCode's rate table */
+               if (low != RATE_INVALID) {
+                       update_lq = 1;
+                       idx = low;
+               }
+
+               break;
+       case 1:
+               /* Increase starting rate, update uCode's rate table */
+               if (high != RATE_INVALID) {
+                       update_lq = 1;
+                       idx = high;
+               }
+
+               break;
+       case 0:
+               /* No change */
+       default:
+               break;
+       }
+
+       D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
+              idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+       /* Replace uCode's rate table for the destination station. */
+       if (update_lq)
+               il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+                                             is_green);
+
+       /* Should we stay with this modulation mode,
+        * or search for a new one? */
+       il4965_rs_stay_in_table(lq_sta, false);
+
+       /*
+        * Search for a new modulation mode if we're:
+        * 1)  Not changing rates right now
+        * 2)  Not just finishing up a search
+        * 3)  Allowing a new search
+        * 4)  Have at least one tx history sample at the current rate
+        */
+       if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+               /* Save current throughput to compare with "search" throughput */
+               lq_sta->last_tpt = current_tpt;
+
+               /* Select a new "search" modulation mode to try.
+                * If one is found, set up the new "search" table. */
+               if (is_legacy(tbl->lq_type))
+                       il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+               else if (is_siso(tbl->lq_type))
+                       il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+                                                    idx);
+               else            /* (is_mimo2(tbl->lq_type)) */
+                       il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+                                                     idx);
+
+               /* If new "search" mode was selected, set up in uCode table */
+               if (lq_sta->search_better_tbl) {
+                       /* Access the "search" table, clear its history. */
+                       tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+                       for (i = 0; i < RATE_COUNT; i++)
+                               il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+                       /* Use new "search" start rate */
+                       idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+                       D_RATE("Switch current mcs: %X idx: %d\n",
+                              tbl->current_rate, idx);
+                       il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+                       il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+               } else
+                       done_search = 1;
+       }
+
+       if (done_search && !lq_sta->stay_in_tbl) {
+               /* If the "active" (non-search) mode was legacy,
+                * and we've tried switching antennas,
+                * but we haven't been able to try HT modes (not available),
+                * stay with best antenna legacy modulation for a while
+                * before next round of mode comparisons. */
+               tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+               if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+                   lq_sta->action_counter > tbl1->max_search) {
+                       D_RATE("LQ: STAY in legacy table\n");
+                       il4965_rs_set_stay_in_table(il, 1, lq_sta);
+               }
+
+               /* If we're in an HT mode, and all 3 mode switch actions
+                * have been tried and compared, stay in this best modulation
+                * mode for a while before next round of mode comparisons. */
+               if (lq_sta->enable_counter &&
+                   lq_sta->action_counter >= tbl1->max_search) {
+                       if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+                           (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+                           tid != MAX_TID_COUNT) {
+                               tid_data =
+                                   &il->stations[lq_sta->lq.sta_id].tid[tid];
+                               if (tid_data->agg.state == IL_AGG_OFF) {
+                                       D_RATE("try to aggregate tid %d\n",
+                                              tid);
+                                       il4965_rs_tl_turn_on_agg(il, tid,
+                                                                lq_sta, sta);
+                               }
+                       }
+                       il4965_rs_set_stay_in_table(il, 0, lq_sta);
+               }
+       }
+
+out:
+       tbl->current_rate =
+           il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+       i = idx;
+       lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+                       struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+       struct il_scale_tbl_info *tbl;
+       int rate_idx;
+       int i;
+       u32 rate;
+       u8 use_green = il4965_rs_use_green(sta);
+       u8 active_tbl = 0;
+       u8 valid_tx_ant;
+       struct il_station_priv *sta_priv;
+       struct il_rxon_context *ctx;
+
+       if (!sta || !lq_sta)
+               return;
+
+       sta_priv = (void *)sta->drv_priv;
+       ctx = sta_priv->common.ctx;
+
+       i = lq_sta->last_txrate_idx;
+
+       valid_tx_ant = il->hw_params.valid_tx_ant;
+
+       if (!lq_sta->search_better_tbl)
+               active_tbl = lq_sta->active_tbl;
+       else
+               active_tbl = 1 - lq_sta->active_tbl;
+
+       tbl = &(lq_sta->lq_info[active_tbl]);
+
+       if (i < 0 || i >= RATE_COUNT)
+               i = 0;
+
+       rate = il_rates[i].plcp;
+       tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+       rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+       if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+               rate |= RATE_MCS_CCK_MSK;
+
+       il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+       if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+               il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+       rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+       tbl->current_rate = rate;
+       il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+       il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+       il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+       il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+                  struct ieee80211_tx_rate_control *txrc)
+{
+
+       struct sk_buff *skb = txrc->skb;
+       struct ieee80211_supported_band *sband = txrc->sband;
+       struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct il_lq_sta *lq_sta = il_sta;
+       int rate_idx;
+
+       D_RATE("rate scale calculate new rate for skb\n");
+
+       /* Get max rate if user set max rate */
+       if (lq_sta) {
+               lq_sta->max_rate_idx = txrc->max_rate_idx;
+               if (sband->band == IEEE80211_BAND_5GHZ &&
+                   lq_sta->max_rate_idx != -1)
+                       lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+               if (lq_sta->max_rate_idx < 0 ||
+                   lq_sta->max_rate_idx >= RATE_COUNT)
+                       lq_sta->max_rate_idx = -1;
+       }
+
+       /* Treat uninitialized rate scaling data same as non-existing. */
+       if (lq_sta && !lq_sta->drv) {
+               D_RATE("Rate scaling not initialized yet.\n");
+               il_sta = NULL;
+       }
+
+       /* Send management frames and NO_ACK data using lowest rate. */
+       if (rate_control_send_low(sta, il_sta, txrc))
+               return;
+
+       if (!lq_sta)
+               return;
+
+       rate_idx = lq_sta->last_txrate_idx;
+
+       if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+               rate_idx -= IL_FIRST_OFDM_RATE;
+               /* 6M and 9M shared same MCS idx */
+               rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+               if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+                   RATE_MIMO2_6M_PLCP)
+                       rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+               info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+                       info->control.rates[0].flags |=
+                           IEEE80211_TX_RC_SHORT_GI;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+                       info->control.rates[0].flags |=
+                           IEEE80211_TX_RC_DUP_DATA;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+                       info->control.rates[0].flags |=
+                           IEEE80211_TX_RC_40_MHZ_WIDTH;
+               if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+                       info->control.rates[0].flags |=
+                           IEEE80211_TX_RC_GREEN_FIELD;
+       } else {
+               /* Check for invalid rates */
+               if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+                   (sband->band == IEEE80211_BAND_5GHZ &&
+                    rate_idx < IL_FIRST_OFDM_RATE))
+                       rate_idx = rate_lowest_index(sband, sta);
+               /* On valid 5 GHz rate, adjust idx */
+               else if (sband->band == IEEE80211_BAND_5GHZ)
+                       rate_idx -= IL_FIRST_OFDM_RATE;
+               info->control.rates[0].flags = 0;
+       }
+       info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+       struct il_station_priv *sta_priv =
+           (struct il_station_priv *)sta->drv_priv;
+       struct il_priv *il;
+
+       il = (struct il_priv *)il_rate;
+       D_RATE("create station rate scale win\n");
+
+       return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+       int i, j;
+       struct ieee80211_hw *hw = il->hw;
+       struct ieee80211_conf *conf = &il->hw->conf;
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct il_station_priv *sta_priv;
+       struct il_lq_sta *lq_sta;
+       struct ieee80211_supported_band *sband;
+
+       sta_priv = (struct il_station_priv *)sta->drv_priv;
+       lq_sta = &sta_priv->lq_sta;
+       sband = hw->wiphy->bands[conf->channel->band];
+
+       lq_sta->lq.sta_id = sta_id;
+
+       for (j = 0; j < LQ_SIZE; j++)
+               for (i = 0; i < RATE_COUNT; i++)
+                       il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+                                                      win[i]);
+
+       lq_sta->flush_timer = 0;
+       lq_sta->supp_rates = sta->supp_rates[sband->band];
+       for (j = 0; j < LQ_SIZE; j++)
+               for (i = 0; i < RATE_COUNT; i++)
+                       il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+                                                      win[i]);
+
+       D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
+              sta_id);
+       /* TODO: what is a good starting rate for STA? About middle? Maybe not
+        * the lowest or the highest rate.. Could consider using RSSI from
+        * previous packets? Need to have IEEE 802.1X auth succeed immediately
+        * after assoc.. */
+
+       lq_sta->is_dup = 0;
+       lq_sta->max_rate_idx = -1;
+       lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+       lq_sta->is_green = il4965_rs_use_green(sta);
+       lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+       lq_sta->band = il->band;
+       /*
+        * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+        * supp_rates[] does not; shift to convert format, force 9 MBits off.
+        */
+       lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+       lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+       lq_sta->active_siso_rate &= ~((u16) 0x2);
+       lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+       /* Same here */
+       lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+       lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+       lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+       lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+       /* These values will be overridden later */
+       lq_sta->lq.general_params.single_stream_ant_msk =
+           il4965_first_antenna(il->hw_params.valid_tx_ant);
+       lq_sta->lq.general_params.dual_stream_ant_msk =
+           il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+                                                              valid_tx_ant);
+       if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+               lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+       } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+               lq_sta->lq.general_params.dual_stream_ant_msk =
+                   il->hw_params.valid_tx_ant;
+       }
+
+       /* as default allow aggregation for all tids */
+       lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+       lq_sta->drv = il;
+
+       /* Set last_txrate_idx to lowest rate */
+       lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+       if (sband->band == IEEE80211_BAND_5GHZ)
+               lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+       lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+       lq_sta->dbg_fixed_rate = 0;
+#endif
+
+       il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
+
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+                       u32 new_rate)
+{
+       struct il_scale_tbl_info tbl_type;
+       int idx = 0;
+       int rate_idx;
+       int repeat_rate = 0;
+       u8 ant_toggle_cnt = 0;
+       u8 use_ht_possible = 1;
+       u8 valid_tx_ant = 0;
+       struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+       /* Override starting rate (idx 0) if needed for debug purposes */
+       il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+       /* Interpret new_rate (rate_n_flags) */
+       il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+                                       &rate_idx);
+
+       /* How many times should we repeat the initial rate? */
+       if (is_legacy(tbl_type.lq_type)) {
+               ant_toggle_cnt = 1;
+               repeat_rate = IL_NUMBER_TRY;
+       } else {
+               repeat_rate = IL_HT_NUMBER_TRY;
+       }
+
+       lq_cmd->general_params.mimo_delimiter =
+           is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+       /* Fill 1st table entry (idx 0) */
+       lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+       if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+               lq_cmd->general_params.single_stream_ant_msk =
+                   tbl_type.ant_type;
+       } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+               lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+       }
+       /* otherwise we don't modify the existing value */
+       idx++;
+       repeat_rate--;
+       if (il)
+               valid_tx_ant = il->hw_params.valid_tx_ant;
+
+       /* Fill rest of rate table */
+       while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+               /* Repeat initial/next rate.
+                * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+                * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+               while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+                       if (is_legacy(tbl_type.lq_type)) {
+                               if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                                       ant_toggle_cnt++;
+                               else if (il &&
+                                        il4965_rs_toggle_antenna(valid_tx_ant,
+                                                                 &new_rate,
+                                                                 &tbl_type))
+                                       ant_toggle_cnt = 1;
+                       }
+
+                       /* Override next rate if needed for debug purposes */
+                       il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+                       /* Fill next table entry */
+                       lq_cmd->rs_table[idx].rate_n_flags =
+                           cpu_to_le32(new_rate);
+                       repeat_rate--;
+                       idx++;
+               }
+
+               il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+                                               &tbl_type, &rate_idx);
+
+               /* Indicate to uCode which entries might be MIMO.
+                * If initial rate was MIMO, this will finally end up
+                * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+               if (is_mimo(tbl_type.lq_type))
+                       lq_cmd->general_params.mimo_delimiter = idx;
+
+               /* Get next rate */
+               new_rate =
+                   il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+                                            use_ht_possible);
+
+               /* How many times should we repeat the next rate? */
+               if (is_legacy(tbl_type.lq_type)) {
+                       if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+                               ant_toggle_cnt++;
+                       else if (il &&
+                                il4965_rs_toggle_antenna(valid_tx_ant,
+                                                         &new_rate, &tbl_type))
+                               ant_toggle_cnt = 1;
+
+                       repeat_rate = IL_NUMBER_TRY;
+               } else {
+                       repeat_rate = IL_HT_NUMBER_TRY;
+               }
+
+               /* Don't allow HT rates after next pass.
+                * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+               use_ht_possible = 0;
+
+               /* Override next rate if needed for debug purposes */
+               il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+               /* Fill next table entry */
+               lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+               idx++;
+               repeat_rate--;
+       }
+
+       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+       lq_cmd->agg_params.agg_time_limit =
+           cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
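+
+/*
+ * Illustrative sketch, assuming the usual legacy definitions
+ * IL_NUMBER_TRY == 1, IL_HT_NUMBER_TRY == 3 and
+ * LINK_QUAL_MAX_RETRY_NUM == 16: a table built above for an initial MIMO2
+ * rate ends up with entries 0-2 holding the initial rate, entries 3-5 the
+ * next lower HT rate (so mimo_delimiter == 6), and entries 6-15 holding
+ * successively lower legacy fallback rates, one try each with antenna
+ * toggling.
+ */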
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+       return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+       return;
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+       struct il_priv *il __maybe_unused = il_r;
+
+       D_RATE("enter\n");
+       D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il4965_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+       struct il_priv *il;
+       u8 valid_tx_ant;
+       u8 ant_sel_tx;
+
+       il = lq_sta->drv;
+       valid_tx_ant = il->hw_params.valid_tx_ant;
+       if (lq_sta->dbg_fixed_rate) {
+               ant_sel_tx =
+                   (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+                   RATE_MCS_ANT_POS;
+               if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+                       *rate_n_flags = lq_sta->dbg_fixed_rate;
+                       D_RATE("Fixed rate ON\n");
+               } else {
+                       lq_sta->dbg_fixed_rate = 0;
+                       IL_ERR
+                           ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+                            ant_sel_tx, valid_tx_ant);
+                       D_RATE("Fixed rate OFF\n");
+               }
+       } else {
+               D_RATE("Fixed rate OFF\n");
+       }
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+                                     const char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct il_lq_sta *lq_sta = file->private_data;
+       struct il_priv *il;
+       char buf[64];
+       size_t buf_size;
+       u32 parsed_rate;
+       struct il_station_priv *sta_priv =
+           container_of(lq_sta, struct il_station_priv, lq_sta);
+       struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+       il = lq_sta->drv;
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x", &parsed_rate) == 1)
+               lq_sta->dbg_fixed_rate = parsed_rate;
+       else
+               lq_sta->dbg_fixed_rate = 0;
+
+       lq_sta->active_legacy_rate = 0x0FFF;    /* 1 - 54 MBits, includes CCK */
+       lq_sta->active_siso_rate = 0x1FD0;      /* 6 - 60 MBits, no 9, no CCK */
+       lq_sta->active_mimo2_rate = 0x1FD0;     /* 6 - 60 MBits, no 9, no CCK */
+
+       D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+              lq_sta->dbg_fixed_rate);
+
+       if (lq_sta->dbg_fixed_rate) {
+               il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+               il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+       }
+
+       return count;
+}
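+
+/*
+ * Illustrative usage, assuming the usual per-station debugfs layout: with
+ * CONFIG_MAC80211_DEBUGFS enabled, writing a rate_n_flags word in hex to
+ * the "rate_scale_table" file forces that fixed rate for the station, e.g.
+ *
+ *	echo <rate_n_flags in hex> > .../rate_scale_table
+ *
+ * while writing a non-hex string resets dbg_fixed_rate to 0 and restores
+ * normal rate scaling.
+ */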
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i = 0;
+       int idx = 0;
+       ssize_t ret;
+
+       struct il_lq_sta *lq_sta = file->private_data;
+       struct il_priv *il;
+       struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+       il = lq_sta->drv;
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+       desc +=
+           sprintf(buff + desc, "failed=%d success=%d rate=0x%X\n",
+                   lq_sta->total_failed, lq_sta->total_success,
+                   lq_sta->active_legacy_rate);
+       desc +=
+           sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+       desc +=
+           sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+                   (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+                   (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+                   (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+       desc +=
+           sprintf(buff + desc, "lq type %s\n",
+                   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+       if (is_Ht(tbl->lq_type)) {
+               desc +=
+                   sprintf(buff + desc, " %s",
+                           (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+               desc +=
+                   sprintf(buff + desc, " %s",
+                           (tbl->is_ht40) ? "40MHz" : "20MHz");
+               desc +=
+                   sprintf(buff + desc, " %s %s %s\n",
+                           (tbl->is_SGI) ? "SGI" : "",
+                           (lq_sta->is_green) ? "GF enabled" : "",
+                           (lq_sta->is_agg) ? "AGG on" : "");
+       }
+       desc +=
+           sprintf(buff + desc, "last tx rate=0x%X\n",
+                   lq_sta->last_rate_n_flags);
+       desc +=
+           sprintf(buff + desc,
+                   "general:" "flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+                   lq_sta->lq.general_params.flags,
+                   lq_sta->lq.general_params.mimo_delimiter,
+                   lq_sta->lq.general_params.single_stream_ant_msk,
+                   lq_sta->lq.general_params.dual_stream_ant_msk);
+
+       desc +=
+           sprintf(buff + desc,
+                   "agg:"
+                   "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+                   le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+                   lq_sta->lq.agg_params.agg_dis_start_th,
+                   lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+       desc +=
+           sprintf(buff + desc,
+                   "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+                   lq_sta->lq.general_params.start_rate_idx[0],
+                   lq_sta->lq.general_params.start_rate_idx[1],
+                   lq_sta->lq.general_params.start_rate_idx[2],
+                   lq_sta->lq.general_params.start_rate_idx[3]);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               idx =
+                   il4965_hwrate_to_plcp_idx(le32_to_cpu
+                                             (lq_sta->lq.rs_table[i].
+                                              rate_n_flags));
+               if (is_legacy(tbl->lq_type)) {
+                       desc +=
+                           sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+                                   le32_to_cpu(lq_sta->lq.rs_table[i].
+                                               rate_n_flags),
+                                   il_rate_mcs[idx].mbps);
+               } else {
+                       desc +=
+                           sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+                                   i,
+                                   le32_to_cpu(lq_sta->lq.rs_table[i].
+                                               rate_n_flags),
+                                   il_rate_mcs[idx].mbps,
+                                   il_rate_mcs[idx].mcs);
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+       .write = il4965_rs_sta_dbgfs_scale_table_write,
+       .read = il4965_rs_sta_dbgfs_scale_table_read,
+       .open = il4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       char *buff;
+       int desc = 0;
+       int i, j;
+       ssize_t ret;
+
+       struct il_lq_sta *lq_sta = file->private_data;
+
+       buff = kmalloc(1024, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       for (i = 0; i < LQ_SIZE; i++) {
+               desc +=
+                   sprintf(buff + desc,
+                           "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+                           "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+                           lq_sta->lq_info[i].lq_type,
+                           lq_sta->lq_info[i].is_SGI,
+                           lq_sta->lq_info[i].is_ht40,
+                           lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+                           lq_sta->lq_info[i].current_rate);
+               for (j = 0; j < RATE_COUNT; j++) {
+                       desc +=
+                           sprintf(buff + desc,
+                                   "counter=%d success=%d %%=%d\n",
+                                   lq_sta->lq_info[i].win[j].counter,
+                                   lq_sta->lq_info[i].win[j].success_counter,
+                                   lq_sta->lq_info[i].win[j].success_ratio);
+               }
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+       kfree(buff);
+       return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+       .read = il4965_rs_sta_dbgfs_stats_table_read,
+       .open = il4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+                                        char __user *user_buf, size_t count,
+                                        loff_t *ppos)
+{
+       char buff[120];
+       int desc = 0;
+       struct il_lq_sta *lq_sta = file->private_data;
+       struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+       if (is_Ht(tbl->lq_type))
+               desc +=
+                   sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+                           tbl->expected_tpt[lq_sta->last_txrate_idx]);
+       else
+               desc +=
+                   sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+                           il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+       .read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+       .open = il4965_open_file_generic,
+       .llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+       struct il_lq_sta *lq_sta = il_sta;
+       lq_sta->rs_sta_dbgfs_scale_table_file =
+           debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+                               lq_sta, &rs_sta_dbgfs_scale_table_ops);
+       lq_sta->rs_sta_dbgfs_stats_table_file =
+           debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
+                               &rs_sta_dbgfs_stats_table_ops);
+       lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+           debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
+                               &rs_sta_dbgfs_rate_scale_data_ops);
+       lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+           debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+                             &lq_sta->tx_agg_tid_en);
+
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+       struct il_lq_sta *lq_sta = il_sta;
+       debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+       debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+                        struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_4965_ops = {
+       .module = NULL,
+       .name = IL4965_RS_NAME,
+       .tx_status = il4965_rs_tx_status,
+       .get_rate = il4965_rs_get_rate,
+       .rate_init = il4965_rs_rate_init_stub,
+       .alloc = il4965_rs_alloc,
+       .free = il4965_rs_free,
+       .alloc_sta = il4965_rs_alloc_sta,
+       .free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+       .add_sta_debugfs = il4965_rs_add_debugfs,
+       .remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+       return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+       ieee80211_rate_control_unregister(&rs_4965_ops);
+}
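+
+/*
+ * Illustrative pairing, assuming the usual module init/exit flow of this
+ * driver (not shown in this file): il4965_rate_control_register() is
+ * expected to be called once at module load and
+ * il4965_rate_control_unregister() at unload, so that mac80211 can select
+ * the algorithm by its IL4965_RS_NAME string.
+ */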
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644 (file)
index 0000000..84c54dc
--- /dev/null
@@ -0,0 +1,2421 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+       u32 val;
+       int ret = 0;
+       u32 errcnt = 0;
+       u32 i;
+
+       D_INFO("ucode inst image size is %u\n", len);
+
+       for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IL_DL_IO is set */
+               il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+               val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       ret = -EIO;
+                       errcnt++;
+                       if (errcnt >= 3)
+                               break;
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+       u32 val;
+       u32 save_len = len;
+       int ret = 0;
+       u32 errcnt;
+
+       D_INFO("ucode inst image size is %u\n", len);
+
+       il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+       errcnt = 0;
+       for (; len > 0; len -= sizeof(u32), image++) {
+               /* read data comes through single port, auto-incr addr */
+               /* NOTE: Use the debugless read so we don't flood kernel log
+                * if IL_DL_IO is set */
+               val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+               if (val != le32_to_cpu(*image)) {
+                       IL_ERR("uCode INST section is invalid at "
+                              "offset 0x%x, is 0x%x, s/b 0x%x\n",
+                              save_len - len, val, le32_to_cpu(*image));
+                       ret = -EIO;
+                       errcnt++;
+                       if (errcnt >= 20)
+                               break;
+               }
+       }
+
+       if (!errcnt)
+               D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+       return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+       __le32 *image;
+       u32 len;
+       int ret;
+
+       /* Try bootstrap */
+       image = (__le32 *) il->ucode_boot.v_addr;
+       len = il->ucode_boot.len;
+       ret = il4965_verify_inst_sparse(il, image, len);
+       if (!ret) {
+               D_INFO("Bootstrap uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try initialize */
+       image = (__le32 *) il->ucode_init.v_addr;
+       len = il->ucode_init.len;
+       ret = il4965_verify_inst_sparse(il, image, len);
+       if (!ret) {
+               D_INFO("Initialize uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       /* Try runtime/protocol */
+       image = (__le32 *) il->ucode_code.v_addr;
+       len = il->ucode_code.len;
+       ret = il4965_verify_inst_sparse(il, image, len);
+       if (!ret) {
+               D_INFO("Runtime uCode is good in inst SRAM\n");
+               return 0;
+       }
+
+       IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+       /* Since nothing seems to match, show first several data entries in
+        * instruction SRAM, so maybe visual inspection will give a clue.
+        * Selection of bootstrap image (vs. other images) is arbitrary. */
+       image = (__le32 *) il->ucode_boot.v_addr;
+       len = il->ucode_boot.len;
+       ret = il4965_verify_inst_full(il, image, len);
+
+       return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+       u16 count;
+       int ret;
+
+       for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+               /* Request semaphore */
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+               /* See if we got it */
+               ret =
+                   _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+                                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+                                EEPROM_SEM_TIMEOUT);
+               if (ret >= 0)
+                       return ret;
+       }
+
+       return ret;
+}
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+       il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+                    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+       u16 eeprom_ver;
+       u16 calib_ver;
+
+       eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+       calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+       if (eeprom_ver < il->cfg->eeprom_ver ||
+           calib_ver < il->cfg->eeprom_calib_ver)
+               goto err;
+
+       IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+       return 0;
+err:
+       IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+              "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+              calib_ver, il->cfg->eeprom_calib_ver);
+       return -EINVAL;
+
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
+{
+       const u8 *addr = il_eeprom_query_addr(il,
+                                             EEPROM_MAC_ADDRESS);
+       memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+       struct il_host_cmd cmd = {
+               .id = C_LEDS,
+               .len = sizeof(struct il_led_cmd),
+               .data = led_cmd,
+               .flags = CMD_ASYNC,
+               .callback = NULL,
+       };
+       u32 reg;
+
+       reg = _il_rd(il, CSR_LED_REG);
+       if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+               _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+       return il_send_cmd(il, &cmd);
+}
+
+/* Set led register off */
+void
+il4965_led_enable(struct il_priv *il)
+{
+       _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct il_led_ops il4965_led_ops = {
+       .cmd = il4965_send_led_cmd,
+};
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+       __le32 *image = il->ucode_boot.v_addr;
+       u32 len = il->ucode_boot.len;
+       u32 reg;
+       u32 val;
+
+       D_INFO("Begin verify bsm\n");
+
+       /* verify BSM SRAM contents */
+       val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+       for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+            reg += sizeof(u32), image++) {
+               val = il_rd_prph(il, reg);
+               if (val != le32_to_cpu(*image)) {
+                       IL_ERR("BSM uCode verification failed at "
+                              "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+                              BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+                              len, val, le32_to_cpu(*image));
+                       return -EIO;
+               }
+       }
+
+       D_INFO("BSM bootstrap uCode image OK\n");
+
+       return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+       __le32 *image = il->ucode_boot.v_addr;
+       u32 len = il->ucode_boot.len;
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+       u32 inst_len;
+       u32 data_len;
+       int i;
+       u32 done;
+       u32 reg_offset;
+       int ret;
+
+       D_INFO("Begin load bsm\n");
+
+       il->ucode_type = UCODE_RT;
+
+       /* make sure bootstrap program is no larger than BSM's SRAM size */
+       if (len > IL49_MAX_BSM_SIZE)
+               return -EINVAL;
+
+       /* Tell bootstrap uCode where to find the "Initialize" uCode
+        *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+        * NOTE:  il_init_alive_start() will replace these values,
+        *        after the "initialize" uCode has run, to point to
+        *        runtime/protocol instructions and backup data cache.
+        */
+       pinst = il->ucode_init.p_addr >> 4;
+       pdata = il->ucode_init_data.p_addr >> 4;
+       inst_len = il->ucode_init.len;
+       data_len = il->ucode_init_data.len;
+
+       il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+       il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+       il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+       il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+       /* Fill BSM memory with bootstrap instructions */
+       for (reg_offset = BSM_SRAM_LOWER_BOUND;
+            reg_offset < BSM_SRAM_LOWER_BOUND + len;
+            reg_offset += sizeof(u32), image++)
+               _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+       ret = il4965_verify_bsm(il);
+       if (ret)
+               return ret;
+
+       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+       il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+       il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+       il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+       /* Load bootstrap code into instruction SRAM now,
+        *   to prepare to load "initialize" uCode */
+       il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+       /* Wait for load of bootstrap uCode to finish */
+       for (i = 0; i < 100; i++) {
+               done = il_rd_prph(il, BSM_WR_CTRL_REG);
+               if (!(done & BSM_WR_CTRL_REG_BIT_START))
+                       break;
+               udelay(10);
+       }
+       if (i < 100)
+               D_INFO("BSM write complete, poll %d iterations\n", i);
+       else {
+               IL_ERR("BSM write did not complete!\n");
+               return -EIO;
+       }
+
+       /* Enable future boot loads whenever power management unit triggers it
+        *   (e.g. when powering back up after power-save shutdown) */
+       il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+       return 0;
+}
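+
+/*
+ * Illustrative arithmetic for the BSM_DRAM_* pointer programming above
+ * (example address assumed): the ">> 4" stores physical address bits 35:4,
+ * so a DMA address of 0x123456780 is written to the register as 0x12345678.
+ */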
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+       dma_addr_t pinst;
+       dma_addr_t pdata;
+       int ret = 0;
+
+       /* bits 35:4 for 4965 */
+       pinst = il->ucode_code.p_addr >> 4;
+       pdata = il->ucode_data_backup.p_addr >> 4;
+
+       /* Tell bootstrap uCode where to find image to load */
+       il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+       il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+       il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+       /* Inst byte count must be last to set up, bit 31 signals uCode
+        *   that all new ptr/size info is in place */
+       il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+                  il->ucode_code.len | BSM_DRAM_INST_LOAD);
+       D_INFO("Runtime uCode pointers are set.\n");
+
+       return ret;
+}
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ *   Voltage, temperature, and MIMO tx gain correction, now stored in il
+ *   (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+        * This is a paranoid check, because we would not have gotten the
+        * "initialize" alive if code weren't properly loaded.  */
+       if (il4965_verify_ucode(il)) {
+               /* Runtime instruction load was bad;
+                * take it all the way back down so we can try again */
+               D_INFO("Bad \"initialize\" uCode load.\n");
+               goto restart;
+       }
+
+       /* Calculate temperature */
+       il->temperature = il4965_hw_get_temperature(il);
+
+       /* Send pointers to protocol/runtime uCode image ... init code will
+        * load and launch runtime uCode, which will send us another "Alive"
+        * notification. */
+       D_INFO("Initialization Alive received.\n");
+       if (il4965_set_ucode_ptrs(il)) {
+               /* Runtime instruction load won't happen;
+                * take it all the way back down so we can try again */
+               D_INFO("Couldn't set up uCode pointers.\n");
+               goto restart;
+       }
+       return;
+
+restart:
+       queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+       int chan_mod =
+           le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+           RXON_FLG_CHANNEL_MODE_POS;
+       return (chan_mod == CHANNEL_MODE_PURE_40 ||
+               chan_mod == CHANNEL_MODE_MIXED);
+}
+
+static void
+il4965_nic_config(struct il_priv *il)
+{
+       unsigned long flags;
+       u16 radio_cfg;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+       /* write radio config values to register */
+       if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+               il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                          EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+                          EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+                          EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+       /* set CSR_HW_CONFIG_REG for uCode use */
+       il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                  CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+                  CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+       il->calib_info =
+           (struct il_eeprom_calib_info *)
+           il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+       spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ *  ... once chain noise is calibrated the first time, it's good forever.  */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+       struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+       if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+               struct il_calib_diff_gain_cmd cmd;
+
+               /* clear data for chain noise calibration algorithm */
+               data->chain_noise_a = 0;
+               data->chain_noise_b = 0;
+               data->chain_noise_c = 0;
+               data->chain_signal_a = 0;
+               data->chain_signal_b = 0;
+               data->chain_signal_c = 0;
+               data->beacon_count = 0;
+
+               memset(&cmd, 0, sizeof(cmd));
+               cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+               cmd.diff_gain_a = 0;
+               cmd.diff_gain_b = 0;
+               cmd.diff_gain_c = 0;
+               if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+                       IL_ERR("Could not send C_PHY_CALIBRATION\n");
+               data->state = IL_CHAIN_NOISE_ACCUMULATE;
+               D_CALIB("Run chain_noise_calibrate\n");
+       }
+}
+
+static struct il_sensitivity_ranges il4965_sensitivity = {
+       .min_nrg_cck = 97,
+       .max_nrg_cck = 0,       /* not used, set to 0 */
+
+       .auto_corr_min_ofdm = 85,
+       .auto_corr_min_ofdm_mrc = 170,
+       .auto_corr_min_ofdm_x1 = 105,
+       .auto_corr_min_ofdm_mrc_x1 = 220,
+
+       .auto_corr_max_ofdm = 120,
+       .auto_corr_max_ofdm_mrc = 210,
+       .auto_corr_max_ofdm_x1 = 140,
+       .auto_corr_max_ofdm_mrc_x1 = 270,
+
+       .auto_corr_min_cck = 125,
+       .auto_corr_max_cck = 200,
+       .auto_corr_min_cck_mrc = 200,
+       .auto_corr_max_cck_mrc = 400,
+
+       .nrg_th_cck = 100,
+       .nrg_th_ofdm = 100,
+
+       .barker_corr_th_min = 190,
+       .barker_corr_th_min_mrc = 390,
+       .nrg_th_cca = 62,
+};
+
+static void
+il4965_set_ct_threshold(struct il_priv *il)
+{
+       /* want Kelvin */
+       il->hw_params.ct_kill_threshold =
+           CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
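+
+/*
+ * Example, assuming CT_KILL_THRESHOLD_LEGACY is 110 degrees C and
+ * CELSIUS_TO_KELVIN() adds 273, as in the legacy definitions: the
+ * resulting ct_kill_threshold is 383.
+ */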
+
+/**
+ * il4965_hw_set_hw_params
+ *
+ * Called when initializing driver
+ */
+static int
+il4965_hw_set_hw_params(struct il_priv *il)
+{
+       if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+           il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+               il->cfg->base_params->num_of_queues =
+                   il->cfg->mod_params->num_of_queues;
+
+       il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+       il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+       il->hw_params.scd_bc_tbls_size =
+           il->cfg->base_params->num_of_queues *
+           sizeof(struct il4965_scd_bc_tbl);
+       il->hw_params.tfd_size = sizeof(struct il_tfd);
+       il->hw_params.max_stations = IL4965_STATION_COUNT;
+       il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
+       il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+       il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+       il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+       il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+       il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+       il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+       il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+       il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+       il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+       il4965_set_ct_threshold(il);
+
+       il->hw_params.sens = &il4965_sensitivity;
+       il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+
+       return 0;
+}
+
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 * res)
+{
+       s32 sign = 1;
+
+       if (num < 0) {
+               sign = -sign;
+               num = -num;
+       }
+       if (denom < 0) {
+               sign = -sign;
+               denom = -denom;
+       }
+       *res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+       return 1;
+}
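+
+/*
+ * Worked example (values illustrative only): il4965_math_div_round(7, 2, &res)
+ * computes ((7 * 2 + 2) / (2 * 2)) = 16 / 4 = 4, i.e. 7/2 rounded to the
+ * nearest integer; the sign handling above makes (-7, 2) yield -4.
+ */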
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+       s32 comp = 0;
+
+       if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+           TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+               return 0;
+
+       il4965_math_div_round(current_voltage - eeprom_voltage,
+                             TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
+       if (current_voltage > eeprom_voltage)
+               comp *= 2;
+       if ((comp < -2) || (comp > 2))
+               comp = 0;
+
+       return comp;
+}
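+
+/*
+ * Worked example (values assumed, TX_POWER_IL_VOLTAGE_CODES_PER_03V taken
+ * as 7): with eeprom_voltage = 300 and current_voltage = 310, the rounded
+ * division gives comp = 1; since current_voltage > eeprom_voltage it is
+ * doubled to 2, which is within the allowed [-2, 2] range and returned.
+ */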
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+       if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+               return CALIB_CH_GROUP_5;
+
+       if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+               return CALIB_CH_GROUP_1;
+
+       if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+               return CALIB_CH_GROUP_2;
+
+       if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+               return CALIB_CH_GROUP_3;
+
+       if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+               return CALIB_CH_GROUP_4;
+
+       return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+       s32 b = -1;
+
+       for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+               if (il->calib_info->band_info[b].ch_from == 0)
+                       continue;
+
+               if (channel >= il->calib_info->band_info[b].ch_from &&
+                   channel <= il->calib_info->band_info[b].ch_to)
+                       break;
+       }
+
+       return b;
+}
+
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+       s32 val;
+
+       if (x2 == x1)
+               return y1;
+       else {
+               il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+               return val + y2;
+       }
+}
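+
+/*
+ * Worked example (sample points assumed): interpolating at x = 38 between
+ * (x1 = 36, y1 = 10) and (x2 = 40, y2 = 20) gives
+ * round((40 - 38) * (10 - 20) / (40 - 36)) + 20 = -5 + 20 = 15.
+ */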
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest.  Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+                       struct il_eeprom_calib_ch_info *chan_info)
+{
+       s32 s = -1;
+       u32 c;
+       u32 m;
+       const struct il_eeprom_calib_measure *m1;
+       const struct il_eeprom_calib_measure *m2;
+       struct il_eeprom_calib_measure *omeas;
+       u32 ch_i1;
+       u32 ch_i2;
+
+       s = il4965_get_sub_band(il, channel);
+       if (s >= EEPROM_TX_POWER_BANDS) {
+               IL_ERR("Tx Power can not find channel %d\n", channel);
+               return -1;
+       }
+
+       ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+       ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+       chan_info->ch_num = (u8) channel;
+
+       D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+                 ch_i1, ch_i2);
+
+       for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+               for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+                       m1 = &(il->calib_info->band_info[s].ch1.
+                              measurements[c][m]);
+                       m2 = &(il->calib_info->band_info[s].ch2.
+                              measurements[c][m]);
+                       omeas = &(chan_info->measurements[c][m]);
+
+                       omeas->actual_pow =
+                           (u8) il4965_interpolate_value(channel, ch_i1,
+                                                         m1->actual_pow, ch_i2,
+                                                         m2->actual_pow);
+                       omeas->gain_idx =
+                           (u8) il4965_interpolate_value(channel, ch_i1,
+                                                         m1->gain_idx, ch_i2,
+                                                         m2->gain_idx);
+                       omeas->temperature =
+                           (u8) il4965_interpolate_value(channel, ch_i1,
+                                                         m1->temperature,
+                                                         ch_i2,
+                                                         m2->temperature);
+                       omeas->pa_det =
+                           (s8) il4965_interpolate_value(channel, ch_i1,
+                                                         m1->pa_det, ch_i2,
+                                                         m2->pa_det);
+
+                       D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+                                 m, m1->actual_pow, m2->actual_pow,
+                                 omeas->actual_pow);
+                       D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+                                 m, m1->gain_idx, m2->gain_idx,
+                                 omeas->gain_idx);
+                       D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+                                 m, m1->pa_det, m2->pa_det, omeas->pa_det);
+                       D_TXPOWER("chain %d meas %d  T1=%d  T2=%d  T=%d\n", c,
+                                 m, m1->temperature, m2->temperature,
+                                 omeas->temperature);
+               }
+       }
+
+       return 0;
+}
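The interpolation the comment above describes is ordinary two-point linear interpolation keyed on channel number.  A minimal standalone sketch of the idea (illustrative only; it is not the driver's il4965_interpolate_value and ignores that helper's exact rounding):

        /* Result for channel x lies on the line through (x1, y1) and (x2, y2).
         * Integer division truncates, so this only approximates the driver's
         * rounding behaviour. */
        static int interp_by_channel(int x, int x1, int y1, int x2, int y2)
        {
                if (x1 == x2)           /* degenerate sub-band: identical samples */
                        return y1;
                return y2 + ((x - x2) * (y1 - y2)) / (x1 - x2);
        }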
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
+       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
+       10                      /* CCK */
+};
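Read together with its use further down (power_limit = saturation_power - back_off_table[i]), an entry of 20 half-dB units for the 60 Mbit OFDM rate means that rate is capped 10 dB below the saturation power before the regulatory and user limits are applied, while CCK is backed off by only 5 dB.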
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+       s32 degrees_per_05db_a;
+       s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+       {9, 2},                 /* group 0 5.2, ch  34-43 */
+       {4, 1},                 /* group 1 5.2, ch  44-70 */
+       {4, 1},                 /* group 2 5.2, ch  71-124 */
+       {4, 1},                 /* group 3 5.2, ch 125-200 */
+       {3, 1}                  /* group 4 2.4, ch   all */
+};
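As a worked example of how this table is applied below: group 0 specifies 9/2, i.e. roughly 4.5 degrees Celsius per half-dB step, so a device running 18 degrees above its factory calibration temperature on a group-0 channel is compensated by about 18 * 2 / 9 = 4 half-dB gain-index steps (the exact value depends on the rounding done by il4965_math_div_round).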
+
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+       if (!band) {
+               if ((rate_power_idx & 7) <= 4)
+                       return MIN_TX_GAIN_IDX_52GHZ_EXT;
+       }
+       return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+       u8 dsp;
+       u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+       /* 5.2GHz power gain idx table */
+       {
+        {123, 0x3F},           /* highest txpower */
+        {117, 0x3F},
+        {110, 0x3F},
+        {104, 0x3F},
+        {98, 0x3F},
+        {110, 0x3E},
+        {104, 0x3E},
+        {98, 0x3E},
+        {110, 0x3D},
+        {104, 0x3D},
+        {98, 0x3D},
+        {110, 0x3C},
+        {104, 0x3C},
+        {98, 0x3C},
+        {110, 0x3B},
+        {104, 0x3B},
+        {98, 0x3B},
+        {110, 0x3A},
+        {104, 0x3A},
+        {98, 0x3A},
+        {110, 0x39},
+        {104, 0x39},
+        {98, 0x39},
+        {110, 0x38},
+        {104, 0x38},
+        {98, 0x38},
+        {110, 0x37},
+        {104, 0x37},
+        {98, 0x37},
+        {110, 0x36},
+        {104, 0x36},
+        {98, 0x36},
+        {110, 0x35},
+        {104, 0x35},
+        {98, 0x35},
+        {110, 0x34},
+        {104, 0x34},
+        {98, 0x34},
+        {110, 0x33},
+        {104, 0x33},
+        {98, 0x33},
+        {110, 0x32},
+        {104, 0x32},
+        {98, 0x32},
+        {110, 0x31},
+        {104, 0x31},
+        {98, 0x31},
+        {110, 0x30},
+        {104, 0x30},
+        {98, 0x30},
+        {110, 0x25},
+        {104, 0x25},
+        {98, 0x25},
+        {110, 0x24},
+        {104, 0x24},
+        {98, 0x24},
+        {110, 0x23},
+        {104, 0x23},
+        {98, 0x23},
+        {110, 0x22},
+        {104, 0x18},
+        {98, 0x18},
+        {110, 0x17},
+        {104, 0x17},
+        {98, 0x17},
+        {110, 0x16},
+        {104, 0x16},
+        {98, 0x16},
+        {110, 0x15},
+        {104, 0x15},
+        {98, 0x15},
+        {110, 0x14},
+        {104, 0x14},
+        {98, 0x14},
+        {110, 0x13},
+        {104, 0x13},
+        {98, 0x13},
+        {110, 0x12},
+        {104, 0x08},
+        {98, 0x08},
+        {110, 0x07},
+        {104, 0x07},
+        {98, 0x07},
+        {110, 0x06},
+        {104, 0x06},
+        {98, 0x06},
+        {110, 0x05},
+        {104, 0x05},
+        {98, 0x05},
+        {110, 0x04},
+        {104, 0x04},
+        {98, 0x04},
+        {110, 0x03},
+        {104, 0x03},
+        {98, 0x03},
+        {110, 0x02},
+        {104, 0x02},
+        {98, 0x02},
+        {110, 0x01},
+        {104, 0x01},
+        {98, 0x01},
+        {110, 0x00},
+        {104, 0x00},
+        {98, 0x00},
+        {93, 0x00},
+        {88, 0x00},
+        {83, 0x00},
+        {78, 0x00},
+        },
+       /* 2.4GHz power gain idx table */
+       {
+        {110, 0x3f},           /* highest txpower */
+        {104, 0x3f},
+        {98, 0x3f},
+        {110, 0x3e},
+        {104, 0x3e},
+        {98, 0x3e},
+        {110, 0x3d},
+        {104, 0x3d},
+        {98, 0x3d},
+        {110, 0x3c},
+        {104, 0x3c},
+        {98, 0x3c},
+        {110, 0x3b},
+        {104, 0x3b},
+        {98, 0x3b},
+        {110, 0x3a},
+        {104, 0x3a},
+        {98, 0x3a},
+        {110, 0x39},
+        {104, 0x39},
+        {98, 0x39},
+        {110, 0x38},
+        {104, 0x38},
+        {98, 0x38},
+        {110, 0x37},
+        {104, 0x37},
+        {98, 0x37},
+        {110, 0x36},
+        {104, 0x36},
+        {98, 0x36},
+        {110, 0x35},
+        {104, 0x35},
+        {98, 0x35},
+        {110, 0x34},
+        {104, 0x34},
+        {98, 0x34},
+        {110, 0x33},
+        {104, 0x33},
+        {98, 0x33},
+        {110, 0x32},
+        {104, 0x32},
+        {98, 0x32},
+        {110, 0x31},
+        {104, 0x31},
+        {98, 0x31},
+        {110, 0x30},
+        {104, 0x30},
+        {98, 0x30},
+        {110, 0x6},
+        {104, 0x6},
+        {98, 0x6},
+        {110, 0x5},
+        {104, 0x5},
+        {98, 0x5},
+        {110, 0x4},
+        {104, 0x4},
+        {98, 0x4},
+        {110, 0x3},
+        {104, 0x3},
+        {98, 0x3},
+        {110, 0x2},
+        {104, 0x2},
+        {98, 0x2},
+        {110, 0x1},
+        {104, 0x1},
+        {98, 0x1},
+        {110, 0x0},
+        {104, 0x0},
+        {98, 0x0},
+        {97, 0},
+        {96, 0},
+        {95, 0},
+        {94, 0},
+        {93, 0},
+        {92, 0},
+        {91, 0},
+        {90, 0},
+        {89, 0},
+        {88, 0},
+        {87, 0},
+        {86, 0},
+        {85, 0},
+        {84, 0},
+        {83, 0},
+        {82, 0},
+        {81, 0},
+        {80, 0},
+        {79, 0},
+        {78, 0},
+        {77, 0},
+        {76, 0},
+        {75, 0},
+        {74, 0},
+        {73, 0},
+        {72, 0},
+        {71, 0},
+        {70, 0},
+        {69, 0},
+        {68, 0},
+        {67, 0},
+        {66, 0},
+        {65, 0},
+        {64, 0},
+        {63, 0},
+        {62, 0},
+        {61, 0},
+        {60, 0},
+        {59, 0},
+        }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+                       u8 ctrl_chan_high,
+                       struct il4965_tx_power_db *tx_power_tbl)
+{
+       u8 saturation_power;
+       s32 target_power;
+       s32 user_target_power;
+       s32 power_limit;
+       s32 current_temp;
+       s32 reg_limit;
+       s32 current_regulatory;
+       s32 txatten_grp = CALIB_CH_GROUP_MAX;
+       int i;
+       int c;
+       const struct il_channel_info *ch_info = NULL;
+       struct il_eeprom_calib_ch_info ch_eeprom_info;
+       const struct il_eeprom_calib_measure *measurement;
+       s16 voltage;
+       s32 init_voltage;
+       s32 voltage_compensation;
+       s32 degrees_per_05db_num;
+       s32 degrees_per_05db_denom;
+       s32 factory_temp;
+       s32 temperature_comp[2];
+       s32 factory_gain_idx[2];
+       s32 factory_actual_pwr[2];
+       s32 power_idx;
+
+       /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+        *   are used for idxing into txpower table) */
+       user_target_power = 2 * il->tx_power_user_lmt;
+
+       /* Get current (RXON) channel, band, width */
+       D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+       ch_info = il_get_channel_info(il, il->band, channel);
+
+       if (!il_is_channel_valid(ch_info))
+               return -EINVAL;
+
+       /* get txatten group, used to select 1) thermal txpower adjustment
+        *   and 2) mimo txpower balance between Tx chains. */
+       txatten_grp = il4965_get_tx_atten_grp(channel);
+       if (txatten_grp < 0) {
+               IL_ERR("Can't find txatten group for channel %d.\n", channel);
+               return txatten_grp;
+       }
+
+       D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+                 txatten_grp);
+
+       if (is_ht40) {
+               if (ctrl_chan_high)
+                       channel -= 2;
+               else
+                       channel += 2;
+       }
+
+       /* hardware txpower limits ...
+        * saturation (clipping distortion) txpowers are in half-dBm */
+       if (band)
+               saturation_power = il->calib_info->saturation_power24;
+       else
+               saturation_power = il->calib_info->saturation_power52;
+
+       if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+           saturation_power > IL_TX_POWER_SATURATION_MAX) {
+               if (band)
+                       saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+               else
+                       saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+       }
+
+       /* regulatory txpower limits ... reg_limit values are in half-dBm,
+        *   max_power_avg values are in dBm, convert * 2 */
+       if (is_ht40)
+               reg_limit = ch_info->ht40_max_power_avg * 2;
+       else
+               reg_limit = ch_info->max_power_avg * 2;
+
+       if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+           (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+               if (band)
+                       reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+               else
+                       reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+       }
+
+       /* Interpolate txpower calibration values for this channel,
+        *   based on factory calibration tests on spaced channels. */
+       il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+       /* calculate tx gain adjustment based on power supply voltage */
+       voltage = le16_to_cpu(il->calib_info->voltage);
+       init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+       voltage_compensation =
+           il4965_get_voltage_compensation(voltage, init_voltage);
+
+       D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+                 voltage, voltage_compensation);
+
+       /* get current temperature (Celsius) */
+       current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+       current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
+       current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+       /* select thermal txpower adjustment params, based on channel group
+        *   (same frequency group used for mimo txatten adjustment) */
+       degrees_per_05db_num =
+           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+       degrees_per_05db_denom =
+           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+       /* get per-chain txpower values from factory measurements */
+       for (c = 0; c < 2; c++) {
+               measurement = &ch_eeprom_info.measurements[c][1];
+
+               /* txgain adjustment (in half-dB steps) based on difference
+                *   between factory and current temperature */
+               factory_temp = measurement->temperature;
+               il4965_math_div_round((current_temp -
+                                      factory_temp) * degrees_per_05db_denom,
+                                     degrees_per_05db_num,
+                                     &temperature_comp[c]);
+
+               factory_gain_idx[c] = measurement->gain_idx;
+               factory_actual_pwr[c] = measurement->actual_pow;
+
+               D_TXPOWER("chain = %d\n", c);
+               D_TXPOWER("fctry tmp %d, curr tmp %d, comp %d steps\n",
+                         factory_temp, current_temp, temperature_comp[c]);
+
+               D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+                         factory_actual_pwr[c]);
+       }
+
+       /* for each of 33 bit-rates (including 1 for CCK) */
+       for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+               u8 is_mimo_rate;
+               union il4965_tx_power_dual_stream tx_power;
+
+               /* for mimo, reduce each chain's txpower by half
+                * (3dB, 6 steps), so total output power is regulatory
+                * compliant. */
+               if (i & 0x8) {
+                       current_regulatory =
+                           reg_limit -
+                           IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+                       is_mimo_rate = 1;
+               } else {
+                       current_regulatory = reg_limit;
+                       is_mimo_rate = 0;
+               }
+
+               /* find txpower limit, either hardware or regulatory */
+               power_limit = saturation_power - back_off_table[i];
+               if (power_limit > current_regulatory)
+                       power_limit = current_regulatory;
+
+               /* reduce user's txpower request if necessary
+                * for this rate on this channel */
+               target_power = user_target_power;
+               if (target_power > power_limit)
+                       target_power = power_limit;
+
+               D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+                         saturation_power - back_off_table[i],
+                         current_regulatory, user_target_power, target_power);
+
+               /* for each of 2 Tx chains (radio transmitters) */
+               for (c = 0; c < 2; c++) {
+                       s32 atten_value;
+
+                       if (is_mimo_rate)
+                               atten_value =
+                                   (s32) le32_to_cpu(il->card_alive_init.
+                                                     tx_atten[txatten_grp][c]);
+                       else
+                               atten_value = 0;
+
+                       /* calculate idx; higher idx means lower txpower */
+                       power_idx =
+                           (u8) (factory_gain_idx[c] -
+                                 (target_power - factory_actual_pwr[c]) -
+                                 temperature_comp[c] - voltage_compensation +
+                                 atten_value);
+
+/*                     D_TXPOWER("calculated txpower idx %d\n",
+                                               power_idx); */
+
+                       if (power_idx < get_min_power_idx(i, band))
+                               power_idx = get_min_power_idx(i, band);
+
+                       /* adjust 5 GHz idx to support negative idxes */
+                       if (!band)
+                               power_idx += 9;
+
+                       /* CCK, rate 32, reduce txpower for CCK */
+                       if (i == POWER_TBL_CCK_ENTRY)
+                               power_idx +=
+                                   IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+                       /* stay within the table! */
+                       if (power_idx > 107) {
+                               IL_WARN("txpower idx %d > 107\n", power_idx);
+                               power_idx = 107;
+                       }
+                       if (power_idx < 0) {
+                               IL_WARN("txpower idx %d < 0\n", power_idx);
+                               power_idx = 0;
+                       }
+
+                       /* fill txpower command for this rate/chain */
+                       tx_power.s.radio_tx_gain[c] =
+                           gain_table[band][power_idx].radio;
+                       tx_power.s.dsp_predis_atten[c] =
+                           gain_table[band][power_idx].dsp;
+
+                       D_TXPOWER("chain %d mimo %d idx %d "
+                                 "gain 0x%02x dsp %d\n", c, atten_value,
+                                 power_idx, tx_power.s.radio_tx_gain[c],
+                                 tx_power.s.dsp_predis_atten[c]);
+               }               /* for each chain */
+
+               tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+       }                       /* for each rate */
+
+       return 0;
+}
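Condensed, the per-chain gain-index arithmetic inside the rate loop above amounts to the sketch below (names are illustrative, and it deliberately omits the 5 GHz index offset and the CCK compensation step that the real loop applies before the final clamp):

        /* Every term is in half-dB steps; a larger index means lower txpower. */
        static int txpower_index(int factory_gain_idx, int factory_actual_pwr,
                                 int target_power, int temp_comp, int volt_comp,
                                 int mimo_atten, int min_idx)
        {
                int idx = factory_gain_idx
                          - (target_power - factory_actual_pwr)
                          - temp_comp - volt_comp + mimo_atten;

                if (idx < min_idx)      /* respect the band's minimum gain index */
                        idx = min_idx;
                if (idx > 107)          /* stay inside the 108-entry gain table */
                        idx = 107;
                return idx;
        }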
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+       struct il4965_txpowertable_cmd cmd = { 0 };
+       int ret;
+       u8 band = 0;
+       bool is_ht40 = false;
+       u8 ctrl_chan_high = 0;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       if (WARN_ONCE
+           (test_bit(S_SCAN_HW, &il->status),
+            "TX Power requested while scanning!\n"))
+               return -EAGAIN;
+
+       band = il->band == IEEE80211_BAND_2GHZ;
+
+       is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+       if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+               ctrl_chan_high = 1;
+
+       cmd.band = band;
+       cmd.channel = ctx->active.channel;
+
+       ret =
+           il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+                                   is_ht40, ctrl_chan_high, &cmd.tx_power);
+       if (ret)
+               goto out;
+
+       ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+       return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       int ret = 0;
+       struct il4965_rxon_assoc_cmd rxon_assoc;
+       const struct il_rxon_cmd *rxon1 = &ctx->staging;
+       const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+       if (rxon1->flags == rxon2->flags &&
+           rxon1->filter_flags == rxon2->filter_flags &&
+           rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+           rxon1->ofdm_ht_single_stream_basic_rates ==
+           rxon2->ofdm_ht_single_stream_basic_rates &&
+           rxon1->ofdm_ht_dual_stream_basic_rates ==
+           rxon2->ofdm_ht_dual_stream_basic_rates &&
+           rxon1->rx_chain == rxon2->rx_chain &&
+           rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+               D_INFO("Using current RXON_ASSOC.  Not resending.\n");
+               return 0;
+       }
+
+       rxon_assoc.flags = ctx->staging.flags;
+       rxon_assoc.filter_flags = ctx->staging.filter_flags;
+       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+       rxon_assoc.reserved = 0;
+       rxon_assoc.ofdm_ht_single_stream_basic_rates =
+           ctx->staging.ofdm_ht_single_stream_basic_rates;
+       rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+           ctx->staging.ofdm_ht_dual_stream_basic_rates;
+       rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+       ret =
+           il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+                                 &rxon_assoc, NULL);
+
+       return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       /* cast away the const for active_rxon in this function */
+       struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+       int ret;
+       bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+       if (!il_is_alive(il))
+               return -EBUSY;
+
+       if (!ctx->is_active)
+               return 0;
+
+       /* always get timestamp with Rx frame */
+       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+       ret = il_check_rxon_cmd(il, ctx);
+       if (ret) {
+               IL_ERR("Invalid RXON configuration.  Not committing.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * A commit_rxon request has been received; abort any previous
+        * channel switch that is still in progress.
+        */
+       if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+           il->switch_channel != ctx->staging.channel) {
+               D_11H("abort channel switch on %d\n",
+                     le16_to_cpu(il->switch_channel));
+               il_chswitch_done(il, false);
+       }
+
+       /* If we don't need to send a full RXON, we can use
+        * il_rxon_assoc_cmd which is used to reconfigure filter
+        * and other flags for the current radio configuration. */
+       if (!il_full_rxon_required(il, ctx)) {
+               ret = il_send_rxon_assoc(il, ctx);
+               if (ret) {
+                       IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+                       return ret;
+               }
+
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+               il_print_rx_config_cmd(il, ctx);
+               /*
+                * We do not commit tx power settings while channel changing,
+                * do it now if tx power changed.
+                */
+               il_set_tx_power(il, il->tx_power_next, false);
+               return 0;
+       }
+
+       /* If we are currently associated and the new config requires
+        * an RXON_ASSOC and wants the associated mask enabled, we must
+        * clear the ASSOC bit from the active configuration before we
+        * apply the new config. */
+       if (il_is_associated_ctx(ctx) && new_assoc) {
+               D_INFO("Toggling associated bit on current RXON\n");
+               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+               ret =
+                   il_send_cmd_pdu(il, ctx->rxon_cmd,
+                                   sizeof(struct il_rxon_cmd), active_rxon);
+
+               /* If the mask clearing failed then we set
+                * active_rxon back to what it was previously */
+               if (ret) {
+                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+                       IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+                       return ret;
+               }
+               il_clear_ucode_stations(il, ctx);
+               il_restore_stations(il, ctx);
+               ret = il4965_restore_default_wep_keys(il, ctx);
+               if (ret) {
+                       IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+                       return ret;
+               }
+       }
+
+       D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+              "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+              le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+
+       il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+
+       /* Apply the new configuration.
+        * RXON unassoc clears the station table in uCode, so stations must
+        * be restored after it (the RXON command) completes.
+        */
+       if (!new_assoc) {
+               ret =
+                   il_send_cmd_pdu(il, ctx->rxon_cmd,
+                                   sizeof(struct il_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IL_ERR("Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               D_INFO("Return from !new_assoc RXON.\n");
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+               il_clear_ucode_stations(il, ctx);
+               il_restore_stations(il, ctx);
+               ret = il4965_restore_default_wep_keys(il, ctx);
+               if (ret) {
+                       IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+                       return ret;
+               }
+       }
+       if (new_assoc) {
+               il->start_calib = 0;
+               /* Apply the new configuration.
+                * RXON assoc doesn't clear the station table in uCode.
+                */
+               ret =
+                   il_send_cmd_pdu(il, ctx->rxon_cmd,
+                                   sizeof(struct il_rxon_cmd), &ctx->staging);
+               if (ret) {
+                       IL_ERR("Error setting new RXON (%d)\n", ret);
+                       return ret;
+               }
+               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+       }
+       il_print_rx_config_cmd(il, ctx);
+
+       il4965_init_sensitivity(il);
+
+       /* If we issue a new RXON command which requires a tune, then we must
+        * send a new TXPOWER command or we won't be able to Tx any frames. */
+       ret = il_set_tx_power(il, il->tx_power_next, true);
+       if (ret) {
+               IL_ERR("Error sending TX power (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+                        struct ieee80211_channel_switch *ch_switch)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       int rc;
+       u8 band = 0;
+       bool is_ht40 = false;
+       u8 ctrl_chan_high = 0;
+       struct il4965_channel_switch_cmd cmd;
+       const struct il_channel_info *ch_info;
+       u32 switch_time_in_usec, ucode_switch_time;
+       u16 ch;
+       u32 tsf_low;
+       u8 switch_count;
+       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+       struct ieee80211_vif *vif = ctx->vif;
+       band = il->band == IEEE80211_BAND_2GHZ;
+
+       is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+       if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+               ctrl_chan_high = 1;
+
+       cmd.band = band;
+       cmd.expect_beacon = 0;
+       ch = ch_switch->channel->hw_value;
+       cmd.channel = cpu_to_le16(ch);
+       cmd.rxon_flags = ctx->staging.flags;
+       cmd.rxon_filter_flags = ctx->staging.filter_flags;
+       switch_count = ch_switch->count;
+       tsf_low = ch_switch->timestamp & 0x0ffffffff;
+       /*
+        * calculate the ucode channel switch time
+        * adding TSF as one of the factor for when to switch
+        */
+       if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+               if (switch_count >
+                   ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+                       switch_count -=
+                           (il->ucode_beacon_time - tsf_low) / beacon_interval;
+               } else
+                       switch_count = 0;
+       }
+       if (switch_count <= 1)
+               cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+       else {
+               switch_time_in_usec =
+                   vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+               ucode_switch_time =
+                   il_usecs_to_beacons(il, switch_time_in_usec,
+                                       beacon_interval);
+               cmd.switch_time =
+                   il_add_beacon_time(il, il->ucode_beacon_time,
+                                      ucode_switch_time, beacon_interval);
+       }
+       D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+       ch_info = il_get_channel_info(il, il->band, ch);
+       if (ch_info)
+               cmd.expect_beacon = il_is_channel_radar(ch_info);
+       else {
+               IL_ERR("invalid channel switch from %u to %u\n",
+                      ctx->active.channel, ch);
+               return -EFAULT;
+       }
+
+       rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+                                    &cmd.tx_power);
+       if (rc) {
+               D_11H("error:%d  fill txpower_tbl\n", rc);
+               return rc;
+       }
+
+       return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+                              u16 byte_cnt)
+{
+       struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+       int txq_id = txq->q.id;
+       int write_ptr = txq->q.write_ptr;
+       int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+       __le16 bc_ent;
+
+       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+       bc_ent = cpu_to_le16(len & 0xFFF);
+       /* Set up byte count within first 256 entries */
+       scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+       /* If within first 64 entries, duplicate at end */
+       if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+               scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+                   bc_ent;
+}
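The duplicate write at TFD_QUEUE_SIZE_MAX + write_ptr presumably lets the scheduler read a contiguous run of byte counts even when its read window wraps past the end of the 256-entry array; only the first TFD_QUEUE_SIZE_BC_DUP entries need to be mirrored for that.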
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @il: device private data; the temperature reading comes from the uCode stats
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+       s32 temperature;
+       s32 vt;
+       s32 R1, R2, R3;
+       u32 R4;
+
+       if (test_bit(S_TEMPERATURE, &il->status) &&
+           (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+               D_TEMP("Running HT40 temperature calibration\n");
+               R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+               R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+               R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+               R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+       } else {
+               D_TEMP("Running temperature calibration\n");
+               R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+               R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+               R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+               R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+       }
+
+       /*
+        * Temperature is only 23 bits, so sign extend out to 32.
+        *
+        * NOTE If we haven't received a stats notification yet
+        * with an updated temperature, use R4 provided to us in the
+        * "initialize" ALIVE response.
+        */
+       if (!test_bit(S_TEMPERATURE, &il->status))
+               vt = sign_extend32(R4, 23);
+       else
+               vt = sign_extend32(le32_to_cpu
+                                  (il->_4965.stats.general.common.temperature),
+                                  23);
+
+       D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+       if (R3 == R1) {
+               IL_ERR("Calibration conflict R1 == R3\n");
+               return -1;
+       }
+
+       /* Calculate temperature in degrees Kelvin, adjust by 97%.
+        * Add offset to center the adjustment around 0 degrees Centigrade. */
+       temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+       temperature /= (R3 - R1);
+       temperature =
+           (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+       D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+              KELVIN_TO_CELSIUS(temperature));
+
+       return temperature;
+}
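In closed form, the conversion above is approximately T_kelvin = 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2) / (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET, where vt is the sign-extended raw reading and R1..R4 come from the "initialize" ALIVE response; the division by (R3 - R1) is why R1 == R3 is rejected just above.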
+
+/* Adjust txpower only if the temperature difference exceeds this threshold. */
+#define IL_TEMPERATURE_THRESHOLD   3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes caller will replace il->last_temperature once calibration
+ * executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+       int temp_diff;
+
+       if (!test_bit(S_STATS, &il->status)) {
+               D_TEMP("Temperature not updated -- no stats.\n");
+               return 0;
+       }
+
+       temp_diff = il->temperature - il->last_temperature;
+
+       /* get absolute value */
+       if (temp_diff < 0) {
+               D_POWER("Getting cooler, delta %d\n", temp_diff);
+               temp_diff = -temp_diff;
+       } else if (temp_diff == 0)
+               D_POWER("Temperature unchanged\n");
+       else
+               D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+       if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+               D_POWER(" => thermal txpower calib not needed\n");
+               return 0;
+       }
+
+       D_POWER(" => thermal txpower calib needed\n");
+
+       return 1;
+}
+
+static void
+il4965_temperature_calib(struct il_priv *il)
+{
+       s32 temp;
+
+       temp = il4965_hw_get_temperature(il);
+       if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+               return;
+
+       if (il->temperature != temp) {
+               if (il->temperature)
+                       D_TEMP("Temperature changed from %dC to %dC\n",
+                              KELVIN_TO_CELSIUS(il->temperature),
+                              KELVIN_TO_CELSIUS(temp));
+               else
+                       D_TEMP("Temperature initialized to %dC\n",
+                              KELVIN_TO_CELSIUS(temp));
+       }
+
+       il->temperature = temp;
+       set_bit(S_TEMPERATURE, &il->status);
+
+       if (!il->disable_tx_power_cal &&
+           unlikely(!test_bit(S_SCANNING, &il->status)) &&
+           il4965_is_temp_calib_needed(il))
+               queue_work(il->workqueue, &il->txpower_work);
+}
+
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+       switch (cmd_id) {
+       case C_RXON:
+               return (u16) sizeof(struct il4965_rxon_cmd);
+       default:
+               return len;
+       }
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+       struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+       addsta->mode = cmd->mode;
+       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+       memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+       addsta->station_flags = cmd->station_flags;
+       addsta->station_flags_msk = cmd->station_flags_msk;
+       addsta->tid_disable_tx = cmd->tid_disable_tx;
+       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+       addsta->sleep_tx_count = cmd->sleep_tx_count;
+       addsta->reserved1 = cpu_to_le16(0);
+       addsta->reserved2 = cpu_to_le16(0);
+
+       return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+       return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+       status &= TX_STATUS_MSK;
+
+       switch (status) {
+       case TX_STATUS_SUCCESS:
+       case TX_STATUS_DIRECT_DONE:
+               return IEEE80211_TX_STAT_ACK;
+       case TX_STATUS_FAIL_DEST_PS:
+               return IEEE80211_TX_STAT_TX_FILTERED;
+       default:
+               return 0;
+       }
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+       status &= TX_STATUS_MSK;
+       return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+                         struct il4965_tx_resp *tx_resp, int txq_id,
+                         u16 start_idx)
+{
+       u16 status;
+       struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+       struct ieee80211_tx_info *info = NULL;
+       struct ieee80211_hdr *hdr = NULL;
+       u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+       int i, sh, idx;
+       u16 seq;
+       if (agg->wait_for_ba)
+               D_TX_REPLY("got tx response w/o block-ack\n");
+
+       agg->frame_count = tx_resp->frame_count;
+       agg->start_idx = start_idx;
+       agg->rate_n_flags = rate_n_flags;
+       agg->bitmap = 0;
+
+       /* num frames attempted by Tx command */
+       if (agg->frame_count == 1) {
+               /* Only one frame was attempted; no block-ack will arrive */
+               status = le16_to_cpu(frame_status[0].status);
+               idx = start_idx;
+
+               D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+                          agg->frame_count, agg->start_idx, idx);
+
+               info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
+               info->status.rates[0].count = tx_resp->failure_frame + 1;
+               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+               info->flags |= il4965_tx_status_to_mac80211(status);
+               il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+               D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+                          tx_resp->failure_frame);
+               D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+               agg->wait_for_ba = 0;
+       } else {
+               /* Two or more frames were attempted; expect block-ack */
+               u64 bitmap = 0;
+               int start = agg->start_idx;
+
+               /* Construct bit-map of pending frames within Tx win */
+               for (i = 0; i < agg->frame_count; i++) {
+                       u16 sc;
+                       status = le16_to_cpu(frame_status[i].status);
+                       seq = le16_to_cpu(frame_status[i].sequence);
+                       idx = SEQ_TO_IDX(seq);
+                       txq_id = SEQ_TO_QUEUE(seq);
+
+                       if (status &
+                           (AGG_TX_STATE_FEW_BYTES_MSK |
+                            AGG_TX_STATE_ABORT_MSK))
+                               continue;
+
+                       D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+                                  agg->frame_count, txq_id, idx);
+
+                       hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+                       if (!hdr) {
+                               IL_ERR("BUG_ON idx doesn't point to valid skb"
+                                      " idx=%d, txq_id=%d\n", idx, txq_id);
+                               return -1;
+                       }
+
+                       sc = le16_to_cpu(hdr->seq_ctrl);
+                       if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+                               IL_ERR("BUG_ON idx doesn't match seq control"
+                                      " idx=%d, seq_idx=%d, seq=%d\n", idx,
+                                      SEQ_TO_SN(sc), hdr->seq_ctrl);
+                               return -1;
+                       }
+
+                       D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+                                  SEQ_TO_SN(sc));
+
+                       sh = idx - start;
+                       if (sh > 64) {
+                               sh = (start - idx) + 0xff;
+                               bitmap = bitmap << sh;
+                               sh = 0;
+                               start = idx;
+                       } else if (sh < -64)
+                               sh = 0xff - (start - idx);
+                       else if (sh < 0) {
+                               sh = start - idx;
+                               start = idx;
+                               bitmap = bitmap << sh;
+                               sh = 0;
+                       }
+                       bitmap |= 1ULL << sh;
+                       D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+                                  (unsigned long long)bitmap);
+               }
+
+               agg->bitmap = bitmap;
+               agg->start_idx = start;
+               D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+                          agg->frame_count, agg->start_idx,
+                          (unsigned long long)agg->bitmap);
+
+               if (bitmap)
+                       agg->wait_for_ba = 1;
+       }
+       return 0;
+}
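As a concrete illustration of the window bookkeeping above: with agg->start_idx = 5 and frames reported at indexes 5, 6 and 8, the shifts are 0, 1 and 3, so the resulting bitmap is 0xb (binary 1011) and bit n stands for the frame at index start + n; the sh > 64 and sh < -64 branches only matter when indexes wrap around the 8-bit sequence space.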
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 * addr)
+{
+       int i;
+       int start = 0;
+       int ret = IL_INVALID_STATION;
+       unsigned long flags;
+
+       if (il->iw_mode == NL80211_IFTYPE_ADHOC)
+               start = IL_STA_ID;
+
+       if (is_broadcast_ether_addr(addr))
+               return il->ctx.bcast_sta_id;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       for (i = start; i < il->hw_params.max_stations; i++)
+               if (il->stations[i].used &&
+                   (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+                       ret = i;
+                       goto out;
+               }
+
+       D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+       /*
+        * More commands interacting with stations may arrive before we
+        * have finished processing the addition of a station.
+        */
+       if (ret != IL_INVALID_STATION &&
+           (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+            ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+             (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+               IL_ERR("Requested station info for sta %d before ready.\n",
+                      ret);
+               ret = IL_INVALID_STATION;
+       }
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+       return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+       if (il->iw_mode == NL80211_IFTYPE_STATION) {
+               return IL_AP_ID;
+       } else {
+               u8 *da = ieee80211_get_DA(hdr);
+               return il4965_find_station(il, da);
+       }
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int idx = SEQ_TO_IDX(sequence);
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_tx_info *info;
+       struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+       u32 status = le32_to_cpu(tx_resp->u.status);
+       int uninitialized_var(tid);
+       int sta_id;
+       int freed;
+       u8 *qc = NULL;
+       unsigned long flags;
+
+       if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+               IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+                      "is out of range [0-%d] %d %d\n", txq_id, idx,
+                      txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+               return;
+       }
+
+       txq->time_stamp = jiffies;
+       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+       memset(&info->status, 0, sizeof(info->status));
+
+       hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & 0xf;
+       }
+
+       sta_id = il4965_get_ra_sta_id(il, hdr);
+       if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+               IL_ERR("Station not known\n");
+               return;
+       }
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       if (txq->sched_retry) {
+               const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+               struct il_ht_agg *agg = NULL;
+               WARN_ON(!qc);
+
+               agg = &il->stations[sta_id].tid[tid].agg;
+
+               il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+               /* check if BAR is needed */
+               if ((tx_resp->frame_count == 1) &&
+                   !il4965_is_tx_success(status))
+                       info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+               if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+                       idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+                       D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+                                  "%d idx %d\n", scd_ssn, idx);
+                       freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+                       if (qc)
+                               il4965_free_tfds_in_queue(il, sta_id, tid,
+                                                         freed);
+
+                       if (il->mac80211_registered &&
+                           il_queue_space(&txq->q) > txq->q.low_mark &&
+                           agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+                               il_wake_queue(il, txq);
+               }
+       } else {
+               info->status.rates[0].count = tx_resp->failure_frame + 1;
+               info->flags |= il4965_tx_status_to_mac80211(status);
+               il4965_hwrate_to_tx_control(il,
+                                           le32_to_cpu(tx_resp->rate_n_flags),
+                                           info);
+
+               D_TX_REPLY("TXQ %d status %s (0x%08x) "
+                          "rate_n_flags 0x%x retries %d\n", txq_id,
+                          il4965_get_tx_fail_reason(status), status,
+                          le32_to_cpu(tx_resp->rate_n_flags),
+                          tx_resp->failure_frame);
+
+               freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+               if (qc && likely(sta_id != IL_INVALID_STATION))
+                       il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+               else if (sta_id == IL_INVALID_STATION)
+                       D_TX_REPLY("Station not known\n");
+
+               if (il->mac80211_registered &&
+                   il_queue_space(&txq->q) > txq->q.low_mark)
+                       il_wake_queue(il, txq);
+       }
+       if (qc && likely(sta_id != IL_INVALID_STATION))
+               il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+       il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il4965_beacon_notif *beacon = (void *)pkt->u.raw;
+       u8 rate __maybe_unused =
+           il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+       D_RX("beacon status %#x, retries:%d ibssmgr:%d "
+            "tsf:0x%.8x%.8x rate:%d\n",
+            le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+            beacon->beacon_notify_hdr.failure_frame,
+            le32_to_cpu(beacon->ibss_mgr_status),
+            le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+
+       il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void
+il4965_handler_setup(struct il_priv *il)
+{
+       /* Legacy Rx frames */
+       il->handlers[N_RX] = il4965_hdl_rx;
+       /* Tx response */
+       il->handlers[C_TX] = il4965_hdl_tx;
+       il->handlers[N_BEACON] = il4965_hdl_beacon;
+}
+
+static struct il_hcmd_ops il4965_hcmd = {
+       .rxon_assoc = il4965_send_rxon_assoc,
+       .commit_rxon = il4965_commit_rxon,
+       .set_rxon_chain = il4965_set_rxon_chain,
+};
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+
+       /*
+        * Since setting the RXON may have been deferred while
+        * performing the scan, fire one off if needed
+        */
+       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+               il_commit_rxon(il, ctx);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       struct ieee80211_vif *vif = ctx->vif;
+       struct ieee80211_conf *conf = NULL;
+       int ret = 0;
+
+       if (!vif || !il->is_open)
+               return;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       il_scan_cancel_timeout(il, 200);
+
+       conf = &il->hw->conf;
+
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       il_commit_rxon(il, ctx);
+
+       ret = il_send_rxon_timing(il, ctx);
+       if (ret)
+               IL_WARN("RXON timing failed - Attempting to continue.\n");
+
+       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+       il_set_rxon_ht(il, &il->current_ht_config);
+
+       if (il->cfg->ops->hcmd->set_rxon_chain)
+               il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+       D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+               vif->bss_conf.beacon_int);
+
+       if (vif->bss_conf.use_short_preamble)
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+               if (vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+       }
+
+       il_commit_rxon(il, ctx);
+
+       D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+               ctx->active.bssid_addr);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               il4965_send_beacon_cmd(il);
+               break;
+       default:
+               IL_ERR("%s Should not be called in %d mode\n", __func__,
+                      vif->type);
+               break;
+       }
+
+       /* The chain noise calibration will enable PM upon completion.
+        * If chain noise has already been run, then we need to enable
+        * power management here. */
+       if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+               il_power_update_mode(il, false);
+
+       /* Enable Rx differential gain and sensitivity calibrations */
+       il4965_chain_noise_reset(il);
+       il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       struct ieee80211_vif *vif = ctx->vif;
+       int ret = 0;
+
+       lockdep_assert_held(&il->mutex);
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       /* The following should be done only at AP bring up */
+       if (!il_is_associated_ctx(ctx)) {
+
+               /* RXON - unassoc (to set timing command) */
+               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+               il_commit_rxon(il, ctx);
+
+               /* RXON Timing */
+               ret = il_send_rxon_timing(il, ctx);
+               if (ret)
+                       IL_WARN("RXON timing failed - "
+                               "Attempting to continue.\n");
+
+               /* AP has all antennas */
+               il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+               il_set_rxon_ht(il, &il->current_ht_config);
+               if (il->cfg->ops->hcmd->set_rxon_chain)
+                       il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+               ctx->staging.assoc_id = 0;
+
+               if (vif->bss_conf.use_short_preamble)
+                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+                       if (vif->bss_conf.use_short_slot)
+                               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+                       else
+                               ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+               }
+               /* need to send beacon cmd before committing assoc RXON! */
+               il4965_send_beacon_cmd(il);
+               /* restore RXON assoc */
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+               il_commit_rxon(il, ctx);
+       }
+       il4965_send_beacon_cmd(il);
+}
+
+static struct il_hcmd_utils_ops il4965_hcmd_utils = {
+       .get_hcmd_size = il4965_get_hcmd_size,
+       .build_addsta_hcmd = il4965_build_addsta_hcmd,
+       .request_scan = il4965_request_scan,
+       .post_scan = il4965_post_scan,
+};
+
+static struct il_lib_ops il4965_lib = {
+       .set_hw_params = il4965_hw_set_hw_params,
+       .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+       .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+       .txq_free_tfd = il4965_hw_txq_free_tfd,
+       .txq_init = il4965_hw_tx_queue_init,
+       .handler_setup = il4965_handler_setup,
+       .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+       .init_alive_start = il4965_init_alive_start,
+       .load_ucode = il4965_load_bsm,
+       .dump_nic_error_log = il4965_dump_nic_error_log,
+       .dump_fh = il4965_dump_fh,
+       .set_channel_switch = il4965_hw_channel_switch,
+       .apm_ops = {
+                   .init = il_apm_init,
+                   .config = il4965_nic_config,
+                   },
+       .eeprom_ops = {
+                      .regulatory_bands = {
+                                           EEPROM_REGULATORY_BAND_1_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_2_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_3_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_4_CHANNELS,
+                                           EEPROM_REGULATORY_BAND_5_CHANNELS,
+                                           EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+                                           EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
+                      .acquire_semaphore = il4965_eeprom_acquire_semaphore,
+                      .release_semaphore = il4965_eeprom_release_semaphore,
+                      },
+       .send_tx_power = il4965_send_tx_power,
+       .update_chain_flags = il4965_update_chain_flags,
+       .temp_ops = {
+                    .temperature = il4965_temperature_calib,
+                    },
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       .debugfs_ops = {
+                       .rx_stats_read = il4965_ucode_rx_stats_read,
+                       .tx_stats_read = il4965_ucode_tx_stats_read,
+                       .general_stats_read = il4965_ucode_general_stats_read,
+                       },
+#endif
+};
+
+static const struct il_legacy_ops il4965_legacy_ops = {
+       .post_associate = il4965_post_associate,
+       .config_ap = il4965_config_ap,
+       .manage_ibss_station = il4965_manage_ibss_station,
+       .update_bcast_stations = il4965_update_bcast_stations,
+};
+
+struct ieee80211_ops il4965_hw_ops = {
+       .tx = il4965_mac_tx,
+       .start = il4965_mac_start,
+       .stop = il4965_mac_stop,
+       .add_interface = il_mac_add_interface,
+       .remove_interface = il_mac_remove_interface,
+       .change_interface = il_mac_change_interface,
+       .config = il_mac_config,
+       .configure_filter = il4965_configure_filter,
+       .set_key = il4965_mac_set_key,
+       .update_tkip_key = il4965_mac_update_tkip_key,
+       .conf_tx = il_mac_conf_tx,
+       .reset_tsf = il_mac_reset_tsf,
+       .bss_info_changed = il_mac_bss_info_changed,
+       .ampdu_action = il4965_mac_ampdu_action,
+       .hw_scan = il_mac_hw_scan,
+       .sta_add = il4965_mac_sta_add,
+       .sta_remove = il_mac_sta_remove,
+       .channel_switch = il4965_mac_channel_switch,
+       .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static const struct il_ops il4965_ops = {
+       .lib = &il4965_lib,
+       .hcmd = &il4965_hcmd,
+       .utils = &il4965_hcmd_utils,
+       .led = &il4965_led_ops,
+       .legacy = &il4965_legacy_ops,
+       .ieee80211_ops = &il4965_hw_ops,
+};
+
+static struct il_base_params il4965_base_params = {
+       .eeprom_size = IL4965_EEPROM_IMG_SIZE,
+       .num_of_queues = IL49_NUM_QUEUES,
+       .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+       .pll_cfg_val = 0,
+       .set_l0s = true,
+       .use_bsm = true,
+       .led_compensation = 61,
+       .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+       .wd_timeout = IL_DEF_WD_TIMEOUT,
+       .temperature_kelvin = true,
+       .ucode_tracing = true,
+       .sensitivity_calib_by_driver = true,
+       .chain_noise_calib_by_driver = true,
+};
+
+struct il_cfg il4965_cfg = {
+       .name = "Intel(R) Wireless WiFi Link 4965AGN",
+       .fw_name_pre = IL4965_FW_PRE,
+       .ucode_api_max = IL4965_UCODE_API_MAX,
+       .ucode_api_min = IL4965_UCODE_API_MIN,
+       .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+       .valid_tx_ant = ANT_AB,
+       .valid_rx_ant = ANT_ABC,
+       .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+       .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+       .ops = &il4965_ops,
+       .mod_params = &il4965_mod_params,
+       .base_params = &il4965_base_params,
+       .led_mode = IL_LED_BLINK,
+       /*
+        * Force use of chains B and C for scan RX on 5 GHz band
+        * because the device has off-channel reception on chain A.
+        */
+       .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644 (file)
index 0000000..7447231
--- /dev/null
@@ -0,0 +1,1309 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+
+extern struct il_mod_params il4965_mod_params;
+
+extern struct ieee80211_ops il4965_hw_ops;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+                              int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                                   dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+                                struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+                       struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE:  Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+                               int tx_fifo_id, int scd_retry);
+
+u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant_idx, u8 valid);
+
+/* rx */
+void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
+bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
+void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+                              bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+       return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
+int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+                            const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+                                 struct il_rxon_context *ctx,
+                                 struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+                              struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il,
+                                   struct il_rxon_context *ctx);
+int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+                          struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+                             struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+                           struct ieee80211_key_conf *keyconf,
+                           struct ieee80211_sta *sta, u32 iv32,
+                           u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+                           int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+                          int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+       return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+static inline __le32
+il4965_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+       return cpu_to_le32(flags | (u32) rate);
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                      struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_key_conf *keyconf,
+                               struct ieee80211_sta *sta, u32 iv32,
+                               u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           enum ieee80211_ampdu_mlme_action action,
+                           struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+                           u8 buf_size);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
+void il4965_mac_channel_switch(struct ieee80211_hw *hw,
+                              struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE                 1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE 7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND              (0x000000)
+#define IL49_RTC_INST_UPPER_BOUND              (0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND              (0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND              (0x80A000)
+
+#define IL49_RTC_INST_SIZE  (IL49_RTC_INST_UPPER_BOUND - \
+                               IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE  (IL49_RTC_DATA_UPPER_BOUND - \
+                               IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+       return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+               addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent).  The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp).  After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
+ *        must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 Kelvin).  The 8 (3 percent of 273) compensates for
+ * centering the 97/100 correction around 0 degrees Celsius.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
+ * current temperature with factory-measured temperatures when calculating
+ * txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN  (263)
+#define IL_TX_POWER_TEMPERATURE_MAX  (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+       ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+        (t) > IL_TX_POWER_TEMPERATURE_MAX)
+
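+/*
+ * Illustrative sketch only, not part of the driver: how the formula above
+ * could be evaluated once R1-R4 have been sign-extended to 32 bits.  The
+ * function name is hypothetical.
+ */
+static inline s32
+il4965_example_calib_temperature(s32 r1, s32 r2, s32 r3, s32 r4)
+{
+       s32 temp;
+
+       if (r3 == r1)           /* calibration values must differ */
+               return IL_TX_POWER_TEMPERATURE_MIN;
+
+       /* Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8 */
+       temp = 97 * TEMPERATURE_CALIB_A_VAL * (r4 - r2) / (r3 - r1);
+       temp = temp / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+       if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+               temp = IL_TX_POWER_TEMPERATURE_MIN;
+       return temp;
+}
+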
+/********************* END TEMPERATURE ***************************************/
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ *     1) EEPROM
+ *     2) "initialize" alive notification
+ *     3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1)  Regulatory information (max txpower and channel usage flags) is provided
+ *     separately for each channel that can possibly be supported by the 4965.
+ *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ *     (legacy) channels.
+ *
+ *     See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ *     for locations in EEPROM.
+ *
+ * 2)  Factory txpower calibration information is provided separately for
+ *     sub-bands of contiguous channels.  2.4 GHz has just one sub-band,
+ *     but 5 GHz has several sub-bands.
+ *
+ *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ *     See struct il4965_eeprom_calib_info (and the tree of structures
+ *     contained within it) for format, and struct il4965_eeprom for
+ *     locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1)  Temperature calculation parameters.
+ *
+ * 2)  Power supply voltage measurement.
+ *
+ * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1)  Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ *     Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ *     If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ *     2 transmitters will be used simultaneously; driver must reduce the
+ *     regulatory limit by 3 dB (half-power) for each transmitter, so the
+ *     combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
+ *     reduce target txpower if necessary.
+ *
+ *     Backoff values below are in 1/2 dB units (equivalent to steps in
+ *     txpower gain tables):
+ *
+ *     OFDM 6 - 36 MBit:  10 steps (5 dB)
+ *     OFDM 48 MBit:      15 steps (7.5 dB)
+ *     OFDM 54 MBit:      17 steps (8.5 dB)
+ *     OFDM 60 MBit:      20 steps (10 dB)
+ *     CCK all rates:     10 steps (5 dB)
+ *
+ *     Backoff values apply to saturation txpower on a per-transmitter basis;
+ *     when using MIMO (2 transmitters), each transmitter uses the same
+ *     saturation level provided in EEPROM, and the same backoff values;
+ *     no reduction (such as with regulatory txpower limits) is required.
+ *
+ *     Saturation and Backoff values apply equally to 20 MHz (legacy) channel
+ *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ *     factory measurement for ht40 channels.
+ *
+ *     The result of this step is the final target txpower.  The rest of
+ *     the steps figure out the proper settings for the device to achieve
+ *     that target txpower.
+ *
+ *
+ * 3)  Determine (EEPROM) calibration sub band for the target channel, by
+ *     comparing against first and last channels in each sub band
+ *     (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
+ *     referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ *     Interpolation is based on difference between target channel's frequency
+ *     and the sample channels' frequencies.  Since channel numbers are based
+ *     on frequency (5 MHz between each channel number), this is equivalent
+ *     to interpolating based on channel number differences.
+ *
+ *     Note that the sample channels may or may not be the channels at the
+ *     edges of the sub band.  The target channel may be "outside" of the
+ *     span of the sampled channels.
+ *
+ *     Driver may choose the pair (for 2 Tx chains) of measurements (see
+ *     struct il4965_eeprom_calib_ch_info) for which the actual measured
+ *     txpower comes closest to the desired txpower.  Usually, though,
+ *     the middle set of measurements is closest to the regulatory limits,
+ *     and is therefore a good choice for all txpower calculations (this
+ *     assumes that high accuracy is needed for maximizing legal txpower,
+ *     while lower txpower configurations do not need as much accuracy).
+ *
+ *     Driver should interpolate both members of the chosen measurement pair,
+ *     i.e. for both Tx chains (radio transmitters), unless the driver knows
+ *     that only one of the chains will be used (e.g. only one tx antenna
+ *     connected, but this should be unusual).  The rate scaling algorithm
+ *     switches antennas to find best performance, so both Tx chains will
+ *     be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ *     Driver should interpolate factory values for temperature, gain table
+ *     idx, and actual power.  The power amplifier detector values are
+ *     not used by the driver.
+ *
+ *     Sanity check:  If the target channel happens to be one of the sample
+ *     channels, the results should agree with the sample channel's
+ *     measurements!
+ *
+ *
+ * 5)  Find difference between desired txpower and (interpolated)
+ *     factory-measured txpower.  Using (interpolated) factory gain table idx
+ *     (shown elsewhere) as a starting point, adjust this idx lower to
+ *     increase txpower, or higher to decrease txpower, until the target
+ *     txpower is reached.  Each step in the gain table is 1/2 dB.
+ *
+ *     For example, if factory measured txpower is 16 dBm, and target txpower
+ *     is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ *     by 3 dB.
+ *
+ *
+ * 6)  Find difference between current device temperature and (interpolated)
+ *     factory-measured temperature for sub-band.  Factory values are in
+ *     degrees Celsius.  To calculate current temperature, see comments for
+ *     "4965 temperature calculation".
+ *
+ *     If current temperature is higher than factory temperature, driver must
+ *     increase gain (lower gain table idx), and vice versa.
+ *
+ *     Temperature affects gain differently for different channels:
+ *
+ *     2.4 GHz all channels:  3.5 degrees per half-dB step
+ *     5 GHz channels 34-43:  4.5 degrees per half-dB step
+ *     5 GHz channels >= 44:  4.0 degrees per half-dB step
+ *
+ *     NOTE:  Temperature can increase rapidly when transmitting, especially
+ *            with heavy traffic at high txpowers.  Driver should update
+ *            temperature calculations often under these conditions to
+ *            maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7)  Find difference between current power supply voltage indicator
+ *     (from "initialize alive") and factory-measured power supply voltage
+ *     indicator (EEPROM).
+ *
+ *     If the current voltage is higher (indicator is lower) than factory
+ *     voltage, gain should be reduced (gain table idx increased) by:
+ *
+ *     (eeprom - current) / 7
+ *
+ *     If the current voltage is lower (indicator is higher) than factory
+ *     voltage, gain should be increased (gain table idx decreased) by:
+ *
+ *     2 * (current - eeprom) / 7
+ *
+ *     If number of idx steps in either direction turns out to be > 2,
+ *     something is wrong ... just use 0.
+ *
+ *     NOTE:  Voltage compensation is independent of band/channel.
+ *
+ *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
+ *            to be constant after this initial measurement.  Voltage
+ *            compensation for txpower (number of steps in gain table)
+ *            may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8)  If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ *     adjust txpower for each transmitter chain, so txpower is balanced
+ *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
+ *     values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ *     Group 0:  5 GHz channel 34-43
+ *     Group 1:  5 GHz channel 44-70
+ *     Group 2:  5 GHz channel 71-124
+ *     Group 3:  5 GHz channel 125-200
+ *     Group 4:  2.4 GHz all channels
+ *
+ *     Add the tx_atten[group][chain] value to the idx for the target chain.
+ *     The values are signed, but are in pairs of 0 and a non-negative number,
+ *     so as to reduce gain (if necessary) of the "hotter" channel.  This
+ *     avoids any need to double-check for regulatory compliance after
+ *     this step.
+ *
+ *
+ * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ *     value to the idx:
+ *
+ *     Hardware rev B:  9 steps (4.5 dB)
+ *     Hardware rev C:  5 steps (2.5 dB)
+ *
+ *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ *     bits [3:2], 1 = B, 2 = C.
+ *
+ *     NOTE:  This compensation is in addition to any saturation backoff that
+ *            might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ *     Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ *     location(s) in command (struct il4965_txpowertable_cmd).
+ */
+
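+/*
+ * Illustrative sketch only (hypothetical helper): the linear interpolation
+ * described in step 4 above, between two factory-measured sample channels
+ * (x1, y1) and (x2, y2), evaluated at channel number x.
+ */
+static inline s32
+il4965_example_interpolate(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+       if (x2 == x1)
+               return y1;
+       /* channel numbers are 5 MHz apart, so they stand in for frequency */
+       return y1 + (x - x1) * (y2 - y1) / (x2 - x1);
+}
+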
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device.  That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V   (7)
+
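+/*
+ * Illustrative sketch only (hypothetical helper): the voltage compensation
+ * rule from step 7 above, expressed in gain-table steps.  Positive values
+ * reduce gain (higher idx); arguments are the EEPROM and "initialize alive"
+ * voltage indicators.
+ */
+static inline s32
+il4965_example_voltage_compensation(s32 eeprom_volt, s32 current_volt)
+{
+       s32 comp;
+
+       if (current_volt <= eeprom_volt)        /* current voltage is higher */
+               comp = (eeprom_volt - current_volt) /
+                      TX_POWER_IL_VOLTAGE_CODES_PER_03V;
+       else                                    /* current voltage is lower */
+               comp = -(2 * (current_volt - eeprom_volt)) /
+                      TX_POWER_IL_VOLTAGE_CODES_PER_03V;
+
+       /* more than 2 steps in either direction means something is wrong */
+       if (comp < -2 || comp > 2)
+               comp = 0;
+       return comp;
+}
+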
+/**
+ * Gain tables.
+ *
+ * The following tables contain pair of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
+ * are *relative* steps, not indications of absolute output power.  Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower.  This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration).  A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX                (0)     /* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT      (-9)    /* highest gain, lowest idx, 5 */
+
+/**
+ * 2.4 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *   0        110         0x3f      (highest gain)
+ *   1        104         0x3f
+ *   2         98         0x3f
+ *   3        110         0x3e
+ *   4        104         0x3e
+ *   5         98         0x3e
+ *   6        110         0x3d
+ *   7        104         0x3d
+ *   8         98         0x3d
+ *   9        110         0x3c
+ *  10        104         0x3c
+ *  11         98         0x3c
+ *  12        110         0x3b
+ *  13        104         0x3b
+ *  14         98         0x3b
+ *  15        110         0x3a
+ *  16        104         0x3a
+ *  17         98         0x3a
+ *  18        110         0x39
+ *  19        104         0x39
+ *  20         98         0x39
+ *  21        110         0x38
+ *  22        104         0x38
+ *  23         98         0x38
+ *  24        110         0x37
+ *  25        104         0x37
+ *  26         98         0x37
+ *  27        110         0x36
+ *  28        104         0x36
+ *  29         98         0x36
+ *  30        110         0x35
+ *  31        104         0x35
+ *  32         98         0x35
+ *  33        110         0x34
+ *  34        104         0x34
+ *  35         98         0x34
+ *  36        110         0x33
+ *  37        104         0x33
+ *  38         98         0x33
+ *  39        110         0x32
+ *  40        104         0x32
+ *  41         98         0x32
+ *  42        110         0x31
+ *  43        104         0x31
+ *  44         98         0x31
+ *  45        110         0x30
+ *  46        104         0x30
+ *  47         98         0x30
+ *  48        110          0x6
+ *  49        104          0x6
+ *  50         98          0x6
+ *  51        110          0x5
+ *  52        104          0x5
+ *  53         98          0x5
+ *  54        110          0x4
+ *  55        104          0x4
+ *  56         98          0x4
+ *  57        110          0x3
+ *  58        104          0x3
+ *  59         98          0x3
+ *  60        110          0x2
+ *  61        104          0x2
+ *  62         98          0x2
+ *  63        110          0x1
+ *  64        104          0x1
+ *  65         98          0x1
+ *  66        110          0x0
+ *  67        104          0x0
+ *  68         98          0x0
+ *  69         97            0
+ *  70         96            0
+ *  71         95            0
+ *  72         94            0
+ *  73         93            0
+ *  74         92            0
+ *  75         91            0
+ *  76         90            0
+ *  77         89            0
+ *  78         88            0
+ *  79         87            0
+ *  80         86            0
+ *  81         85            0
+ *  82         84            0
+ *  83         83            0
+ *  84         82            0
+ *  85         81            0
+ *  86         80            0
+ *  87         79            0
+ *  88         78            0
+ *  89         77            0
+ *  90         76            0
+ *  91         75            0
+ *  92         74            0
+ *  93         73            0
+ *  94         72            0
+ *  95         71            0
+ *  96         70            0
+ *  97         69            0
+ *  98         68            0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *  -9        123         0x3F      (highest gain)
+ *  -8        117         0x3F
+ *  -7        110         0x3F
+ *  -6        104         0x3F
+ *  -5         98         0x3F
+ *  -4        110         0x3E
+ *  -3        104         0x3E
+ *  -2         98         0x3E
+ *  -1        110         0x3D
+ *   0        104         0x3D
+ *   1         98         0x3D
+ *   2        110         0x3C
+ *   3        104         0x3C
+ *   4         98         0x3C
+ *   5        110         0x3B
+ *   6        104         0x3B
+ *   7         98         0x3B
+ *   8        110         0x3A
+ *   9        104         0x3A
+ *  10         98         0x3A
+ *  11        110         0x39
+ *  12        104         0x39
+ *  13         98         0x39
+ *  14        110         0x38
+ *  15        104         0x38
+ *  16         98         0x38
+ *  17        110         0x37
+ *  18        104         0x37
+ *  19         98         0x37
+ *  20        110         0x36
+ *  21        104         0x36
+ *  22         98         0x36
+ *  23        110         0x35
+ *  24        104         0x35
+ *  25         98         0x35
+ *  26        110         0x34
+ *  27        104         0x34
+ *  28         98         0x34
+ *  29        110         0x33
+ *  30        104         0x33
+ *  31         98         0x33
+ *  32        110         0x32
+ *  33        104         0x32
+ *  34         98         0x32
+ *  35        110         0x31
+ *  36        104         0x31
+ *  37         98         0x31
+ *  38        110         0x30
+ *  39        104         0x30
+ *  40         98         0x30
+ *  41        110         0x25
+ *  42        104         0x25
+ *  43         98         0x25
+ *  44        110         0x24
+ *  45        104         0x24
+ *  46         98         0x24
+ *  47        110         0x23
+ *  48        104         0x23
+ *  49         98         0x23
+ *  50        110         0x22
+ *  51        104         0x18
+ *  52         98         0x18
+ *  53        110         0x17
+ *  54        104         0x17
+ *  55         98         0x17
+ *  56        110         0x16
+ *  57        104         0x16
+ *  58         98         0x16
+ *  59        110         0x15
+ *  60        104         0x15
+ *  61         98         0x15
+ *  62        110         0x14
+ *  63        104         0x14
+ *  64         98         0x14
+ *  65        110         0x13
+ *  66        104         0x13
+ *  67         98         0x13
+ *  68        110         0x12
+ *  69        104         0x08
+ *  70         98         0x08
+ *  71        110         0x07
+ *  72        104         0x07
+ *  73         98         0x07
+ *  74        110         0x06
+ *  75        104         0x06
+ *  76         98         0x06
+ *  77        110         0x05
+ *  78        104         0x05
+ *  79         98         0x05
+ *  80        110         0x04
+ *  81        104         0x04
+ *  82         98         0x04
+ *  83        110         0x03
+ *  84        104         0x03
+ *  85         98         0x03
+ *  86        110         0x02
+ *  87        104         0x02
+ *  88         98         0x02
+ *  89        110         0x01
+ *  90        104         0x01
+ *  91         98         0x01
+ *  92        110         0x00
+ *  93        104         0x00
+ *  94         98         0x00
+ *  95         93         0x00
+ *  96         88         0x00
+ *  97         83         0x00
+ *  98         78         0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated.  These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24   (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52   (34)
+#define IL_TX_POWER_REGULATORY_MIN          (0)
+#define IL_TX_POWER_REGULATORY_MAX          (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion.  This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it is violating neither the saturation limit,
+ * nor the regulatory limit, when calculating Tx power settings for various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24   (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52   (38)
+#define IL_TX_POWER_SATURATION_MIN          (20)
+#define IL_TX_POWER_SATURATION_MAX          (50)
+
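+/*
+ * Illustrative sketch only (hypothetical helper): applying the sanity
+ * checks above to an EEPROM saturation value, falling back to the per-band
+ * default when the stored value is out of range.
+ */
+static inline s32
+il4965_example_saturation_power(s32 eeprom_sat, bool is_52ghz)
+{
+       if (eeprom_sat < IL_TX_POWER_SATURATION_MIN ||
+           eeprom_sat > IL_TX_POWER_SATURATION_MAX)
+               return is_52ghz ? IL_TX_POWER_DEFAULT_SATURATION_52 :
+                                 IL_TX_POWER_DEFAULT_SATURATION_24;
+       return eeprom_sat;
+}
+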
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain.  Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below.  If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+       CALIB_CH_GROUP_1 = 0,
+       CALIB_CH_GROUP_2 = 1,
+       CALIB_CH_GROUP_3 = 2,
+       CALIB_CH_GROUP_4 = 3,
+       CALIB_CH_GROUP_5 = 4,
+       CALIB_CH_GROUP_MAX
+};
+
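+/*
+ * Illustrative sketch only (hypothetical helper): mapping a channel number
+ * to one of the calibration groups above.  2.4 GHz channel numbers (1-14)
+ * do not overlap the 5 GHz ranges, so a plain range check is enough.
+ */
+static inline int
+il4965_example_tx_atten_grp(u16 channel)
+{
+       if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+               return CALIB_CH_GROUP_5;
+       if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+               return CALIB_CH_GROUP_1;
+       if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+               return CALIB_CH_GROUP_2;
+       if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+               return CALIB_CH_GROUP_3;
+       if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+           channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+               return CALIB_CH_GROUP_4;
+       return -1;              /* channel not covered by any group */
+}
+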
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues).  All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device.  When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS 7
+#define IL49_CMD_FIFO_NUM      4
+#define IL49_NUM_QUEUES        16
+#define IL49_NUM_AMPDU_QUEUES  8
+
+/**
+ * struct il4965_schedq_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue.  If the TFD idx is 0-63, the driver
+ * must duplicate the byte count entry in corresponding idx 256-319.
+ *
+ * padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+       __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+       u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
+
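+/*
+ * Illustrative sketch only (hypothetical helper): filling one byte count
+ * entry.  Per the note above, entries 0-63 are mirrored at 256-319 so a
+ * 64-TFD Tx win never wraps within the table.
+ */
+static inline void
+il4965_example_set_byte_cnt(struct il4965_scd_bc_tbl *bc_tbl, int tfd_idx,
+                           u16 byte_cnt)
+{
+       bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
+       if (tfd_idx < 64)
+               bc_tbl->tfd_offset[tfd_idx + 256] = cpu_to_le16(byte_cnt);
+}
+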
+#define IL4965_RTC_INST_LOWER_BOUND            (0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET     44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT  0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN   0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN    0x02
+
+#define IL4965_DEFAULT_TX_RETRY  15
+
+/* EEPROM */
+#define IL4965_FIRST_AMPDU_QUEUE       10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+void il4965_calib_free_results(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos);
+ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos);
+ssize_t il4965_ucode_general_stats_read(struct file *file,
+                                       char __user *user_buf, size_t count,
+                                       loff_t *ppos);
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND                   (0x1000)
+#define FH49_MEM_UPPER_BOUND                   (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG                (FH49_MEM_LOWER_BOUND + 0x97C)
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame).  These 16 pointer registers are offset by 0x04
+ * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND          (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND          (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x)  (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
+
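+/*
+ * Illustrative sketch only: programming one TFD CB base pointer, assuming
+ * the il_wr() register helper is visible here.  Only address bits [35:8]
+ * of the 256-byte aligned DMA address are written, hence the shift.
+ */
+static inline void
+il4965_example_set_cbbc(struct il_priv *il, int txq_id, dma_addr_t tfd_dma)
+{
+       il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), tfd_dma >> 8);
+}
+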
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ *     entries (although any power of 2, up to 4096, is selectable by driver).
+ *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ *     (typically 4K, although 8K or 16K are also selectable by driver).
+ *     Driver sets up RB size and number of RBDs in the CB via Rx config
+ *     register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ *     Bit fields within one RBD:
+ *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ *     Driver sets physical address [35:8] of base of RBD circular buffer
+ *     into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ *     (RBs) have been filled, via a "write pointer", actually the idx of
+ *     the RB's corresponding RBD within the circular buffer.  Driver sets
+ *     physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ *     Bit fields in lower dword of Rx status buffer (upper dword not used
+ *     by driver; see struct il4965_shared, val0):
+ *     31-12:  Not used by driver
+ *     11- 0:  Index of last filled Rx buffer descriptor
+ *             (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer.  This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1!  See below).
+ * NOTE:  4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the idx of the latest filled RBD.  The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process.  When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx.  For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0.  Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND     (FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND     (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0           (FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG        (FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG        (FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR        (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
+
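+/*
+ * Illustrative sketch only: handing the driver's Rx write idx to the
+ * device.  Per the note above, the value is rounded down to a multiple of
+ * 8; assumes the il_wr() register helper is visible here.
+ */
+static inline void
+il4965_example_rx_queue_update_wptr(struct il_priv *il, u32 write)
+{
+       il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, write & ~0x7);
+}
+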
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG      for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ *        '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ *        typical value 0x10 (about 1/2 msec)
+ *  3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND      (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND      (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0            (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0)  /* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000)  /* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000)        /* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000)   /* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000)  /* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS     (20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS   (4)
+#define RX_RB_TIMEOUT  (0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
+
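+/*
+ * Illustrative sketch only: one way to compose the channel 0 Rx config
+ * value described above (4K buffers, IRQ to host, 2^rb_count_log2 RBDs),
+ * assuming the il_wr() register helper is visible here.
+ */
+static inline void
+il4965_example_rx_config(struct il_priv *il, u32 rb_count_log2)
+{
+       il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+             FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+             FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+             FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+             (rb_count_log2 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
+             (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
+}
+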
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ *  24:  1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND           (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND           (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG       (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG    (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+                                       (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE    (0x01000000)
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT      28
+
+/* TFDB  Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND       (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND       (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl)  (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl)  (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ *     3: Enable internal DMA requests (1, normal operation), disable (0)
+ *  2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND  (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND  (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM                            (7)
+#define FH50_TCSR_CHNL_NUM                            (8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl)    \
+               (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl)    \
+               (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl)   \
+               (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF               (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV               (0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE  (0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT    (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD   (0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD    (0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT     (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD    (0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD     (0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE     (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    (0x80000000)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY   (0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT    (0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID   (0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM               (20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX               (12)
+
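+/*
+ * Illustrative sketch only: enabling one Tx DMA channel as the TCSR
+ * comment above describes, assuming the il_wr() register helper is
+ * visible here.
+ */
+static inline void
+il4965_example_enable_tx_chnl(struct il_priv *il, int chnl)
+{
+       il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl),
+             FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+             FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+}
+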
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24:  1 = Channel buffers empty (channel 7:0)
+ * 23-16:  1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND          (FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND          (FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG                (FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31:  Indicates an address error when accessing internal memory
+ *     uCode/driver must write "1" in order to clear this flag
+ * 30:  Indicates that Host did not send the expected number of dwords to FH
+ *     uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ *     command was received from the scheduler while the TRB was already full
+ *     with the previous command
+ *     uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ *     bit is set, it indicates that the FH has received a full indication
+ *     from the RTC TxFIFO and the current value of the TxCredit counter was
+ *     not equal to zero. This means that the credit mechanism was not
+ *     synchronized to the TxFIFO status
+ *     uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG         (FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL         (9)
+#define FH49_SRVC_LOWER_BOUND  (FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND  (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+               (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG       (FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000      /* 4k */
+
+#endif /* __il_4965_h__ */
index aef65cd..05bd375 100644 (file)
@@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
        tristate
        select FW_LOADER
        select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
        select MAC80211_LEDS
 
 menu "Debugging Options"
-       depends on IWLWIFI_LEGACY
+       depends on IWLEGACY
 
-config IWLWIFI_LEGACY_DEBUG
-       bool "Enable full debugging output in 4965 and 3945 drivers"
-       depends on IWLWIFI_LEGACY
+config IWLEGACY_DEBUG
+       bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+       depends on IWLEGACY
        ---help---
-         This option will enable debug tracing output for the iwlwifilegacy
+         This option will enable debug tracing output for the iwlegacy
          drivers.
 
          This will result in the kernel module being ~100k larger.  You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
                  % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
 
          You can find the list of debug mask values in:
-                 drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+                 drivers/net/wireless/iwlegacy/common.h
 
          If this is your first time using this driver, you should say Y here
          as the debug information can assist others in helping you resolve
          any problems you may encounter.
 
-config IWLWIFI_LEGACY_DEBUGFS
-        bool "4965 and 3945 debugfs support"
-        depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+config IWLEGACY_DEBUGFS
+        bool "iwlegacy (iwl 3945/4965) debugfs support"
+        depends on IWLEGACY && MAC80211_DEBUGFS
         ---help---
-         Enable creation of debugfs files for the iwlwifilegacy drivers. This
+         Enable creation of debugfs files for the iwlegacy drivers. This
          is a low-impact option that allows getting insight into the
          driver's state at runtime.
 
-config IWLWIFI_LEGACY_DEVICE_TRACING
-       bool "iwlwifilegacy legacy device access tracing"
-       depends on IWLWIFI_LEGACY
-       depends on EVENT_TRACING
-       help
-         Say Y here to trace all commands, including TX frames and IO
-         accesses, sent to the device. If you say yes, iwlwifilegacy will
-         register with the ftrace framework for event tracing and dump
-         all this information to the ringbuffer, you may need to
-         increase the ringbuffer size. See the ftrace documentation
-         for more information.
-
-         When tracing is not enabled, this option still has some
-         (though rather small) overhead.
-
-         If unsure, say Y so we can help you better when problems
-         occur.
 endmenu
 
 config IWL4965
        tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
        depends on PCI && MAC80211
-       select IWLWIFI_LEGACY
+       select IWLEGACY
        ---help---
          This option enables support for
 
@@ -93,7 +76,7 @@ config IWL4965
 config IWL3945
        tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
        depends on PCI && MAC80211
-       select IWLWIFI_LEGACY
+       select IWLEGACY
        ---help---
          Select to build the driver supporting the:
 
index d56aeb3..c985a01 100644 (file)
@@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY)   += iwl-legacy.o
-iwl-legacy-objs                := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwl-legacy-objs                += iwl-rx.o iwl-tx.o iwl-sta.o
-iwl-legacy-objs                += iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+obj-$(CONFIG_IWLEGACY) += iwlegacy.o
+iwlegacy-objs          := common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
 
-iwl-legacy-objs += $(iwl-legacy-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
+iwlegacy-objs += $(iwlegacy-m)
 
 # 4965
 obj-$(CONFIG_IWL4965)  += iwl4965.o
-iwl4965-objs           := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
-iwl4965-objs           += iwl-4965-ucode.o iwl-4965-tx.o
-iwl4965-objs           += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs           += iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+iwl4965-objs           := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
 
 # 3945
 obj-$(CONFIG_IWL3945)  += iwl3945.o
-iwl3945-objs           := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-objs           := 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
 
 ccflags-y += -D__CHECK_ENDIAN__
similarity index 79%
rename from drivers/net/wireless/iwlegacy/iwl-commands.h
rename to drivers/net/wireless/iwlegacy/commands.h
index 8990405..25dd7d2 100644 (file)
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
- * Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
- */
 
-#ifndef __iwl_legacy_commands_h__
-#define __iwl_legacy_commands_h__
+#ifndef __il_commands_h__
+#define __il_commands_h__
 
-struct iwl_priv;
+#include <linux/ieee80211.h>
 
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver)   (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver)     (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver)  ((ver) & 0x000000FF)
+struct il_priv;
 
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IL_UCODE_MAJOR(ver)    (((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver)    (((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver)      (((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver)   ((ver) & 0x000000FF)
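
A quick host-side illustration of how the four fields unpack from one version word; the macros are copied from the definitions above and the version value is made up.

#include <stdint.h>
#include <stdio.h>

/* Copies of the IL_UCODE_* accessors defined above. */
#define IL_UCODE_MAJOR(ver)    (((ver) & 0xFF000000) >> 24)
#define IL_UCODE_MINOR(ver)    (((ver) & 0x00FF0000) >> 16)
#define IL_UCODE_API(ver)      (((ver) & 0x0000FF00) >> 8)
#define IL_UCODE_SERIAL(ver)   ((ver) & 0x000000FF)

int main(void)
{
        uint32_t ver = 0x02010803;      /* made-up version word */

        printf("uCode %u.%u, API %u, serial %u\n",
               (unsigned)IL_UCODE_MAJOR(ver), (unsigned)IL_UCODE_MINOR(ver),
               (unsigned)IL_UCODE_API(ver), (unsigned)IL_UCODE_SERIAL(ver));
        return 0;
}
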
 
 /* Tx rates */
-#define IWL_CCK_RATES  4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES  (IWL_CCK_RATES + IWL_OFDM_RATES)
+#define IL_CCK_RATES   4
+#define IL_OFDM_RATES  8
+#define IL_MAX_RATES   (IL_CCK_RATES + IL_OFDM_RATES)
 
 enum {
-       REPLY_ALIVE = 0x1,
-       REPLY_ERROR = 0x2,
+       N_ALIVE = 0x1,
+       N_ERROR = 0x2,
 
        /* RXON and QOS commands */
-       REPLY_RXON = 0x10,
-       REPLY_RXON_ASSOC = 0x11,
-       REPLY_QOS_PARAM = 0x13,
-       REPLY_RXON_TIMING = 0x14,
+       C_RXON = 0x10,
+       C_RXON_ASSOC = 0x11,
+       C_QOS_PARAM = 0x13,
+       C_RXON_TIMING = 0x14,
 
        /* Multi-Station support */
-       REPLY_ADD_STA = 0x18,
-       REPLY_REMOVE_STA = 0x19,
+       C_ADD_STA = 0x18,
+       C_REM_STA = 0x19,
 
        /* Security */
-       REPLY_WEPKEY = 0x20,
+       C_WEPKEY = 0x20,
 
        /* RX, TX, LEDs */
-       REPLY_3945_RX = 0x1b,           /* 3945 only */
-       REPLY_TX = 0x1c,
-       REPLY_RATE_SCALE = 0x47,        /* 3945 only */
-       REPLY_LEDS_CMD = 0x48,
-       REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+       N_3945_RX = 0x1b,       /* 3945 only */
+       C_TX = 0x1c,
+       C_RATE_SCALE = 0x47,    /* 3945 only */
+       C_LEDS = 0x48,
+       C_TX_LINK_QUALITY_CMD = 0x4e,   /* for 4965 */
 
        /* 802.11h related */
-       REPLY_CHANNEL_SWITCH = 0x72,
-       CHANNEL_SWITCH_NOTIFICATION = 0x73,
-       REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
-       SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+       C_CHANNEL_SWITCH = 0x72,
+       N_CHANNEL_SWITCH = 0x73,
+       C_SPECTRUM_MEASUREMENT = 0x74,
+       N_SPECTRUM_MEASUREMENT = 0x75,
 
        /* Power Management */
-       POWER_TABLE_CMD = 0x77,
-       PM_SLEEP_NOTIFICATION = 0x7A,
-       PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+       C_POWER_TBL = 0x77,
+       N_PM_SLEEP = 0x7A,
+       N_PM_DEBUG_STATS = 0x7B,
 
        /* Scan commands and notifications */
-       REPLY_SCAN_CMD = 0x80,
-       REPLY_SCAN_ABORT_CMD = 0x81,
-       SCAN_START_NOTIFICATION = 0x82,
-       SCAN_RESULTS_NOTIFICATION = 0x83,
-       SCAN_COMPLETE_NOTIFICATION = 0x84,
+       C_SCAN = 0x80,
+       C_SCAN_ABORT = 0x81,
+       N_SCAN_START = 0x82,
+       N_SCAN_RESULTS = 0x83,
+       N_SCAN_COMPLETE = 0x84,
 
        /* IBSS/AP commands */
-       BEACON_NOTIFICATION = 0x90,
-       REPLY_TX_BEACON = 0x91,
+       N_BEACON = 0x90,
+       C_TX_BEACON = 0x91,
 
        /* Miscellaneous commands */
-       REPLY_TX_PWR_TABLE_CMD = 0x97,
+       C_TX_PWR_TBL = 0x97,
 
        /* Bluetooth device coexistence config command */
-       REPLY_BT_CONFIG = 0x9b,
+       C_BT_CONFIG = 0x9b,
 
        /* Statistics */
-       REPLY_STATISTICS_CMD = 0x9c,
-       STATISTICS_NOTIFICATION = 0x9d,
+       C_STATS = 0x9c,
+       N_STATS = 0x9d,
 
        /* RF-KILL commands and notifications */
-       CARD_STATE_NOTIFICATION = 0xa1,
+       N_CARD_STATE = 0xa1,
 
        /* Missed beacons notification */
-       MISSED_BEACONS_NOTIFICATION = 0xa2,
+       N_MISSED_BEACONS = 0xa2,
 
-       REPLY_CT_KILL_CONFIG_CMD = 0xa4,
-       SENSITIVITY_CMD = 0xa8,
-       REPLY_PHY_CALIBRATION_CMD = 0xb0,
-       REPLY_RX_PHY_CMD = 0xc0,
-       REPLY_RX_MPDU_CMD = 0xc1,
-       REPLY_RX = 0xc3,
-       REPLY_COMPRESSED_BA = 0xc5,
+       C_CT_KILL_CONFIG = 0xa4,
+       C_SENSITIVITY = 0xa8,
+       C_PHY_CALIBRATION = 0xb0,
+       N_RX_PHY = 0xc0,
+       N_RX_MPDU = 0xc1,
+       N_RX = 0xc3,
+       N_COMPRESSED_BA = 0xc5,
 
-       REPLY_MAX = 0xff
+       IL_CN_MAX = 0xff
 };
 
 /******************************************************************************
@@ -163,25 +159,25 @@ enum {
  *
  *****************************************************************************/
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
 
 #define SEQ_TO_QUEUE(s)        (((s) >> 8) & 0x1f)
 #define QUEUE_TO_SEQ(q)        (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s)        ((s) & 0xff)
-#define INDEX_TO_SEQ(i)        ((i) & 0xff)
+#define SEQ_TO_IDX(s)  ((s) & 0xff)
+#define IDX_TO_SEQ(i)  ((i) & 0xff)
 #define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
 #define SEQ_RX_FRAME   cpu_to_le16(0x8000)
 
 /**
- * struct iwl_cmd_header
+ * struct il_cmd_header
  *
  * This header format appears in the beginning of each command sent from the
  * driver, and each response/notification received from uCode.
  */
-struct iwl_cmd_header {
-       u8 cmd;         /* Command ID:  REPLY_RXON, etc. */
-       u8 flags;       /* 0:5 reserved, 6 abort, 7 internal */
+struct il_cmd_header {
+       u8 cmd;                 /* Command ID:  C_RXON, etc. */
+       u8 flags;               /* 0:5 reserved, 6 abort, 7 internal */
        /*
         * The driver sets up the sequence number to values of its choosing.
         * uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
         * There is one exception:  uCode sets bit 15 when it originates
         * the response/notification, i.e. when the response/notification
         * is not a direct response to a command sent by the driver.  For
-        * example, uCode issues REPLY_3945_RX when it sends a received frame
+        * example, uCode issues N_3945_RX when it sends a received frame
         * to the driver; it is not a direct response to any driver command.
         *
         * The Linux driver uses the following format:
         *
-        *  0:7         tfd index - position within TX queue
-        *  8:12        TX queue id
-        *  13          reserved
-        *  14          huge - driver sets this to indicate command is in the
-        *              'huge' storage at the end of the command buffers
-        *  15          unsolicited RX or uCode-originated notification
-       */
+        *  0:7         tfd idx - position within TX queue
+        *  8:12        TX queue id
+        *  13          reserved
+        *  14          huge - driver sets this to indicate command is in the
+        *              'huge' storage at the end of the command buffers
+        *  15          unsolicited RX or uCode-originated notification
+        */
        __le16 sequence;
 
        /* command or response/notification data follows immediately */
        u8 data[0];
 } __packed;
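
To make the sequence-field layout above concrete, here is a small host-order sketch composing and decomposing a value with the helpers defined earlier in this file; the driver itself stores the result as __le16 via cpu_to_le16(), and the bit names below are local to the sketch.

#include <stdint.h>
#include <stdio.h>

/* Host-order copies of the sequence helpers defined earlier in this file. */
#define SEQ_TO_QUEUE(s)        (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)        (((q) & 0x1f) << 8)
#define SEQ_TO_IDX(s)          ((s) & 0xff)
#define IDX_TO_SEQ(i)          ((i) & 0xff)
#define SEQ_HUGE_FRAME         0x4000  /* bit 14: command is in 'huge' storage */
#define SEQ_RX_FRAME           0x8000  /* bit 15: uCode-originated notification */

int main(void)
{
        /* Compose: TFD idx 7 on TX queue 4, command stored in 'huge' buffer. */
        uint16_t seq = QUEUE_TO_SEQ(4) | IDX_TO_SEQ(7) | SEQ_HUGE_FRAME;

        /* Decompose a sequence value as echoed back in il_cmd_header. */
        printf("queue %u, idx %u, huge %u, unsolicited %u\n",
               (unsigned)SEQ_TO_QUEUE(seq), (unsigned)SEQ_TO_IDX(seq),
               !!(seq & SEQ_HUGE_FRAME), !!(seq & SEQ_RX_FRAME));
        return 0;
}
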
 
-
 /**
- * struct iwl3945_tx_power
+ * struct il3945_tx_power
  *
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
  *
  * Each entry contains two values:
  * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
  * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
  *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
  *
- * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
  */
-struct iwl3945_tx_power {
+struct il3945_tx_power {
        u8 tx_gain;             /* gain for analog radio */
        u8 dsp_atten;           /* gain for DSP */
 } __packed;
 
 /**
- * struct iwl3945_power_per_rate
+ * struct il3945_power_per_rate
  *
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
  */
-struct iwl3945_power_per_rate {
+struct il3945_power_per_rate {
        u8 rate;                /* plcp */
-       struct iwl3945_tx_power tpc;
+       struct il3945_tx_power tpc;
        u8 reserved;
 } __packed;
 
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
  * iwl4965 rate_n_flags bit fields
  *
  * rate_n_flags format is used in following iwl4965 commands:
- *  REPLY_RX (response only)
- *  REPLY_RX_MPDU (response only)
- *  REPLY_TX (both command and response)
- *  REPLY_TX_LINK_QUALITY_CMD
+ *  N_RX (response only)
+ *  N_RX_MPDU (response only)
+ *  C_TX (both command and response)
+ *  C_TX_LINK_QUALITY_CMD
  *
  * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
  *  2-0:  0)   6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
 #define RATE_MCS_ANT_ABC_MSK   (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
 #define RATE_ANT_NUM 3
 
-#define POWER_TABLE_NUM_ENTRIES                        33
-#define POWER_TABLE_NUM_HT_OFDM_ENTRIES                32
-#define POWER_TABLE_CCK_ENTRY                  32
+#define POWER_TBL_NUM_ENTRIES                  33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES          32
+#define POWER_TBL_CCK_ENTRY                    32
 
-#define IWL_PWR_NUM_HT_OFDM_ENTRIES            24
-#define IWL_PWR_CCK_ENTRIES                    2
+#define IL_PWR_NUM_HT_OFDM_ENTRIES             24
+#define IL_PWR_CCK_ENTRIES                     2
 
 /**
- * union iwl4965_tx_power_dual_stream
+ * union il4965_tx_power_dual_stream
  *
- * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
  * Use __le32 version (struct tx_power_dual_stream) when building command.
  *
  * Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
  * For MIMO rates, one value may be different from the other,
  * in order to balance the Tx output between the two transmitters.
  *
- * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ * See more details in doc for TXPOWER in 4965.h.
  */
-union iwl4965_tx_power_dual_stream {
+union il4965_tx_power_dual_stream {
        struct {
                u8 radio_tx_gain[2];
                u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
 /**
  * struct tx_power_dual_stream
  *
- * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
  *
- * Same format as iwl_tx_power_dual_stream, but __le32
+ * Same format as il_tx_power_dual_stream, but __le32
  */
 struct tx_power_dual_stream {
        __le32 dw;
 } __packed;
 
 /**
- * struct iwl4965_tx_power_db
+ * struct il4965_tx_power_db
  *
- * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
  */
-struct iwl4965_tx_power_db {
-       struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+struct il4965_tx_power_db {
+       struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
 } __packed;
 
 /******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
 #define INITIALIZE_SUBTYPE    (9)
 
 /*
- * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
  *
  * uCode issues this "initialize alive" notification once the initialization
  * uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
  * 3)  Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
  *     for each of 5 frequency ranges.
  */
-struct iwl_init_alive_resp {
+struct il_init_alive_resp {
        u8 ucode_minor;
        u8 ucode_major;
        __le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
                                 * 2 Tx chains */
 } __packed;
 
-
 /**
- * REPLY_ALIVE = 0x1 (response only, not a command)
+ * N_ALIVE = 0x1 (response only, not a command)
  *
  * uCode issues this "alive" notification once the runtime image is ready
  * to receive commands from the driver.  This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
  *     __le32 log_size;     log capacity (in number of entries)
  *     __le32 type;         (1) timestamp with each entry, (0) no timestamp
  *     __le32 wraps;        # times uCode has wrapped to top of circular buffer
- *      __le32 write_index;  next circular buffer entry that uCode would fill
+ *      __le32 write_idx;  next circular buffer entry that uCode would fill
  *
  *     The header is followed by the circular buffer of log entries.  Entries
  *     with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
  * The Linux driver can print both logs to the system log when a uCode error
  * occurs.
  */
-struct iwl_alive_resp {
+struct il_alive_resp {
        u8 ucode_minor;
        u8 ucode_major;
        __le16 reserved1;
        u8 sw_rev[8];
        u8 ver_type;
-       u8 ver_subtype;                 /* not "9" for runtime alive */
+       u8 ver_subtype;         /* not "9" for runtime alive */
        __le16 reserved2;
        __le32 log_event_table_ptr;     /* SRAM address for event log */
        __le32 error_event_table_ptr;   /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
 } __packed;
 
 /*
- * REPLY_ERROR = 0x2 (response only, not a command)
+ * N_ERROR = 0x2 (response only, not a command)
  */
-struct iwl_error_resp {
+struct il_error_resp {
        __le32 error_type;
        u8 cmd_id;
        u8 reserved1;
@@ -554,7 +548,6 @@ enum {
        RXON_DEV_TYPE_SNIFFER = 6,
 };
 
-
 #define RXON_RX_CHAIN_DRIVER_FORCE_MSK         cpu_to_le16(0x1 << 0)
 #define RXON_RX_CHAIN_DRIVER_FORCE_POS         (0)
 #define RXON_RX_CHAIN_VALID_MSK                        cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
 * (according to ON_AIR deassertion) */
 #define RXON_FLG_TSF2HOST_MSK           cpu_to_le32(1 << 15)
 
-
 /* HT flags */
 #define RXON_FLG_CTRL_CHANNEL_LOC_POS          (22)
 #define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK       cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
 #define RXON_FILTER_BCON_AWARE_MSK      cpu_to_le32(1 << 6)
 
 /**
- * REPLY_RXON = 0x10 (command, has simple generic response)
+ * C_RXON = 0x10 (command, has simple generic response)
  *
  * RXON tunes the radio tuner to a service channel, and sets up a number
  * of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
  *        channel.
  *
  * NOTE:  All RXONs wipe clean the internal txpower table.  Driver must
- *        issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ *        issue a new C_TX_PWR_TBL after each C_RXON (0x10),
  *        regardless of whether RXON_FILTER_ASSOC_MSK is set.
  */
 
-struct iwl3945_rxon_cmd {
+struct il3945_rxon_cmd {
        u8 node_addr[6];
        __le16 reserved1;
        u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
        __le16 reserved5;
 } __packed;
 
-struct iwl4965_rxon_cmd {
+struct il4965_rxon_cmd {
        u8 node_addr[6];
        __le16 reserved1;
        u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
 /* Create a common rxon cmd which will be typecast into the 3945 or 4965
  * specific rxon cmd, depending on where it is called from.
  */
-struct iwl_legacy_rxon_cmd {
+struct il_rxon_cmd {
        u8 node_addr[6];
        __le16 reserved1;
        u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
        u8 reserved5;
 } __packed;
 
-
 /*
- * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
  */
-struct iwl3945_rxon_assoc_cmd {
+struct il3945_rxon_assoc_cmd {
        __le32 flags;
        __le32 filter_flags;
        u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
        __le16 reserved;
 } __packed;
 
-struct iwl4965_rxon_assoc_cmd {
+struct il4965_rxon_assoc_cmd {
        __le32 flags;
        __le32 filter_flags;
        u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
        __le16 reserved;
 } __packed;
 
-#define IWL_CONN_MAX_LISTEN_INTERVAL   10
-#define IWL_MAX_UCODE_BEACON_INTERVAL  4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL        1 /* 1024 */
+#define IL_CONN_MAX_LISTEN_INTERVAL    10
+#define IL_MAX_UCODE_BEACON_INTERVAL   4       /* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL 1       /* 1024 */
 
 /*
- * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
  */
-struct iwl_rxon_time_cmd {
+struct il_rxon_time_cmd {
        __le64 timestamp;
        __le16 beacon_interval;
-       __le16 atim_window;
+       __le16 atim_win;
        __le32 beacon_init_val;
        __le16 listen_interval;
        u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
 } __packed;
 
 /*
- * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
  */
-struct iwl3945_channel_switch_cmd {
+struct il3945_channel_switch_cmd {
        u8 band;
        u8 expect_beacon;
        __le16 channel;
        __le32 rxon_flags;
        __le32 rxon_filter_flags;
        __le32 switch_time;
-       struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+       struct il3945_power_per_rate power[IL_MAX_RATES];
 } __packed;
 
-struct iwl4965_channel_switch_cmd {
+struct il4965_channel_switch_cmd {
        u8 band;
        u8 expect_beacon;
        __le16 channel;
        __le32 rxon_flags;
        __le32 rxon_filter_flags;
        __le32 switch_time;
-       struct iwl4965_tx_power_db tx_power;
+       struct il4965_tx_power_db tx_power;
 } __packed;
 
 /*
- * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
  */
-struct iwl_csa_notification {
+struct il_csa_notification {
        __le16 band;
        __le16 channel;
        __le32 status;          /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
  *****************************************************************************/
 
 /**
- * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
- * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
  *
- * @cw_min: Contention window, start value in numbers of slots.
+ * @cw_min: Contention win, start value in numbers of slots.
  *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
- * @cw_max: Contention window, max value in numbers of slots.
+ * @cw_max: Contention win, max value in numbers of slots.
  *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
  * @aifsn:  Number of slots in Arbitration Interframe Space (before
  *          performing random backoff timing prior to Tx).  Device default 1.
  * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
  *
- * Device will automatically increase contention window by (2*CW) + 1 for each
+ * Device will automatically increase contention win by (2*CW) + 1 for each
  * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
  * value, to cap the CW value.
  */
-struct iwl_ac_qos {
+struct il_ac_qos {
        __le16 cw_min;
        __le16 cw_max;
        u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
 #define AC_NUM                4
 
 /*
- * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
  *
  * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
  * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
  */
-struct iwl_qosparam_cmd {
+struct il_qosparam_cmd {
        __le32 qos_flags;
-       struct iwl_ac_qos ac[AC_NUM];
+       struct il_ac_qos ac[AC_NUM];
 } __packed;
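
One way to read the contention-window rule in the il_ac_qos comment above is new CW = (2*CW + 1) masked by cw_max, which both doubles the window and caps it when all values keep the power-of-two-minus-one form. The sketch below shows that interpretation with the device-default cw_min/cw_max from the comment; the device performs this internally, so the code is purely illustrative.

#include <stdio.h>

static unsigned next_cw(unsigned cw, unsigned cw_max)
{
        /* (2*CW + 1), capped by ANDing with cw_max, per the comment above. */
        return ((cw << 1) | 1) & cw_max;
}

int main(void)
{
        unsigned cw_min = 0x0f;         /* device default, per the comment */
        unsigned cw_max = 0x3f;         /* device default, per the comment */
        unsigned cw = cw_min;
        int retry;

        for (retry = 0; retry < 5; retry++) {
                printf("retry %d: CW = %u slots\n", retry, cw);
                cw = next_cw(cw, cw_max);
        }
        return 0;
}
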
 
 /******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
  */
 
 /* Special, dedicated locations within device's station table */
-#define        IWL_AP_ID               0
-#define        IWL_STA_ID              2
-#define        IWL3945_BROADCAST_ID    24
-#define IWL3945_STATION_COUNT  25
-#define IWL4965_BROADCAST_ID   31
-#define        IWL4965_STATION_COUNT   32
+#define        IL_AP_ID                0
+#define        IL_STA_ID               2
+#define        IL3945_BROADCAST_ID     24
+#define IL3945_STATION_COUNT   25
+#define IL4965_BROADCAST_ID    31
+#define        IL4965_STATION_COUNT    32
 
-#define        IWL_STATION_COUNT       32      /* MAX(3945,4965)*/
-#define        IWL_INVALID_STATION     255
+#define        IL_STATION_COUNT        32      /* MAX(3945,4965) */
+#define        IL_INVALID_STATION      255
 
 #define STA_FLG_TX_RATE_MSK            cpu_to_le32(1 << 2)
 #define STA_FLG_PWR_SAVE_MSK           cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
 #define STA_MODIFY_DELBA_TID_MSK       0x10
 #define STA_MODIFY_SLEEP_TX_COUNT_MSK  0x20
 
-/* Receiver address (actually, Rx station's index into station table),
+/* Receiver address (actually, Rx station's idx into station table),
  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
 #define BUILD_RAxTID(sta_id, tid)      (((sta_id) << 4) + (tid))
 
-struct iwl4965_keyinfo {
+struct il4965_keyinfo {
        __le16 key_flags;
        u8 tkip_rx_tsc_byte2;   /* TSC[2] for key mix ph1 detection */
        u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
 /**
  * struct sta_id_modify
  * @addr[ETH_ALEN]: station's MAC address
- * @sta_id: index of station in uCode's station table
+ * @sta_id: idx of station in uCode's station table
  * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
  *
- * Driver selects unused table index when adding new station,
- * or the index to a pre-existing station entry when modifying that station.
- * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
  *
  * modify_mask flags select which parameters to modify vs. leave alone.
  */
@@ -936,15 +927,15 @@ struct sta_id_modify {
 } __packed;
 
 /*
- * REPLY_ADD_STA = 0x18 (command)
+ * C_ADD_STA = 0x18 (command)
  *
  * The device contains an internal table of per-station information,
  * with info on security keys, aggregation parameters, and Tx rates for
  * initial Tx attempt and any retries (4965 devices uses
- * REPLY_TX_LINK_QUALITY_CMD,
- * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
  *
- * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * C_ADD_STA sets up the table entry for one station, either creating
  * a new entry, or modifying a pre-existing one.
  *
  * NOTE:  RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
  *        their own txpower/rate setup data).
  *
  *        When getting started on a new channel, driver must set up the
- *        IWL_BROADCAST_ID entry (last entry in the table).  For a client
+ *        IL_BROADCAST_ID entry (last entry in the table).  For a client
  *        station in a BSS, once an AP is selected, driver sets up the AP STA
- *        in the IWL_AP_ID entry (1st entry in the table).  BROADCAST and AP
+ *        in the IL_AP_ID entry (1st entry in the table).  BROADCAST and AP
  *        are all that are needed for a BSS client station.  If the device is
  *        used as AP, or in an IBSS network, driver must set up station table
- *        entries for all STAs in network, starting with index IWL_STA_ID.
+ *        entries for all STAs in network, starting with idx IL_STA_ID.
  */
 
-struct iwl3945_addsta_cmd {
+struct il3945_addsta_cmd {
        u8 mode;                /* 1: modify existing, 0: add new station */
        u8 reserved[3];
        struct sta_id_modify sta;
-       struct iwl4965_keyinfo key;
-       __le32 station_flags;           /* STA_FLG_* */
+       struct il4965_keyinfo key;
+       __le32 station_flags;   /* STA_FLG_* */
        __le32 station_flags_msk;       /* STA_FLG_* */
 
        /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
        __le16 add_immediate_ba_ssn;
 } __packed;
 
-struct iwl4965_addsta_cmd {
+struct il4965_addsta_cmd {
        u8 mode;                /* 1: modify existing, 0: add new station */
        u8 reserved[3];
        struct sta_id_modify sta;
-       struct iwl4965_keyinfo key;
-       __le32 station_flags;           /* STA_FLG_* */
+       struct il4965_keyinfo key;
+       __le32 station_flags;   /* STA_FLG_* */
        __le32 station_flags_msk;       /* STA_FLG_* */
 
        /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
         * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
        __le16 tid_disable_tx;
 
-       __le16  reserved1;
+       __le16 reserved1;
 
        /* TID for which to add block-ack support.
         * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
 } __packed;
 
 /* Wrapper struct for 3945 and 4965 addsta_cmd structures */
-struct iwl_legacy_addsta_cmd {
+struct il_addsta_cmd {
        u8 mode;                /* 1: modify existing, 0: add new station */
        u8 reserved[3];
        struct sta_id_modify sta;
-       struct iwl4965_keyinfo key;
-       __le32 station_flags;           /* STA_FLG_* */
+       struct il4965_keyinfo key;
+       __le32 station_flags;   /* STA_FLG_* */
        __le32 station_flags_msk;       /* STA_FLG_* */
 
        /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
         * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
        __le16 tid_disable_tx;
 
-       __le16  rate_n_flags;           /* 3945 only */
+       __le16 rate_n_flags;    /* 3945 only */
 
        /* TID for which to add block-ack support.
         * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
        __le16 reserved2;
 } __packed;
 
-
 #define ADD_STA_SUCCESS_MSK            0x1
-#define ADD_STA_NO_ROOM_IN_TABLE       0x2
+#define ADD_STA_NO_ROOM_IN_TBL 0x2
 #define ADD_STA_NO_BLOCK_ACK_RESOURCE  0x4
 #define ADD_STA_MODIFY_NON_EXIST_STA   0x8
 /*
- * REPLY_ADD_STA = 0x18 (response)
+ * C_ADD_STA = 0x18 (response)
  */
-struct iwl_add_sta_resp {
-       u8 status;      /* ADD_STA_* */
+struct il_add_sta_resp {
+       u8 status;              /* ADD_STA_* */
 } __packed;
 
 #define REM_STA_SUCCESS_MSK              0x1
 /*
- *  REPLY_REM_STA = 0x19 (response)
+ *  C_REM_STA = 0x19 (response)
  */
-struct iwl_rem_sta_resp {
+struct il_rem_sta_resp {
        u8 status;
 } __packed;
 
 /*
- *  REPLY_REM_STA = 0x19 (command)
+ *  C_REM_STA = 0x19 (command)
  */
-struct iwl_rem_sta_cmd {
-       u8 num_sta;     /* number of removed stations */
+struct il_rem_sta_cmd {
+       u8 num_sta;             /* number of removed stations */
        u8 reserved[3];
-       u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+       u8 addr[ETH_ALEN];      /* MAC addr of the first station */
        u8 reserved2[2];
 } __packed;
 
-#define IWL_TX_FIFO_BK_MSK             cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK             cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK             cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK             cpu_to_le32(BIT(3))
-#define IWL_AGG_TX_QUEUE_MSK           cpu_to_le32(0xffc00)
+#define IL_TX_FIFO_BK_MSK              cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK              cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK              cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK              cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK            cpu_to_le32(0xffc00)
 
-#define IWL_DROP_SINGLE                0
-#define IWL_DROP_SELECTED      1
-#define IWL_DROP_ALL           2
+#define IL_DROP_SINGLE         0
+#define IL_DROP_SELECTED       1
+#define IL_DROP_ALL            2
 
 /*
  * REPLY_WEP_KEY = 0x20
  */
-struct iwl_wep_key {
-       u8 key_index;
+struct il_wep_key {
+       u8 key_idx;
        u8 key_offset;
        u8 reserved1[2];
        u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
        u8 key[16];
 } __packed;
 
-struct iwl_wep_cmd {
+struct il_wep_cmd {
        u8 num_keys;
        u8 global_key_type;
        u8 flags;
        u8 reserved;
-       struct iwl_wep_key key[0];
+       struct il_wep_key key[0];
 } __packed;
 
 #define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
 #define RX_MPDU_RES_STATUS_TTAK_OK     (1 << 7)
 #define RX_MPDU_RES_STATUS_DEC_DONE_MSK        (0x800)
 
-
-struct iwl3945_rx_frame_stats {
+struct il3945_rx_frame_stats {
        u8 phy_count;
        u8 id;
        u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
        u8 payload[0];
 } __packed;
 
-struct iwl3945_rx_frame_hdr {
+struct il3945_rx_frame_hdr {
        __le16 channel;
        __le16 phy_flags;
        u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
        u8 payload[0];
 } __packed;
 
-struct iwl3945_rx_frame_end {
+struct il3945_rx_frame_end {
        __le32 status;
        __le64 timestamp;
        __le32 beacon_timestamp;
 } __packed;
 
 /*
- * REPLY_3945_RX = 0x1b (response only, not a command)
+ * N_3945_RX = 0x1b (response only, not a command)
  *
  * NOTE:  DO NOT dereference from casts to this structure
  * It is provided only for calculating minimum data set size.
  * The actual offsets of the hdr and end are dynamic based on
  * stats.phy_count
  */
-struct iwl3945_rx_frame {
-       struct iwl3945_rx_frame_stats stats;
-       struct iwl3945_rx_frame_hdr hdr;
-       struct iwl3945_rx_frame_end end;
+struct il3945_rx_frame {
+       struct il3945_rx_frame_stats stats;
+       struct il3945_rx_frame_hdr hdr;
+       struct il3945_rx_frame_end end;
 } __packed;
 
-#define IWL39_RX_FRAME_SIZE    (4 + sizeof(struct iwl3945_rx_frame))
+#define IL39_RX_FRAME_SIZE     (4 + sizeof(struct il3945_rx_frame))
 
 /* Fixed (non-configurable) rx data from phy */
 
-#define IWL49_RX_RES_PHY_CNT 14
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET     (4)
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK       (0x70)
-#define IWL49_AGC_DB_MASK                      (0x3f80)        /* MASK(7,13) */
-#define IWL49_AGC_DB_POS                       (7)
-struct iwl4965_rx_non_cfg_phy {
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET      (4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK        (0x70)
+#define IL49_AGC_DB_MASK                       (0x3f80)        /* MASK(7,13) */
+#define IL49_AGC_DB_POS                        (7)
+struct il4965_rx_non_cfg_phy {
        __le16 ant_selection;   /* ant A bit 4, ant B bit 5, ant C bit 6 */
        __le16 agc_info;        /* agc code 0:6, agc dB 7:13, reserved 14:15 */
        u8 rssi_info[6];        /* we use even entries, 0/2/4 for A/B/C rssi */
        u8 pad[0];
 } __packed;
 
-
 /*
- * REPLY_RX = 0xc3 (response only, not a command)
+ * N_RX = 0xc3 (response only, not a command)
  * Used only for legacy (non 11n) frames.
  */
-struct iwl_rx_phy_res {
-       u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
+struct il_rx_phy_res {
+       u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
        u8 cfg_phy_cnt;         /* configurable DSP phy data byte count */
        u8 stat_id;             /* configurable DSP phy data set ID */
        u8 reserved1;
        __le64 timestamp;       /* TSF at on air rise */
-       __le32 beacon_time_stamp; /* beacon at on-air rise */
+       __le32 beacon_time_stamp;       /* beacon at on-air rise */
        __le16 phy_flags;       /* general phy flags: band, modulation, ... */
        __le16 channel;         /* channel number */
-       u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+       u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
        __le32 rate_n_flags;    /* RATE_MCS_* */
        __le16 byte_count;      /* frame's byte-count */
        __le16 frame_time;      /* frame's time on the air */
 } __packed;
 
-struct iwl_rx_mpdu_res_start {
+struct il_rx_mpdu_res_start {
        __le16 byte_count;
        __le16 reserved;
 } __packed;
 
-
 /******************************************************************************
  * (5)
  * Tx Commands & Responses:
  *
- * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * Driver must place each C_TX command into one of the prioritized Tx
  * queues in host DRAM, shared between driver and device (see comments for
  * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
  * are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
  * uCode handles all timing and protocol related to control frames
  * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
  * handle reception of block-acks; uCode updates the host driver via
- * REPLY_COMPRESSED_BA.
+ * N_COMPRESSED_BA.
  *
  * uCode handles retrying Tx when an ACK is expected but not received.
  * This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (4965).
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
  *
- * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
  * This command must be executed after every RXON command, before Tx can occur.
  *****************************************************************************/
 
-/* REPLY_TX Tx flags field */
+/* C_TX Tx flags field */
 
 /*
  * 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
 #define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
 
 /* For 4965 devices:
- * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
- *    Tx command's initial_rate_index indicates first rate to try;
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ *    Tx command's initial_rate_idx indicates first rate to try;
  *    uCode walks through table for additional Tx attempts.
  * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
  *    This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
 /* 1: uCode overrides sequence control field in MAC header.
  * 0: Driver provides sequence control field in MAC header.
  * Set this for management frames, non-QOS data frames, non-unicast frames,
- * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+ * and also in Tx command embedded in C_SCAN for active scans. */
 #define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
 
 /* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
 /* HCCA-AP - disable duration overwriting. */
 #define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
 
-
 /*
  * TX command security control
  */
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
 #define TKIP_ICV_LEN 4
 
 /*
- * REPLY_TX = 0x1c (command)
+ * C_TX = 0x1c (command)
  */
 
-struct iwl3945_tx_cmd {
+struct il3945_tx_cmd {
        /*
         * MPDU byte count:
         * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
 } __packed;
 
 /*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
  */
-struct iwl3945_tx_resp {
+struct il3945_tx_resp {
        u8 failure_rts;
        u8 failure_frame;
        u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
        __le32 status;          /* TX status */
 } __packed;
 
-
 /*
  * 4965 uCode updates these Tx attempt count values in host DRAM.
  * Used for managing Tx retries when expecting block-acks.
  * Driver should set these fields to 0.
  */
-struct iwl_dram_scratch {
+struct il_dram_scratch {
        u8 try_cnt;             /* Tx attempts */
        u8 bt_kill_cnt;         /* Tx attempts blocked by Bluetooth device */
        __le16 reserved;
 } __packed;
 
-struct iwl_tx_cmd {
+struct il_tx_cmd {
        /*
         * MPDU byte count:
         * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
 
        /* uCode may modify this field of the Tx command (in host DRAM!).
         * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
-       struct iwl_dram_scratch scratch;
+       struct il_dram_scratch scratch;
 
        /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
        __le32 rate_n_flags;    /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
        u8 sec_ctl;             /* TX_CMD_SEC_* */
 
        /*
-        * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+        * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
         * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
         * data frames, this field may be used to selectively reduce initial
         * rate (via non-0 value) for special frames (e.g. management), while
         * still supporting rate scaling for all frames.
         */
-       u8 initial_rate_index;
+       u8 initial_rate_idx;
        u8 reserved;
        u8 key[16];
        __le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
 };
 
 enum {
-       TX_STATUS_MSK = 0x000000ff,             /* bits 0:7 */
+       TX_STATUS_MSK = 0x000000ff,     /* bits 0:7 */
        TX_STATUS_DELAY_MSK = 0x00000040,
        TX_STATUS_ABORT_MSK = 0x00000080,
        TX_PACKET_MODE_MSK = 0x0000ff00,        /* bits 8:15 */
        TX_FIFO_NUMBER_MSK = 0x00070000,        /* bits 16:18 */
-       TX_RESERVED = 0x00780000,               /* bits 19:22 */
+       TX_RESERVED = 0x00780000,       /* bits 19:22 */
        TX_POWER_PA_DETECT_MSK = 0x7f800000,    /* bits 23:30 */
        TX_ABORT_REQUIRED_MSK = 0x80000000,     /* bits 31:31 */
 };
@@ -1671,7 +1656,7 @@ enum {
 #define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
 
 /*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
  *
  * This response may be in one of two slightly different formats, indicated
  * by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
        __le16 sequence;
 } __packed;
 
-struct iwl4965_tx_resp {
+struct il4965_tx_resp {
        u8 frame_count;         /* 1 no aggregation, >1 aggregation */
        u8 bt_kill_count;       /* # blocked by bluetooth (unused for agg) */
        u8 failure_rts;         /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
         */
        union {
                __le32 status;
-               struct agg_tx_status agg_status[0]; /* for each agg frame */
+               struct agg_tx_status agg_status[0];     /* for each agg frame */
        } u;
 } __packed;
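
A reduced host-order sketch of how a handler might branch on frame_count to pick between the two response formats described above; the mock structures carry only the fields this sketch uses and are not the real driver layout.

#include <stdio.h>

struct agg_tx_status_mock {
        unsigned short status;
        unsigned short sequence;
};

struct tx_resp_mock {
        unsigned char frame_count;                      /* 1: no aggregation, >1: aggregation */
        unsigned int status;                            /* single-frame format */
        const struct agg_tx_status_mock *agg_status;    /* aggregation format */
};

#define TX_STATUS_MSK  0x000000ff      /* bits 0:7, as defined above */

static void handle_tx_resp(const struct tx_resp_mock *resp)
{
        if (resp->frame_count == 1) {
                /* Format 1: a single overall status word. */
                printf("tx status 0x%02x\n", resp->status & TX_STATUS_MSK);
        } else {
                /* Format 2: one agg_tx_status entry per aggregated frame. */
                int i;

                for (i = 0; i < resp->frame_count; i++)
                        printf("frame %d: status 0x%04x, seq 0x%04x\n", i,
                               resp->agg_status[i].status,
                               resp->agg_status[i].sequence);
        }
}

int main(void)
{
        struct agg_tx_status_mock agg[2] = { { 0x0001, 0x0010 }, { 0x0001, 0x0011 } };
        struct tx_resp_mock single = { 1, 0x01, 0 };
        struct tx_resp_mock burst  = { 2, 0, agg };

        handle_tx_resp(&single);
        handle_tx_resp(&burst);
        return 0;
}
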
 
 /*
- * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
  *
  * Reports Block-Acknowledge from recipient station
  */
-struct iwl_compressed_ba_resp {
+struct il_compressed_ba_resp {
        __le32 sta_addr_lo32;
        __le16 sta_addr_hi16;
        __le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
 } __packed;
 
 /*
- * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
  *
- * See details under "TXPOWER" in iwl-4965-hw.h.
+ * See details under "TXPOWER" in 4965.h.
  */
 
-struct iwl3945_txpowertable_cmd {
+struct il3945_txpowertable_cmd {
        u8 band;                /* 0: 5 GHz, 1: 2.4 GHz */
        u8 reserved;
        __le16 channel;
-       struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+       struct il3945_power_per_rate power[IL_MAX_RATES];
 } __packed;
 
-struct iwl4965_txpowertable_cmd {
+struct il4965_txpowertable_cmd {
        u8 band;                /* 0: 5 GHz, 1: 2.4 GHz */
        u8 reserved;
        __le16 channel;
-       struct iwl4965_tx_power_db tx_power;
+       struct il4965_tx_power_db tx_power;
 } __packed;
 
-
 /**
- * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
  *
- * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
  *
  * NOTE: The table of rates passed to the uCode via the
  * RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
  *
  * For example, if you set 9MB (PLCP 0x0f) as the first
  * rate in the rate table, the bit mask for that rate
- * when passed through ofdm_basic_rates on the REPLY_RXON
+ * when passed through ofdm_basic_rates on the C_RXON
  * command would be bit 0 (1 << 0)
  */
-struct iwl3945_rate_scaling_info {
+struct il3945_rate_scaling_info {
        __le16 rate_n_flags;
        u8 try_cnt;
-       u8 next_rate_index;
+       u8 next_rate_idx;
 } __packed;
 
-struct iwl3945_rate_scaling_cmd {
+struct il3945_rate_scaling_cmd {
        u8 table_id;
        u8 reserved[3];
-       struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+       struct il3945_rate_scaling_info table[IL_MAX_RATES];
 } __packed;
 
-
 /*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
 #define  LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK   (1 << 0)
 
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
 #define  LINK_QUAL_ANT_B_MSK (1 << 1)
 #define  LINK_QUAL_ANT_MSK   (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
 
-
 /**
- * struct iwl_link_qual_general_params
+ * struct il_link_qual_general_params
  *
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
  */
-struct iwl_link_qual_general_params {
+struct il_link_qual_general_params {
        u8 flags;
 
-       /* No entries at or above this (driver chosen) index contain MIMO */
+       /* No entries at or above this (driver chosen) idx contain MIMO */
        u8 mimo_delimiter;
 
        /* Best single antenna to use for single stream (legacy, SISO). */
        u8 single_stream_ant_msk;       /* LINK_QUAL_ANT_* */
 
        /* Best antennas to use for MIMO (unused for 4965, assumes both). */
-       u8 dual_stream_ant_msk;         /* LINK_QUAL_ANT_* */
+       u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
 
        /*
         * If driver needs to use different initial rates for different
         * EDCA QOS access categories (as implemented by tx fifos 0-3),
-        * this table will set that up, by indicating the indexes in the
+        * this table will set that up, by indicating the idxes in the
         * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
         * Otherwise, driver should set all entries to 0.
         *
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
         * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
         * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
         */
-       u8 start_rate_index[LINK_QUAL_AC_NUM];
+       u8 start_rate_idx[LINK_QUAL_AC_NUM];
 } __packed;
 
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)  /* 4 milliseconds */
 #define LINK_QUAL_AGG_TIME_LIMIT_MAX   (8000)
 #define LINK_QUAL_AGG_TIME_LIMIT_MIN   (100)
 
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
 #define LINK_QUAL_AGG_FRAME_LIMIT_MIN  (0)
 
 /**
- * struct iwl_link_qual_agg_params
+ * struct il_link_qual_agg_params
  *
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
  */
-struct iwl_link_qual_agg_params {
+struct il_link_qual_agg_params {
 
        /*
         *Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
 } __packed;
 
 /*
- * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
  *
- * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
  *
  * Each station in the 4965 device's internal station table has its own table
  * of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
  * one station.
  *
  * NOTE:  Station must already be in 4965 device's station table.
- *       Use REPLY_ADD_STA.
+ *       Use C_ADD_STA.
  *
  * The rate scaling procedures described below work well.  Of course, other
  * procedures are possible, and may work better for particular environments.
  *
  *
- * FILLING THE RATE TABLE
+ * FILLING THE RATE TBL
  *
  * Given a particular initial rate and mode, as determined by the rate
  * scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
  * speculative mode as the new current active mode.
  *
  * Each history set contains, separately for each possible rate, data for a
- * sliding window of the 62 most recent tx attempts at that rate.  The data
+ * sliding win of the 62 most recent tx attempts at that rate.  The data
  * includes a shifting bitmap of success(1)/failure(0), and sums of successful
  * and attempted frames, from which the driver can additionally calculate a
  * success ratio (success / attempted) and number of failures
- * (attempted - success), and control the size of the window (attempted).
+ * (attempted - success), and control the size of the win (attempted).
  * The driver uses the bit map to remove successes from the success sum, as
- * the oldest tx attempts fall out of the window.
+ * the oldest tx attempts fall out of the win.
  *
  * When the 4965 device makes multiple tx attempts for a given frame, each
  * attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
  *
  * When using block-ack (aggregation), all frames are transmitted at the same
  * rate, since there is no per-attempt acknowledgment from the destination
- * station.  The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * station.  The Tx response struct il_tx_resp indicates the Tx rate in
  * rate_n_flags field.  After receiving a block-ack, the driver can update
  * history for the entire block all at once.
  *
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
  *         good performance; higher rate is sure to have poorer success.
  *
  * 6)  Re-evaluate the rate after each tx frame.  If working with block-
- *     acknowledge, history and statistics may be calculated for the entire
- *     block (including prior history that fits within the history windows),
+ *     acknowledge, history and stats may be calculated for the entire
+ *     block (including prior history that fits within the history wins),
  *     before re-evaluation.
  *
  * FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
  * legacy), and then repeat the search process.
  *
  */
-struct iwl_link_quality_cmd {
+struct il_link_quality_cmd {
 
        /* Index of destination/recipient station in uCode's station table */
        u8 sta_id;
        u8 reserved1;
        __le16 control;         /* not used */
-       struct iwl_link_qual_general_params general_params;
-       struct iwl_link_qual_agg_params agg_params;
+       struct il_link_qual_general_params general_params;
+       struct il_link_qual_agg_params agg_params;
 
        /*
-        * Rate info; when using rate-scaling, Tx command's initial_rate_index
-        * specifies 1st Tx rate attempted, via index into this table.
+        * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+        * specifies 1st Tx rate attempted, via idx into this table.
         * 4965 devices works its way through table when retrying Tx.
         */
        struct {
-               __le32 rate_n_flags;    /* RATE_MCS_*, IWL_RATE_* */
+               __le32 rate_n_flags;    /* RATE_MCS_*, RATE_* */
        } rs_table[LINK_QUAL_MAX_RETRY_NUM];
        __le32 reserved2;
 } __packed;
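
The sliding-window bookkeeping described above (a 62-attempt bitmap, success and attempt sums, and successes removed as the oldest attempts fall out) can be sketched in a few lines of host-side C; the structure and function names here are illustrative, not the driver's own rate-scaling state.

#include <stdint.h>
#include <stdio.h>

#define RATE_WIN_SIZE 62       /* window of most recent tx attempts, per the text above */

struct rate_win {
        uint64_t bitmap;        /* bit i set => attempt i was acked */
        int attempted;          /* attempts currently inside the window */
        int success;            /* acked attempts currently inside the window */
};

static void rate_win_add(struct rate_win *w, int acked)
{
        /* When the window is full, the oldest attempt falls out; the bitmap
         * says whether it was a success that must leave the success sum. */
        if (w->attempted == RATE_WIN_SIZE) {
                if (w->bitmap & (1ULL << (RATE_WIN_SIZE - 1)))
                        w->success--;
                w->attempted--;
        }
        w->bitmap = (w->bitmap << 1) & ((1ULL << RATE_WIN_SIZE) - 1);
        if (acked) {
                w->bitmap |= 1ULL;
                w->success++;
        }
        w->attempted++;
}

static int rate_win_success_ratio(const struct rate_win *w)
{
        /* Percent success; failures are simply attempted - success. */
        return w->attempted ? (100 * w->success) / w->attempted : -1;
}

int main(void)
{
        struct rate_win w = { 0, 0, 0 };
        int i;

        for (i = 0; i < 70; i++)
                rate_win_add(&w, i % 3 != 0);   /* fail every third attempt */

        printf("attempted %d, success %d, ratio %d%%\n",
               w.attempted, w.success, rate_win_success_ratio(&w));
        return 0;
}
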
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
 #define BT_MAX_KILL_DEF (0x5)
 
 /*
- * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
  *
  * 3945 and 4965 devices support hardware handshake with Bluetooth device on
  * same platform.  Bluetooth device alerts wireless device when it will Tx;
  * wireless device can delay or kill its own Tx to accommodate.
  */
-struct iwl_bt_cmd {
+struct il_bt_cmd {
        u8 flags;
        u8 lead_time;
        u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
        __le32 kill_cts_mask;
 } __packed;
 
-
 /******************************************************************************
  * (6)
  * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
                                 RXON_FILTER_ASSOC_MSK           | \
                                 RXON_FILTER_BCON_AWARE_MSK)
 
-struct iwl_measure_channel {
+struct il_measure_channel {
        __le32 duration;        /* measurement duration in extended beacon
                                 * format */
        u8 channel;             /* channel to measure */
-       u8 type;                /* see enum iwl_measure_type */
+       u8 type;                /* see enum il_measure_type */
        __le16 reserved;
 } __packed;
 
 /*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
  */
-struct iwl_spectrum_cmd {
+struct il_spectrum_cmd {
        __le16 len;             /* number of bytes starting from token */
        u8 token;               /* token id */
        u8 id;                  /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
        __le32 filter_flags;    /* rxon filter flags */
        __le16 channel_count;   /* minimum 1, maximum 10 */
        __le16 reserved3;
-       struct iwl_measure_channel channels[10];
+       struct il_measure_channel channels[10];
 } __packed;
 
 /*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
  */
-struct iwl_spectrum_resp {
+struct il_spectrum_resp {
        u8 token;
        u8 id;                  /* id of the prior command replaced, or 0xff */
        __le16 status;          /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
                                 *     measurement) */
 } __packed;
 
-enum iwl_measurement_state {
-       IWL_MEASUREMENT_START = 0,
-       IWL_MEASUREMENT_STOP = 1,
+enum il_measurement_state {
+       IL_MEASUREMENT_START = 0,
+       IL_MEASUREMENT_STOP = 1,
 };
 
-enum iwl_measurement_status {
-       IWL_MEASUREMENT_OK = 0,
-       IWL_MEASUREMENT_CONCURRENT = 1,
-       IWL_MEASUREMENT_CSA_CONFLICT = 2,
-       IWL_MEASUREMENT_TGH_CONFLICT = 3,
+enum il_measurement_status {
+       IL_MEASUREMENT_OK = 0,
+       IL_MEASUREMENT_CONCURRENT = 1,
+       IL_MEASUREMENT_CSA_CONFLICT = 2,
+       IL_MEASUREMENT_TGH_CONFLICT = 3,
        /* 4-5 reserved */
-       IWL_MEASUREMENT_STOPPED = 6,
-       IWL_MEASUREMENT_TIMEOUT = 7,
-       IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+       IL_MEASUREMENT_STOPPED = 6,
+       IL_MEASUREMENT_TIMEOUT = 7,
+       IL_MEASUREMENT_PERIODIC_FAILED = 8,
 };
 
 #define NUM_ELEMENTS_IN_HISTOGRAM 8
 
-struct iwl_measurement_histogram {
+struct il_measurement_histogram {
        __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
        __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];  /* in 1usec counts */
 } __packed;
 
 /* clear channel availability counters */
-struct iwl_measurement_cca_counters {
+struct il_measurement_cca_counters {
        __le32 ofdm;
        __le32 cck;
 } __packed;
 
-enum iwl_measure_type {
-       IWL_MEASURE_BASIC = (1 << 0),
-       IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
-       IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
-       IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
-       IWL_MEASURE_FRAME = (1 << 4),
+enum il_measure_type {
+       IL_MEASURE_BASIC = (1 << 0),
+       IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+       IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+       IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+       IL_MEASURE_FRAME = (1 << 4),
        /* bits 5:6 are reserved */
-       IWL_MEASURE_IDLE = (1 << 7),
+       IL_MEASURE_IDLE = (1 << 7),
 };
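
As an illustration, a host-order mock of one channels[] entry for C_SPECTRUM_MEASUREMENT, combining two of the IL_MEASURE_* type bits defined above; the duration and channel values are made up.

#include <stdio.h>

/* Host-order mock of il_measure_channel (the real struct uses __le32/__le16). */
struct measure_channel_mock {
        unsigned int duration;          /* in extended beacon format */
        unsigned char channel;
        unsigned char type;             /* OR of IL_MEASURE_* bits */
        unsigned short reserved;
};

#define IL_MEASURE_BASIC               (1 << 0)
#define IL_MEASURE_CHANNEL_LOAD        (1 << 1)

int main(void)
{
        /* One entry of the channels[10] array in C_SPECTRUM_MEASUREMENT;
         * channel_count in the command says how many entries are valid. */
        struct measure_channel_mock ch = {
                .duration = 50,
                .channel = 6,
                .type = IL_MEASURE_BASIC | IL_MEASURE_CHANNEL_LOAD,
                .reserved = 0,
        };

        printf("measure channel %u for %u units, type bits 0x%02x\n",
               (unsigned)ch.channel, ch.duration, (unsigned)ch.type);
        return 0;
}
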
 
 /*
- * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
  */
-struct iwl_spectrum_notification {
+struct il_spectrum_notification {
        u8 id;                  /* measurement id -- 0 or 1 */
        u8 token;
-       u8 channel_index;       /* index in measurement channel list */
+       u8 channel_idx;         /* idx in measurement channel list */
        u8 state;               /* 0 - start, 1 - stop */
        __le32 start_time;      /* lower 32-bits of TSF */
        u8 band;                /* 0 - 5.2GHz, 1 - 2.4GHz */
        u8 channel;
-       u8 type;                /* see enum iwl_measurement_type */
+       u8 type;                /* see enum il_measurement_type */
        u8 reserved1;
        /* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
         * valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
        u8 basic_type;          /* 0 - bss, 1 - ofdm preamble, 2 -
                                 * unidentified */
        u8 reserved2[3];
-       struct iwl_measurement_histogram histogram;
+       struct il_measurement_histogram histogram;
        __le32 stop_time;       /* lower 32-bits of TSF */
-       __le32 status;          /* see iwl_measurement_status */
+       __le32 status;          /* see il_measurement_status */
 } __packed;
 
 /******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
  *****************************************************************************/
 
 /**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct il_powertable_cmd - Power Table Command
  * @flags: See below:
  *
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
  *
  * PM allow:
  *   bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
  *              '10' force xtal sleep
  *              '11' Illegal set
  *
- * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
  * uCode assumes sleep over DTIM is allowed and we don't need to wake up
  * for every DTIM.
  */
-#define IWL_POWER_VEC_SIZE 5
+#define IL_POWER_VEC_SIZE 5
 
-#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK       cpu_to_le16(BIT(0))
-#define IWL_POWER_PCI_PM_MSK                   cpu_to_le16(BIT(3))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK        cpu_to_le16(BIT(0))
+#define IL_POWER_PCI_PM_MSK                    cpu_to_le16(BIT(3))
 
-struct iwl3945_powertable_cmd {
+struct il3945_powertable_cmd {
        __le16 flags;
        u8 reserved[2];
        __le32 rx_data_timeout;
        __le32 tx_data_timeout;
-       __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+       __le32 sleep_interval[IL_POWER_VEC_SIZE];
 } __packed;
 
-struct iwl_powertable_cmd {
+struct il_powertable_cmd {
        __le16 flags;
-       u8 keep_alive_seconds;          /* 3945 reserved */
-       u8 debug_flags;                 /* 3945 reserved */
+       u8 keep_alive_seconds;  /* 3945 reserved */
+       u8 debug_flags;         /* 3945 reserved */
        __le32 rx_data_timeout;
        __le32 tx_data_timeout;
-       __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+       __le32 sleep_interval[IL_POWER_VEC_SIZE];
        __le32 keep_alive_beacons;
 } __packed;
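
/*
 * Illustrative sketch, not part of the patch: filling a C_POWER_TBL
 * command that lets the driver enter power save.  il_send_cmd_pdu() is
 * the synchronous helper added in common.c below; the function name and
 * the timeout/interval values here are assumptions, not recommended
 * settings.
 */
static int il_example_allow_sleep(struct il_priv *il)
{
        struct il_powertable_cmd cmd = {
                .flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK,
                .rx_data_timeout = cpu_to_le32(25 * 1024),
                .tx_data_timeout = cpu_to_le32(25 * 1024),
        };
        int i;

        /* one entry per element of the uCode sleep vector (see NOTE above) */
        for (i = 0; i < IL_POWER_VEC_SIZE; i++)
                cmd.sleep_interval[i] = cpu_to_le32(1 << i);

        return il_send_cmd_pdu(il, C_POWER_TBL, sizeof(cmd), &cmd);
}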
 
 /*
- * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
  * all devices identical.
  */
-struct iwl_sleep_notification {
+struct il_sleep_notification {
        u8 pm_sleep_mode;
        u8 pm_wakeup_src;
        __le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
 
 /* Sleep states.  all devices identical. */
 enum {
-       IWL_PM_NO_SLEEP = 0,
-       IWL_PM_SLP_MAC = 1,
-       IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
-       IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
-       IWL_PM_SLP_PHY = 4,
-       IWL_PM_SLP_REPENT = 5,
-       IWL_PM_WAKEUP_BY_TIMER = 6,
-       IWL_PM_WAKEUP_BY_DRIVER = 7,
-       IWL_PM_WAKEUP_BY_RFKILL = 8,
+       IL_PM_NO_SLEEP = 0,
+       IL_PM_SLP_MAC = 1,
+       IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+       IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+       IL_PM_SLP_PHY = 4,
+       IL_PM_SLP_REPENT = 5,
+       IL_PM_WAKEUP_BY_TIMER = 6,
+       IL_PM_WAKEUP_BY_DRIVER = 7,
+       IL_PM_WAKEUP_BY_RFKILL = 8,
        /* 3 reserved */
-       IWL_PM_NUM_OF_MODES = 12,
+       IL_PM_NUM_OF_MODES = 12,
 };
 
 /*
- * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
  */
-struct iwl_card_state_notif {
+struct il_card_state_notif {
        __le32 flags;
 } __packed;
 
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
 #define CT_CARD_DISABLED   0x04
 #define RXON_CARD_DISABLED 0x10
 
-struct iwl_ct_kill_config {
-       __le32   reserved;
-       __le32   critical_temperature_M;
-       __le32   critical_temperature_R;
-}  __packed;
+struct il_ct_kill_config {
+       __le32 reserved;
+       __le32 critical_temperature_M;
+       __le32 critical_temperature_R;
+} __packed;
 
 /******************************************************************************
  * (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
 #define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
 
 /**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * struct il_scan_channel - entry in C_SCAN channel table
  *
  * One for each channel in the scan list.
  * Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
  *     quiet_plcp_th, good_CRC_th)
  *
  * To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * under struct il_scan_cmd about max_out_time and quiet_time):
  * 1)  If using passive_dwell (i.e. passive_dwell != 0):
  *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
  * 2)  quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
  *     passive_dwell < max_out_time
  *     active_dwell < max_out_time
  */
-struct iwl3945_scan_channel {
+struct il3945_scan_channel {
        /*
         * type is defined as:
         * 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
         * 5:7 reserved
         */
        u8 type;
-       u8 channel;     /* band is selected by iwl3945_scan_cmd "flags" field */
-       struct iwl3945_tx_power tpc;
+       u8 channel;             /* band is selected by il3945_scan_cmd "flags" field */
+       struct il3945_tx_power tpc;
        __le16 active_dwell;    /* in 1024-uSec TU (time units), typ 5-50 */
        __le16 passive_dwell;   /* in 1024-uSec TU (time units), typ 20-500 */
 } __packed;
 
 /* set number of direct probes u8 type */
-#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
 
-struct iwl_scan_channel {
+struct il_scan_channel {
        /*
         * type is defined as:
         * 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
         * 21:31 reserved
         */
        __le32 type;
-       __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+       __le16 channel;         /* band is selected by il_scan_cmd "flags" field */
        u8 tx_gain;             /* gain for analog radio */
        u8 dsp_atten;           /* gain for DSP */
        __le16 active_dwell;    /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
 } __packed;
 
 /* set number of direct probes __le32 type */
-#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+#define IL_SCAN_PROBE_MASK(n)  cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
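
/*
 * Illustrative sketch, not part of the patch: IL_SCAN_PROBE_MASK(n)
 * evaluates to bits 1..n set, e.g. IL_SCAN_PROBE_MASK(1) ==
 * cpu_to_le32(0x2) and IL_SCAN_PROBE_MASK(3) == cpu_to_le32(0xE).
 * Below, one active-scan channel entry requests direct probes for the
 * first two direct_scan[] SSIDs; the function name and dwell values
 * are assumptions.
 */
static void il_example_fill_channel(struct il_scan_channel *chan, u16 channel)
{
        chan->type = SCAN_CHANNEL_TYPE_ACTIVE | IL_SCAN_PROBE_MASK(2);
        chan->channel = cpu_to_le16(channel);
        chan->active_dwell = cpu_to_le16(30);   /* TU, see dwell rules above */
        chan->passive_dwell = cpu_to_le16(110); /* TU */
}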
 
 /**
- * struct iwl_ssid_ie - directed scan network information element
+ * struct il_ssid_ie - directed scan network information element
  *
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
  * each channel may select different ssids from among the 20 (4) entries.
  * SSID IEs get transmitted in reverse order of entry.
  */
-struct iwl_ssid_ie {
+struct il_ssid_ie {
        u8 id;
        u8 len;
        u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
 #define PROBE_OPTION_MAX_3945          4
 #define PROBE_OPTION_MAX               20
 #define TX_CMD_LIFE_TIME_INFINITE      cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH_DISABLED       0
-#define IWL_GOOD_CRC_TH_DEFAULT                cpu_to_le16(1)
-#define IWL_GOOD_CRC_TH_NEVER          cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
-#define IWL_MAX_CMD_SIZE 4096
+#define IL_GOOD_CRC_TH_DISABLED        0
+#define IL_GOOD_CRC_TH_DEFAULT         cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER           cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
 
 /*
- * REPLY_SCAN_CMD = 0x80 (command)
+ * C_SCAN = 0x80 (command)
  *
  * The hardware scan command is very powerful; the driver can set it up to
  * maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
  * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
  *
  * To avoid uCode errors, see timing restrictions described under
- * struct iwl_scan_channel.
+ * struct il_scan_channel.
  */
 
-struct iwl3945_scan_cmd {
+struct il3945_scan_cmd {
        __le16 len;
        u8 reserved0;
        u8 channel_count;       /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
 
        /* For active scans (set to all-0s for passive scans).
         * Does not include payload.  Must specify Tx rate; no rate scaling. */
-       struct iwl3945_tx_cmd tx_cmd;
+       struct il3945_tx_cmd tx_cmd;
 
        /* For directed active scans (set to all-0s otherwise) */
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+       struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
 
        /*
         * Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
         * Number of channels in list is specified by channel_count.
         * Each channel in list is of type:
         *
-        * struct iwl3945_scan_channel channels[0];
+        * struct il3945_scan_channel channels[0];
         *
         * NOTE:  Only one band of channels can be scanned per pass.  You
         * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
-        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
         * before requesting another scan.
         */
        u8 data[0];
 } __packed;
 
-struct iwl_scan_cmd {
+struct il_scan_cmd {
        __le16 len;
        u8 reserved0;
        u8 channel_count;       /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
 
        /* For active scans (set to all-0s for passive scans).
         * Does not include payload.  Must specify Tx rate; no rate scaling. */
-       struct iwl_tx_cmd tx_cmd;
+       struct il_tx_cmd tx_cmd;
 
        /* For directed active scans (set to all-0s otherwise) */
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+       struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
 
        /*
         * Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
         * Number of channels in list is specified by channel_count.
         * Each channel in list is of type:
         *
-        * struct iwl_scan_channel channels[0];
+        * struct il_scan_channel channels[0];
         *
         * NOTE:  Only one band of channels can be scanned per pass.  You
         * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
-        * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+        * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
         * before requesting another scan.
         */
        u8 data[0];
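
/*
 * Illustrative sketch, not part of the patch: laying out the variable
 * length tail of a C_SCAN command as described in the comment above --
 * probe request frame first, then channel_count entries of struct
 * il_scan_channel (filled here with the il_example_fill_channel sketch
 * shown earlier).  The function name is hypothetical, the caller is
 * assumed to pass a zeroed buffer of at least sizeof(*scan) +
 * IL_MAX_SCAN_SIZE bytes, tx_cmd and direct_scan[] are left zeroed for
 * brevity, and len is assumed to cover the whole command.
 */
static u16 il_example_build_scan(struct il_scan_cmd *scan, const u8 *probe,
                                 u16 probe_len, u8 n_channels)
{
        struct il_scan_channel *chan;
        u16 len;
        int i;

        scan->channel_count = n_channels;

        /* probe request frame goes first in the variable tail ... */
        memcpy(scan->data, probe, probe_len);

        /* ... immediately followed by the channel list */
        chan = (struct il_scan_channel *)(scan->data + probe_len);
        for (i = 0; i < n_channels; i++, chan++)
                il_example_fill_channel(chan, 1 + i);

        len = sizeof(*scan) + probe_len + n_channels * sizeof(*chan);
        scan->len = cpu_to_le16(len);
        return len;
}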
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
 #define ABORT_STATUS            0x2
 
 /*
- * REPLY_SCAN_CMD = 0x80 (response)
+ * C_SCAN = 0x80 (response)
  */
-struct iwl_scanreq_notification {
+struct il_scanreq_notification {
        __le32 status;          /* 1: okay, 2: cannot fulfill request */
 } __packed;
 
 /*
- * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ * N_SCAN_START = 0x82 (notification only, not a command)
  */
-struct iwl_scanstart_notification {
+struct il_scanstart_notification {
        __le32 tsf_low;
        __le32 tsf_high;
        __le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
 #define  SCAN_OWNER_STATUS 0x1
 #define  MEASURE_OWNER_STATUS 0x2
 
-#define IWL_PROBE_STATUS_OK            0
-#define IWL_PROBE_STATUS_TX_FAILED     BIT(0)
+#define IL_PROBE_STATUS_OK             0
+#define IL_PROBE_STATUS_TX_FAILED      BIT(0)
 /* error statuses combined with TX_FAILED */
-#define IWL_PROBE_STATUS_FAIL_TTL      BIT(1)
-#define IWL_PROBE_STATUS_FAIL_BT       BIT(2)
+#define IL_PROBE_STATUS_FAIL_TTL       BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT        BIT(2)
 
-#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+#define NUMBER_OF_STATS 1      /* first __le32 is good CRC */
 /*
- * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
  */
-struct iwl_scanresults_notification {
+struct il_scanresults_notification {
        u8 channel;
        u8 band;
        u8 probe_status;
-       u8 num_probe_not_sent; /* not enough time to send */
+       u8 num_probe_not_sent;  /* not enough time to send */
        __le32 tsf_low;
        __le32 tsf_high;
-       __le32 statistics[NUMBER_OF_STATISTICS];
+       __le32 stats[NUMBER_OF_STATS];
 } __packed;
 
 /*
- * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
  */
-struct iwl_scancomplete_notification {
+struct il_scancomplete_notification {
        u8 scanned_channels;
        u8 status;
        u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
        __le32 tsf_high;
 } __packed;
 
-
 /******************************************************************************
  * (9)
  * IBSS/AP Commands and Notifications:
  *
  *****************************************************************************/
 
-enum iwl_ibss_manager {
-       IWL_NOT_IBSS_MANAGER = 0,
-       IWL_IBSS_MANAGER = 1,
+enum il_ibss_manager {
+       IL_NOT_IBSS_MANAGER = 0,
+       IL_IBSS_MANAGER = 1,
 };
 
 /*
- * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ * N_BEACON = 0x90 (notification only, not a command)
  */
 
-struct iwl3945_beacon_notif {
-       struct iwl3945_tx_resp beacon_notify_hdr;
+struct il3945_beacon_notif {
+       struct il3945_tx_resp beacon_notify_hdr;
        __le32 low_tsf;
        __le32 high_tsf;
        __le32 ibss_mgr_status;
 } __packed;
 
-struct iwl4965_beacon_notif {
-       struct iwl4965_tx_resp beacon_notify_hdr;
+struct il4965_beacon_notif {
+       struct il4965_tx_resp beacon_notify_hdr;
        __le32 low_tsf;
        __le32 high_tsf;
        __le32 ibss_mgr_status;
 } __packed;
 
 /*
- * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ * C_TX_BEACON = 0x91 (command, has simple generic response)
  */
 
-struct iwl3945_tx_beacon_cmd {
-       struct iwl3945_tx_cmd tx;
+struct il3945_tx_beacon_cmd {
+       struct il3945_tx_cmd tx;
        __le16 tim_idx;
        u8 tim_size;
        u8 reserved1;
        struct ieee80211_hdr frame[0];  /* beacon frame */
 } __packed;
 
-struct iwl_tx_beacon_cmd {
-       struct iwl_tx_cmd tx;
+struct il_tx_beacon_cmd {
+       struct il_tx_cmd tx;
        __le16 tim_idx;
        u8 tim_size;
        u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
  *
  *****************************************************************************/
 
-#define IWL_TEMP_CONVERT 260
+#define IL_TEMP_CONVERT 260
 
 #define SUP_RATE_11A_MAX_NUM_CHANNELS  8
 #define SUP_RATE_11B_MAX_NUM_CHANNELS  4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
        } failed;
 } __packed;
 
-/* statistics command response */
+/* stats command response */
 
-struct iwl39_statistics_rx_phy {
+struct iwl39_stats_rx_phy {
        __le32 ina_cnt;
        __le32 fina_cnt;
        __le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
        __le32 sent_cts_cnt;
 } __packed;
 
-struct iwl39_statistics_rx_non_phy {
+struct iwl39_stats_rx_non_phy {
        __le32 bogus_cts;       /* CTS received when not expecting CTS */
        __le32 bogus_ack;       /* ACK received when not expecting ACK */
        __le32 non_bssid_frames;        /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
                                         * our serving channel */
 } __packed;
 
-struct iwl39_statistics_rx {
-       struct iwl39_statistics_rx_phy ofdm;
-       struct iwl39_statistics_rx_phy cck;
-       struct iwl39_statistics_rx_non_phy general;
+struct iwl39_stats_rx {
+       struct iwl39_stats_rx_phy ofdm;
+       struct iwl39_stats_rx_phy cck;
+       struct iwl39_stats_rx_non_phy general;
 } __packed;
 
-struct iwl39_statistics_tx {
+struct iwl39_stats_tx {
        __le32 preamble_cnt;
        __le32 rx_detected_cnt;
        __le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
        __le32 actual_ack_cnt;
 } __packed;
 
-struct statistics_dbg {
+struct stats_dbg {
        __le32 burst_check;
        __le32 burst_count;
        __le32 wait_for_silence_timeout_cnt;
        __le32 reserved[3];
 } __packed;
 
-struct iwl39_statistics_div {
+struct iwl39_stats_div {
        __le32 tx_on_a;
        __le32 tx_on_b;
        __le32 exec_time;
        __le32 probe_time;
 } __packed;
 
-struct iwl39_statistics_general {
+struct iwl39_stats_general {
        __le32 temperature;
-       struct statistics_dbg dbg;
+       struct stats_dbg dbg;
        __le32 sleep_time;
        __le32 slots_out;
        __le32 slots_idle;
        __le32 ttl_timestamp;
-       struct iwl39_statistics_div div;
+       struct iwl39_stats_div div;
 } __packed;
 
-struct statistics_rx_phy {
+struct stats_rx_phy {
        __le32 ina_cnt;
        __le32 fina_cnt;
        __le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
        __le32 reserved3;
 } __packed;
 
-struct statistics_rx_ht_phy {
+struct stats_rx_ht_phy {
        __le32 plcp_err;
        __le32 overrun_err;
        __le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
 
 #define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
 
-struct statistics_rx_non_phy {
+struct stats_rx_non_phy {
        __le32 bogus_cts;       /* CTS received when not expecting CTS */
        __le32 bogus_ack;       /* ACK received when not expecting ACK */
        __le32 non_bssid_frames;        /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
        __le32 num_missed_bcon; /* number of missed beacons */
        __le32 adc_rx_saturation_time;  /* count in 0.8us units the time the
                                         * ADC was in saturation */
-       __le32 ina_detection_search_time;/* total time (in 0.8us) searched
-                                         * for INA */
+       __le32 ina_detection_search_time;       /* total time (in 0.8us) searched
+                                                * for INA */
        __le32 beacon_silence_rssi_a;   /* RSSI silence after beacon frame */
        __le32 beacon_silence_rssi_b;   /* RSSI silence after beacon frame */
        __le32 beacon_silence_rssi_c;   /* RSSI silence after beacon frame */
        __le32 interference_data_flag;  /* flag for interference data
                                         * availability. 1 when data is
                                         * available. */
-       __le32 channel_load;            /* counts RX Enable time in uSec */
+       __le32 channel_load;    /* counts RX Enable time in uSec */
        __le32 dsp_false_alarms;        /* DSP false alarm (both OFDM
                                         * and CCK) counter */
        __le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
        __le32 beacon_energy_c;
 } __packed;
 
-struct statistics_rx {
-       struct statistics_rx_phy ofdm;
-       struct statistics_rx_phy cck;
-       struct statistics_rx_non_phy general;
-       struct statistics_rx_ht_phy ofdm_ht;
+struct stats_rx {
+       struct stats_rx_phy ofdm;
+       struct stats_rx_phy cck;
+       struct stats_rx_non_phy general;
+       struct stats_rx_ht_phy ofdm_ht;
 } __packed;
 
 /**
- * struct statistics_tx_power - current tx power
+ * struct stats_tx_power - current tx power
  *
  * @ant_a: current tx power on chain a in 1/2 dB step
  * @ant_b: current tx power on chain b in 1/2 dB step
  * @ant_c: current tx power on chain c in 1/2 dB step
  */
-struct statistics_tx_power {
+struct stats_tx_power {
        u8 ant_a;
        u8 ant_b;
        u8 ant_c;
        u8 reserved;
 } __packed;
 
-struct statistics_tx_non_phy_agg {
+struct stats_tx_non_phy_agg {
        __le32 ba_timeout;
        __le32 ba_reschedule_frames;
        __le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
        __le32 rx_ba_rsp_cnt;
 } __packed;
 
-struct statistics_tx {
+struct stats_tx {
        __le32 preamble_cnt;
        __le32 rx_detected_cnt;
        __le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
        __le32 burst_abort_missing_next_frame_cnt;
        __le32 cts_timeout_collision;
        __le32 ack_or_ba_timeout_collision;
-       struct statistics_tx_non_phy_agg agg;
+       struct stats_tx_non_phy_agg agg;
 
        __le32 reserved1;
 } __packed;
 
-
-struct statistics_div {
+struct stats_div {
        __le32 tx_on_a;
        __le32 tx_on_b;
        __le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
        __le32 reserved2;
 } __packed;
 
-struct statistics_general_common {
-       __le32 temperature;   /* radio temperature */
-       struct statistics_dbg dbg;
+struct stats_general_common {
+       __le32 temperature;     /* radio temperature */
+       struct stats_dbg dbg;
        __le32 sleep_time;
        __le32 slots_out;
        __le32 slots_idle;
        __le32 ttl_timestamp;
-       struct statistics_div div;
+       struct stats_div div;
        __le32 rx_enable_counter;
        /*
         * num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
        __le32 num_of_sos_states;
 } __packed;
 
-struct statistics_general {
-       struct statistics_general_common common;
+struct stats_general {
+       struct stats_general_common common;
        __le32 reserved2;
        __le32 reserved3;
 } __packed;
 
-#define UCODE_STATISTICS_CLEAR_MSK             (0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK         (0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK       (0x1 << 2)
+#define UCODE_STATS_CLEAR_MSK          (0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK              (0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK    (0x1 << 2)
 
 /*
- * REPLY_STATISTICS_CMD = 0x9c,
+ * C_STATS = 0x9c,
  * all devices identical.
  *
- * This command triggers an immediate response containing uCode statistics.
- * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
  *
  * If the CLEAR_STATS configuration flag is set, uCode will clear its
- * internal copy of the statistics (counters) after issuing the response.
- * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
  *
  * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
- * STATISTICS_NOTIFICATIONs after received beacons (see below).  This flag
- * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ * N_STATSs after received beacons (see below).  This flag
+ * does not affect the response to the C_STATS 0x9c itself.
  */
-#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)    /* see above */
-#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
-struct iwl_statistics_cmd {
-       __le32 configuration_flags;     /* IWL_STATS_CONF_* */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)     /* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)   /* see above */
+struct il_stats_cmd {
+       __le32 configuration_flags;     /* IL_STATS_CONF_* */
 } __packed;
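
/*
 * Illustrative sketch, not part of the patch: requesting an immediate
 * stats dump and clearing the uCode counters afterwards, per the
 * CLEAR_STATS description above.  The function name is hypothetical.
 */
static int il_example_clear_stats(struct il_priv *il)
{
        struct il_stats_cmd cmd = {
                .configuration_flags = IL_STATS_CONF_CLEAR_STATS,
        };

        return il_send_cmd_pdu(il, C_STATS, sizeof(cmd), &cmd);
}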
 
 /*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ * N_STATS = 0x9d (notification only, not a command)
  *
  * By default, uCode issues this notification after receiving a beacon
  * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
+ * C_STATS 0x9c, above.
  *
  * Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * cleared when changing channels or when driver issues C_STATS
  * 0x9c with CLEAR_STATS bit set (see above).
  *
- * uCode also issues this notification during scans.  uCode clears statistics
- * appropriately so that each notification contains statistics for only the
+ * uCode also issues this notification during scans.  uCode clears stats
+ * appropriately so that each notification contains stats for only the
  * one channel that has just been scanned.
  */
-#define STATISTICS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
-#define STATISTICS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
+#define STATS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
 
-struct iwl3945_notif_statistics {
+struct il3945_notif_stats {
        __le32 flag;
-       struct iwl39_statistics_rx rx;
-       struct iwl39_statistics_tx tx;
-       struct iwl39_statistics_general general;
+       struct iwl39_stats_rx rx;
+       struct iwl39_stats_tx tx;
+       struct iwl39_stats_general general;
 } __packed;
 
-struct iwl_notif_statistics {
+struct il_notif_stats {
        __le32 flag;
-       struct statistics_rx rx;
-       struct statistics_tx tx;
-       struct statistics_general general;
+       struct stats_rx rx;
+       struct stats_tx tx;
+       struct stats_general general;
 } __packed;
 
 /*
- * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
  *
- * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
+ * uCode sends N_MISSED_BEACONS to the driver when it detects missed beacons,
  * regardless of how many beacons were missed.  When the driver receives the
  * notification, it can find inside the command all the beacon information,
  * including the number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
  *
  */
 
-#define IWL_MISSED_BEACON_THRESHOLD_MIN        (1)
-#define IWL_MISSED_BEACON_THRESHOLD_DEF        (5)
-#define IWL_MISSED_BEACON_THRESHOLD_MAX        IWL_MISSED_BEACON_THRESHOLD_DEF
+#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
 
-struct iwl_missed_beacon_notif {
+struct il_missed_beacon_notif {
        __le32 consecutive_missed_beacons;
        __le32 total_missed_becons;
        __le32 num_expected_beacons;
        __le32 num_recvd_beacons;
 } __packed;
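
/*
 * Illustrative sketch, not part of the patch: reacting to an
 * N_MISSED_BEACONS notification only once the consecutive count crosses
 * the default threshold above.  The function name is hypothetical and
 * logging is just an example action.
 */
static void il_example_missed_beacon(struct il_missed_beacon_notif *mb)
{
        u32 consecutive = le32_to_cpu(mb->consecutive_missed_beacons);

        if (consecutive <= IL_MISSED_BEACON_THRESHOLD_DEF)
                return;

        IL_ERR("%u consecutive missed beacons, received %u of %u expected\n",
               consecutive, le32_to_cpu(mb->num_recvd_beacons),
               le32_to_cpu(mb->num_expected_beacons));
}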
 
-
 /******************************************************************************
  * (11)
  * Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
  *****************************************************************************/
 
 /**
- * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
  *
  * This command sets up the Rx signal detector for a sensitivity level that
  * is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
  * time listening, not transmitting).  Driver must adjust sensitivity so that
  * the ratio of actual false alarms to actual Rx time falls within this range.
  *
- * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * While associated, uCode delivers N_STATSs after each
  * received beacon.  These provide information to the driver to analyze the
- * sensitivity.  Don't analyze statistics that come in from scanning, or any
- * other non-associated-network source.  Pertinent statistics include:
+ * sensitivity.  Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source.  Pertinent stats include:
  *
- * From "general" statistics (struct statistics_rx_non_phy):
+ * From "general" stats (struct stats_rx_non_phy):
  *
  * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
  *   Measure of energy of desired signal.  Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
  *   uSecs of actual Rx time during beacon period (varies according to
  *   how much time was spent transmitting).
  *
- * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
  *
  * false_alarm_cnt
  *   Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
  *
  * Total number of false alarms = false_alarms + plcp_errs
  *
- * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
  * (notice that the start points for OFDM are at or close to settings for
  * maximum sensitivity):
  *
  *                                             START  /  MIN  /  MAX
- *   HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          90   /   85  /  120
- *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX     170   /  170  /  210
- *   HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX         105   /  105  /  140
- *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX     220   /  220  /  270
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX          90   /   85  /  120
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX     170   /  170  /  210
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX         105   /  105  /  140
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX     220   /  220  /  270
  *
  *   If actual rate of OFDM false alarms (+ plcp_errors) is too high
  *   (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
  *        Reset this to 0 at the first beacon period that falls within the
  *        "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
  *
- * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
  * (notice that the start points for CCK are at maximum sensitivity):
  *
  *                                             START  /  MIN  /  MAX
- *   HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX         125   /  125  /  200
- *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX     200   /  200  /  400
- *   HD_MIN_ENERGY_CCK_DET_INDEX                100   /    0  /  100
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX         125   /  125  /  200
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX     200   /  200  /  400
+ *   HD_MIN_ENERGY_CCK_DET_IDX                100   /    0  /  100
  *
  *   If actual rate of CCK false alarms (+ plcp_errors) is too high
  *   (greater than 50 for each 204.8 msecs listening), method for reducing
  *   sensitivity is:
  *
- *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
  *       up to max 400.
  *
- *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
  *       sensitivity has been reduced a significant amount; bring it up to
  *       a moderate 161.  Otherwise, *add* 3, up to max 200.
  *
- *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
  *       sensitivity has been reduced only a moderate or small amount;
- *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
  *       down to min 0.  Otherwise (if gain has been significantly reduced),
- *       don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *       don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
  *
  *       b)  Save a snapshot of the "silence reference".
  *
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
  *
  *   Method for increasing sensitivity:
  *
- *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
  *       down to min 125.
  *
- *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
  *       down to min 200.
  *
- *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
  *
  *   If actual rate of CCK false alarms (+ plcp_errors) is within good range
  *   (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
  *
  *   2)  If previous beacon had too many CCK false alarms (+ plcp_errors),
  *       give some extra margin to energy threshold by *subtracting* 8
- *       from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *       from value in HD_MIN_ENERGY_CCK_DET_IDX.
  *
  *   For all cases (too few, too many, good range), make sure that the CCK
  *   detection threshold (energy) is below the energy level for robust
  *   detection over the past 10 beacon periods, the "Max cck energy".
  *   Lower values mean higher energy; this means making sure that the value
- *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *   in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
  *
  */
 
 /*
- * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
- */
-#define HD_TABLE_SIZE  (11)    /* number of entries */
-#define HD_MIN_ENERGY_CCK_DET_INDEX                 (0)        /* table indexes */
-#define HD_MIN_ENERGY_OFDM_DET_INDEX                (1)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          (2)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX      (3)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX      (4)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX          (5)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX      (6)
-#define HD_BARKER_CORR_TH_ADD_MIN_INDEX             (7)
-#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX         (8)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX          (9)
-#define HD_OFDM_ENERGY_TH_IN_INDEX                  (10)
-
-/* Control field in struct iwl_sensitivity_cmd */
-#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE  cpu_to_le16(0)
-#define SENSITIVITY_CMD_CONTROL_WORK_TABLE     cpu_to_le16(1)
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
+ */
+#define HD_TBL_SIZE  (11)      /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX                 (0)  /* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX                (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX          (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX      (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX      (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX          (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX      (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX             (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX         (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX          (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX                  (10)
+
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL      cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
 
 /**
- * struct iwl_sensitivity_cmd
+ * struct il_sensitivity_cmd
  * @control:  (1) updates working table, (0) updates default table
- * @table:  energy threshold values, use HD_* as index into table
+ * @table:  energy threshold values, use HD_* as idx into table
  *
  * Always use "1" in "control" to update uCode's working table and DSP.
  */
-struct iwl_sensitivity_cmd {
-       __le16 control;                 /* always use "1" */
-       __le16 table[HD_TABLE_SIZE];    /* use HD_* as index */
+struct il_sensitivity_cmd {
+       __le16 control;         /* always use "1" */
+       __le16 table[HD_TBL_SIZE];      /* use HD_* as idx */
 } __packed;
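
/*
 * Illustrative sketch, not part of the patch: one "too many CCK false
 * alarms" desensitize step from the algorithm described above, applied
 * to a host-side copy of the table and pushed to uCode with the
 * working-table control value.  The function name and the tbl argument
 * are assumptions.
 */
static int il_example_cck_desense(struct il_priv *il, u16 *tbl)
{
        struct il_sensitivity_cmd cmd;
        int i;

        /* 1) raise the MRC auto-correlation threshold by 3, max 400 */
        tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
            min_t(u16, tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] + 3, 400);

        /* 2) raise the non-MRC threshold: jump to 161 if < 160, else +3 max 200 */
        if (tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] < 160)
                tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = 161;
        else
                tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
                    min_t(u16, tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] + 3, 200);

        cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;   /* always "1" */
        for (i = 0; i < HD_TBL_SIZE; i++)
                cmd.table[i] = cpu_to_le16(tbl[i]);

        return il_send_cmd_pdu(il, C_SENSITIVITY, sizeof(cmd), &cmd);
}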
 
-
 /**
- * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
  *
  * This command sets the relative gains of 4965 device's 3 radio receiver chains.
  *
  * After the first association, driver should accumulate signal and noise
- * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
- * beacons from the associated network (don't collect statistics that come
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
  * in from scanning, or any other non-network source).
  *
  * DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
  * Driver should determine which antennas are actually connected, by comparing
  * average beacon signal levels for the 3 Rx chains.  Accumulate (add) the
  * following values over 20 beacons, one accumulator for each of the chains
- * a/b/c, from struct statistics_rx_non_phy:
+ * a/b/c, from struct stats_rx_non_phy:
  *
  * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
  *
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
  * to antennas, see above) for gain, by comparing the average signal levels
  * detected during the silence after each beacon (background noise).
  * Accumulate (add) the following values over 20 beacons, one accumulator for
- * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
  *
  * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
  *
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
  * (accum_noise[i] - accum_noise[reference]) / 30
  *
  * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
- * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
  * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
  * and set bit 2 to indicate "reduce gain".  The value for the reference
  * (weakest) chain should be "0".
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
 
 /* Phy calibration command for series */
 /* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE    18
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE     18
 enum {
-       IWL_PHY_CALIBRATE_DIFF_GAIN_CMD         = 7,
-       IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+       IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+       IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
 };
 
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE         (253)
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE          (253)
 
-struct iwl_calib_hdr {
+struct il_calib_hdr {
        u8 op_code;
        u8 first_group;
        u8 groups_num;
        u8 data_valid;
 } __packed;
 
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
-       struct iwl_calib_hdr hdr;
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+       struct il_calib_hdr hdr;
        s8 diff_gain_a;         /* see above */
        s8 diff_gain_b;
        s8 diff_gain_c;
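
/*
 * Illustrative sketch, not part of the patch: turning the 20-beacon
 * noise accumulators into the diff_gain_[abc] encoding described under
 * C_PHY_CALIBRATION above -- difference to the reference (weakest)
 * chain in 1.5 dB steps, clamped to 0-3, with bit 2 set to request
 * "reduce gain".  The function name and parameters are assumptions.
 */
static u8 il_example_diff_gain(u32 accum_noise_chain, u32 accum_noise_ref)
{
        u32 delta;

        if (accum_noise_chain <= accum_noise_ref)
                return 0;       /* the reference chain itself stays at 0 */

        delta = (accum_noise_chain - accum_noise_ref) / 30;
        if (delta > 3)
                delta = 3;

        return (u8)(delta | BIT(2));    /* bit 2 == reduce gain */
}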
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
 
 /*
  * LEDs Command & Response
- * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ * C_LEDS = 0x48 (command, has simple generic response)
  *
  * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
  * this command turns it on or off, or sets up a periodic blinking cycle.
  */
-struct iwl_led_cmd {
+struct il_led_cmd {
        __le32 interval;        /* "interval" in uSec */
        u8 id;                  /* 1: Activity, 2: Link, 3: Tech */
        u8 off;                 /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
        u8 reserved;
 } __packed;
 
-
 /******************************************************************************
  * (13)
  * Union of all expected notifications/responses:
  *
  *****************************************************************************/
 
-struct iwl_rx_packet {
+#define IL_RX_FRAME_SIZE_MSK   0x00003fff
+
+struct il_rx_pkt {
        /*
         * The first 4 bytes of the RX frame header contain both the RX frame
         * size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
         * 13-00: RX frame size
         */
        __le32 len_n_flags;
-       struct iwl_cmd_header hdr;
+       struct il_cmd_header hdr;
        union {
-               struct iwl3945_rx_frame rx_frame;
-               struct iwl3945_tx_resp tx_resp;
-               struct iwl3945_beacon_notif beacon_status;
-
-               struct iwl_alive_resp alive_frame;
-               struct iwl_spectrum_notification spectrum_notif;
-               struct iwl_csa_notification csa_notif;
-               struct iwl_error_resp err_resp;
-               struct iwl_card_state_notif card_state_notif;
-               struct iwl_add_sta_resp add_sta;
-               struct iwl_rem_sta_resp rem_sta;
-               struct iwl_sleep_notification sleep_notif;
-               struct iwl_spectrum_resp spectrum;
-               struct iwl_notif_statistics stats;
-               struct iwl_compressed_ba_resp compressed_ba;
-               struct iwl_missed_beacon_notif missed_beacon;
+               struct il3945_rx_frame rx_frame;
+               struct il3945_tx_resp tx_resp;
+               struct il3945_beacon_notif beacon_status;
+
+               struct il_alive_resp alive_frame;
+               struct il_spectrum_notification spectrum_notif;
+               struct il_csa_notification csa_notif;
+               struct il_error_resp err_resp;
+               struct il_card_state_notif card_state_notif;
+               struct il_add_sta_resp add_sta;
+               struct il_rem_sta_resp rem_sta;
+               struct il_sleep_notification sleep_notif;
+               struct il_spectrum_resp spectrum;
+               struct il_notif_stats stats;
+               struct il_compressed_ba_resp compressed_ba;
+               struct il_missed_beacon_notif missed_beacon;
                __le32 status;
                u8 raw[0];
        } u;
 } __packed;
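
/*
 * Illustrative sketch, not part of the patch: pulling the payload length
 * out of len_n_flags as laid out in the comment above.  The function
 * name is hypothetical.
 */
static u32 il_example_rx_len(struct il_rx_pkt *pkt)
{
        /* bits 13-0 carry the RX frame size, the upper bits are flags */
        return le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
}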
 
-#endif                         /* __iwl_legacy_commands_h__ */
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644 (file)
index 0000000..881ba04
--- /dev/null
@@ -0,0 +1,5706 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+       switch (cmd) {
+               IL_CMD(N_ALIVE);
+               IL_CMD(N_ERROR);
+               IL_CMD(C_RXON);
+               IL_CMD(C_RXON_ASSOC);
+               IL_CMD(C_QOS_PARAM);
+               IL_CMD(C_RXON_TIMING);
+               IL_CMD(C_ADD_STA);
+               IL_CMD(C_REM_STA);
+               IL_CMD(C_WEPKEY);
+               IL_CMD(N_3945_RX);
+               IL_CMD(C_TX);
+               IL_CMD(C_RATE_SCALE);
+               IL_CMD(C_LEDS);
+               IL_CMD(C_TX_LINK_QUALITY_CMD);
+               IL_CMD(C_CHANNEL_SWITCH);
+               IL_CMD(N_CHANNEL_SWITCH);
+               IL_CMD(C_SPECTRUM_MEASUREMENT);
+               IL_CMD(N_SPECTRUM_MEASUREMENT);
+               IL_CMD(C_POWER_TBL);
+               IL_CMD(N_PM_SLEEP);
+               IL_CMD(N_PM_DEBUG_STATS);
+               IL_CMD(C_SCAN);
+               IL_CMD(C_SCAN_ABORT);
+               IL_CMD(N_SCAN_START);
+               IL_CMD(N_SCAN_RESULTS);
+               IL_CMD(N_SCAN_COMPLETE);
+               IL_CMD(N_BEACON);
+               IL_CMD(C_TX_BEACON);
+               IL_CMD(C_TX_PWR_TBL);
+               IL_CMD(C_BT_CONFIG);
+               IL_CMD(C_STATS);
+               IL_CMD(N_STATS);
+               IL_CMD(N_CARD_STATE);
+               IL_CMD(N_MISSED_BEACONS);
+               IL_CMD(C_CT_KILL_CONFIG);
+               IL_CMD(C_SENSITIVITY);
+               IL_CMD(C_PHY_CALIBRATION);
+               IL_CMD(N_RX_PHY);
+               IL_CMD(N_RX_MPDU);
+               IL_CMD(N_RX);
+               IL_CMD(N_COMPRESSED_BA);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+EXPORT_SYMBOL(il_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+                       struct il_rx_pkt *pkt)
+{
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from %s (0x%08X)\n",
+                      il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+               return;
+       }
+#ifdef CONFIG_IWLEGACY_DEBUG
+       switch (cmd->hdr.cmd) {
+       case C_TX_LINK_QUALITY_CMD:
+       case C_SENSITIVITY:
+               D_HC_DUMP("back from %s (0x%08X)\n",
+                         il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+               break;
+       default:
+               D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+                    pkt->hdr.flags);
+       }
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+       int ret;
+
+       BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+       /* An asynchronous command can not expect an SKB to be set. */
+       BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+       /* Assign a generic callback if one is not provided */
+       if (!cmd->callback)
+               cmd->callback = il_generic_cmd_callback;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return -EBUSY;
+
+       ret = il_enqueue_hcmd(il, cmd);
+       if (ret < 0) {
+               IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+                      il_get_cmd_string(cmd->id), ret);
+               return ret;
+       }
+       return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+       int cmd_idx;
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       BUG_ON(cmd->flags & CMD_ASYNC);
+
+       /* A synchronous command can not have a callback set. */
+       BUG_ON(cmd->callback);
+
+       D_INFO("Attempting to send sync command %s\n",
+              il_get_cmd_string(cmd->id));
+
+       set_bit(S_HCMD_ACTIVE, &il->status);
+       D_INFO("Setting HCMD_ACTIVE for command %s\n",
+              il_get_cmd_string(cmd->id));
+
+       cmd_idx = il_enqueue_hcmd(il, cmd);
+       if (cmd_idx < 0) {
+               ret = cmd_idx;
+               IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+                      il_get_cmd_string(cmd->id), ret);
+               goto out;
+       }
+
+       ret = wait_event_timeout(il->wait_command_queue,
+                                !test_bit(S_HCMD_ACTIVE, &il->status),
+                                HOST_COMPLETE_TIMEOUT);
+       if (!ret) {
+               if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+                       IL_ERR("Error sending %s: time out after %dms.\n",
+                              il_get_cmd_string(cmd->id),
+                              jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+                       clear_bit(S_HCMD_ACTIVE, &il->status);
+                       D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+                              il_get_cmd_string(cmd->id));
+                       ret = -ETIMEDOUT;
+                       goto cancel;
+               }
+       }
+
+       if (test_bit(S_RF_KILL_HW, &il->status)) {
+               IL_ERR("Command %s aborted: RF KILL Switch\n",
+                      il_get_cmd_string(cmd->id));
+               ret = -ECANCELED;
+               goto fail;
+       }
+       if (test_bit(S_FW_ERROR, &il->status)) {
+               IL_ERR("Command %s failed: FW Error\n",
+                      il_get_cmd_string(cmd->id));
+               ret = -EIO;
+               goto fail;
+       }
+       if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+               IL_ERR("Error: Response NULL in '%s'\n",
+                      il_get_cmd_string(cmd->id));
+               ret = -EIO;
+               goto cancel;
+       }
+
+       ret = 0;
+       goto out;
+
+cancel:
+       if (cmd->flags & CMD_WANT_SKB) {
+               /*
+                * Cancel the CMD_WANT_SKB flag for the cmd in the
+                * TX cmd queue. Otherwise in case the cmd comes
+                * in later, it will possibly set an invalid
+                * address (cmd->meta.source).
+                */
+               il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+       }
+fail:
+       if (cmd->reply_page) {
+               il_free_pages(il, cmd->reply_page);
+               cmd->reply_page = 0;
+       }
+out:
+       return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+       if (cmd->flags & CMD_ASYNC)
+               return il_send_cmd_async(il, cmd);
+
+       return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+       struct il_host_cmd cmd = {
+               .id = id,
+               .len = len,
+               .data = data,
+       };
+
+       return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+                     void (*callback) (struct il_priv *il,
+                                       struct il_device_cmd *cmd,
+                                       struct il_rx_pkt *pkt))
+{
+       struct il_host_cmd cmd = {
+               .id = id,
+               .len = len,
+               .data = data,
+       };
+
+       cmd.flags |= CMD_ASYNC;
+       cmd.callback = callback;
+
+       return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
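
/*
 * Illustrative sketch, not part of the patch: an asynchronous C_STATS
 * request that relies on il_generic_cmd_callback() above to log the
 * outcome.  The function name is hypothetical; struct il_stats_cmd
 * comes from commands.h.
 */
static int il_example_request_stats(struct il_priv *il)
{
        struct il_stats_cmd stats_cmd = {
                .configuration_flags = 0,       /* report only, keep counters */
        };

        /* NULL callback -> il_generic_cmd_callback() is installed */
        return il_send_cmd_pdu_async(il, C_STATS, sizeof(stats_cmd),
                                     &stats_cmd, NULL);
}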
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode,
+                "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput          OFF time(ms)    ON time (ms)
+ *     >300                    25              25
+ *     >200 to 300             40              40
+ *     >100 to 200             55              55
+ *     >70 to 100              65              65
+ *     >50 to 70               75              75
+ *     >20 to 50               85              85
+ *     >10 to 20               95              95
+ *     >5 to 10                110             110
+ *     >1 to 5                 130             130
+ *     >0 to 1                 167             167
+ *     <=0                                     SOLID ON
+ */
+static const struct ieee80211_tpt_blink il_blink[] = {
+       {.throughput = 0,               .blink_time = 334},
+       {.throughput = 1 * 1024 - 1,    .blink_time = 260},
+       {.throughput = 5 * 1024 - 1,    .blink_time = 220},
+       {.throughput = 10 * 1024 - 1,   .blink_time = 190},
+       {.throughput = 20 * 1024 - 1,   .blink_time = 170},
+       {.throughput = 50 * 1024 - 1,   .blink_time = 150},
+       {.throughput = 70 * 1024 - 1,   .blink_time = 130},
+       {.throughput = 100 * 1024 - 1,  .blink_time = 110},
+       {.throughput = 200 * 1024 - 1,  .blink_time = 80},
+       {.throughput = 300 * 1024 - 1,  .blink_time = 50},
+};
+
+/*
+ * Adjust the LED blink rate to compensate for the MAC clock deviation of each HW.
+ * Led blink rate analysis showed an average deviation of 0% on 3945,
+ * 5% on 4965 HW.
+ * The LED on/off time must be compensated per HW according to its deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ *     compensation = (100 - averageDeviation) * 64 / 100
+ *     NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+       if (!compensation) {
+               IL_ERR("undefined blink compensation: "
+                      "use pre-defined blinking time\n");
+               return time;
+       }
+
+       return (u8) ((time * compensation) >> 6);
+}
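
/*
 * Illustrative example, not part of the patch: with the 5% deviation
 * measured on 4965, compensation = (100 - 5) * 64 / 100 = 60, so a
 * requested 110 ms blink interval becomes (110 * 60) >> 6 = 103 ms.
 */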
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+       struct il_led_cmd led_cmd = {
+               .id = IL_LED_LINK,
+               .interval = IL_DEF_LED_INTRVL
+       };
+       int ret;
+
+       if (!test_bit(S_READY, &il->status))
+               return -EBUSY;
+
+       if (il->blink_on == on && il->blink_off == off)
+               return 0;
+
+       if (off == 0) {
+               /* led is SOLID_ON */
+               on = IL_LED_SOLID;
+       }
+
+       D_LED("Led blink time compensation=%u\n",
+             il->cfg->base_params->led_compensation);
+       led_cmd.on =
+           il_blink_compensation(il, on,
+                                 il->cfg->base_params->led_compensation);
+       led_cmd.off =
+           il_blink_compensation(il, off,
+                                 il->cfg->base_params->led_compensation);
+
+       ret = il->cfg->ops->led->cmd(il, &led_cmd);
+       if (!ret) {
+               il->blink_on = on;
+               il->blink_off = off;
+       }
+       return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+                     enum led_brightness brightness)
+{
+       struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+       unsigned long on = 0;
+
+       if (brightness > 0)
+               on = IL_LED_SOLID;
+
+       il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+                unsigned long *delay_off)
+{
+       struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+       return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+       int mode = led_mode;
+       int ret;
+
+       if (mode == IL_LED_DEFAULT)
+               mode = il->cfg->led_mode;
+
+       il->led.name =
+           kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+       il->led.brightness_set = il_led_brightness_set;
+       il->led.blink_set = il_led_blink_set;
+       il->led.max_brightness = 1;
+
+       switch (mode) {
+       case IL_LED_DEFAULT:
+               WARN_ON(1);
+               break;
+       case IL_LED_BLINK:
+               il->led.default_trigger =
+                   ieee80211_create_tpt_led_trigger(il->hw,
+                                                    IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+                                                    il_blink,
+                                                    ARRAY_SIZE(il_blink));
+               break;
+       case IL_LED_RF_STATE:
+               il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+               break;
+       }
+
+       ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+       if (ret) {
+               kfree(il->led.name);
+               return;
+       }
+
+       il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+       if (!il->led_registered)
+               return;
+
+       led_classdev_unregister(&il->led);
+       kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel.  We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware.  This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel.  There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
+       183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
+       34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
+       100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
+       145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
+       1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
+       36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+       u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+       int ret = 0;
+
+       D_EEPROM("EEPROM signature=0x%08x\n", gp);
+       switch (gp) {
+       case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+       case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+               break;
+       default:
+               IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
+               ret = -ENOENT;
+               break;
+       }
+       return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+       BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+       return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
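+/* Read a 16 bit little-endian word from the cached EEPROM image; returns 0
+ * if the image has not been loaded yet. */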
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
+       if (!il->eeprom)
+               return 0;
+       return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE:  This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+       __le16 *e;
+       u32 gp = _il_rd(il, CSR_EEPROM_GP);
+       int sz;
+       int ret;
+       u16 addr;
+
+       /* allocate eeprom */
+       sz = il->cfg->base_params->eeprom_size;
+       D_EEPROM("NVM size = %d\n", sz);
+       il->eeprom = kzalloc(sz, GFP_KERNEL);
+       if (!il->eeprom) {
+               ret = -ENOMEM;
+               goto alloc_err;
+       }
+       e = (__le16 *) il->eeprom;
+
+       il->cfg->ops->lib->apm_ops.init(il);
+
+       ret = il_eeprom_verify_signature(il);
+       if (ret < 0) {
+               IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+               ret = -ENOENT;
+               goto err;
+       }
+
+       /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+       ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+       if (ret < 0) {
+               IL_ERR("Failed to acquire EEPROM semaphore.\n");
+               ret = -ENOENT;
+               goto err;
+       }
+
+       /* eeprom is an array of 16bit values */
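+       /* Each halfword is read by writing (addr << 1) to CSR_EEPROM_REG,
+        * polling the READ_VALID bit and then taking the data from the
+        * upper 16 bits of that same register. */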
+       for (addr = 0; addr < sz; addr += sizeof(u16)) {
+               u32 r;
+
+               _il_wr(il, CSR_EEPROM_REG,
+                      CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+               ret =
+                   _il_poll_bit(il, CSR_EEPROM_REG,
+                                CSR_EEPROM_REG_READ_VALID_MSK,
+                                CSR_EEPROM_REG_READ_VALID_MSK,
+                                IL_EEPROM_ACCESS_TIMEOUT);
+               if (ret < 0) {
+                       IL_ERR("Time out reading EEPROM[%d]\n", addr);
+                       goto done;
+               }
+               r = _il_rd(il, CSR_EEPROM_REG);
+               e[addr / 2] = cpu_to_le16(r >> 16);
+       }
+
+       D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+                il_eeprom_query16(il, EEPROM_VERSION));
+
+       ret = 0;
+done:
+       il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+
+err:
+       if (ret)
+               il_eeprom_free(il);
+       /* Reset chip to save power until we load uCode during "up". */
+       il_apm_stop(il);
+alloc_err:
+       return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+       kfree(il->eeprom);
+       il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+                      int *eeprom_ch_count,
+                      const struct il_eeprom_channel **eeprom_ch_info,
+                      const u8 **eeprom_ch_idx)
+{
+       u32 offset =
+           il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+       switch (eep_band) {
+       case 1:         /* 2.4GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_1;
+               break;
+       case 2:         /* 4.9GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_2;
+               break;
+       case 3:         /* 5.2GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_3;
+               break;
+       case 4:         /* 5.5GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_4;
+               break;
+       case 5:         /* 5.7GHz band */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_5;
+               break;
+       case 6:         /* 2.4GHz ht40 channels */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_6;
+               break;
+       case 7:         /* 5 GHz ht40 channels */
+               *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+               *eeprom_ch_info =
+                   (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+                                                                    offset);
+               *eeprom_ch_idx = il_eeprom_band_7;
+               break;
+       default:
+               BUG();
+       }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+                           ? # x " " : "")
+/**
+ * il_mod_ht40_chan_info - Copy ht40 channel info into the driver's il struct.
+ *
+ * Does not set up a command, or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+                     const struct il_eeprom_channel *eeprom_ch,
+                     u8 clear_ht40_extension_channel)
+{
+       struct il_channel_info *ch_info;
+
+       ch_info =
+           (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+       if (!il_is_channel_valid(ch_info))
+               return -1;
+
+       D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+                " Ad-Hoc %ssupported\n", ch_info->channel,
+                il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+                CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+                CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+                CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+                eeprom_ch->max_power_avg,
+                ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+                 !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+       ch_info->ht40_eeprom = *eeprom_ch;
+       ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+       ch_info->ht40_flags = eeprom_ch->flags;
+       if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+               ch_info->ht40_extension_channel &=
+                   ~clear_ht40_extension_channel;
+
+       return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+                           ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+       int eeprom_ch_count = 0;
+       const u8 *eeprom_ch_idx = NULL;
+       const struct il_eeprom_channel *eeprom_ch_info = NULL;
+       int band, ch;
+       struct il_channel_info *ch_info;
+
+       if (il->channel_count) {
+               D_EEPROM("Channel map already initialized.\n");
+               return 0;
+       }
+
+       D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+       il->channel_count =
+           ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+           ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+           ARRAY_SIZE(il_eeprom_band_5);
+
+       D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+       il->channel_info =
+           kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+                   GFP_KERNEL);
+       if (!il->channel_info) {
+               IL_ERR("Could not allocate channel_info\n");
+               il->channel_count = 0;
+               return -ENOMEM;
+       }
+
+       ch_info = il->channel_info;
+
+       /* Loop through the 5 EEPROM bands and add them in order to the
+        * channel map we maintain (which contains more information than
+        * just what is in the EEPROM) */
+       for (band = 1; band <= 5; band++) {
+
+               il_init_band_reference(il, band, &eeprom_ch_count,
+                                      &eeprom_ch_info, &eeprom_ch_idx);
+
+               /* Loop through each band adding each of the channels */
+               for (ch = 0; ch < eeprom_ch_count; ch++) {
+                       ch_info->channel = eeprom_ch_idx[ch];
+                       ch_info->band =
+                           (band == 1) ? IEEE80211_BAND_2GHZ :
+                           IEEE80211_BAND_5GHZ;
+
+                       /* permanently store EEPROM's channel regulatory flags
+                        *   and max power in channel info database. */
+                       ch_info->eeprom = eeprom_ch_info[ch];
+
+                       /* Copy the run-time flags so they are there even on
+                        * invalid channels */
+                       ch_info->flags = eeprom_ch_info[ch].flags;
+                       /* First mark ht40 as not enabled, then enable
+                        * channels one by one */
+                       ch_info->ht40_extension_channel =
+                           IEEE80211_CHAN_NO_HT40;
+
+                       if (!(il_is_channel_valid(ch_info))) {
+                               D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+                                        "No traffic\n", ch_info->channel,
+                                        ch_info->flags,
+                                        il_is_channel_a_band(ch_info) ? "5.2" :
+                                        "2.4");
+                               ch_info++;
+                               continue;
+                       }
+
+                       /* Initialize regulatory-based run-time data */
+                       ch_info->max_power_avg = ch_info->curr_txpow =
+                           eeprom_ch_info[ch].max_power_avg;
+                       ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+                       ch_info->min_power = 0;
+
+                       D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+                                " Ad-Hoc %ssupported\n", ch_info->channel,
+                                il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+                                CHECK_AND_PRINT_I(VALID),
+                                CHECK_AND_PRINT_I(IBSS),
+                                CHECK_AND_PRINT_I(ACTIVE),
+                                CHECK_AND_PRINT_I(RADAR),
+                                CHECK_AND_PRINT_I(WIDE),
+                                CHECK_AND_PRINT_I(DFS),
+                                eeprom_ch_info[ch].flags,
+                                eeprom_ch_info[ch].max_power_avg,
+                                ((eeprom_ch_info[ch].
+                                  flags & EEPROM_CHANNEL_IBSS) &&
+                                 !(eeprom_ch_info[ch].
+                                   flags & EEPROM_CHANNEL_RADAR)) ? "" :
+                                "not ");
+
+                       ch_info++;
+               }
+       }
+
+       /* Check if we do have HT40 channels */
+       if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+           EEPROM_REGULATORY_BAND_NO_HT40 &&
+           il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+           EEPROM_REGULATORY_BAND_NO_HT40)
+               return 0;
+
+       /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+       for (band = 6; band <= 7; band++) {
+               enum ieee80211_band ieeeband;
+
+               il_init_band_reference(il, band, &eeprom_ch_count,
+                                      &eeprom_ch_info, &eeprom_ch_idx);
+
+               /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+               ieeeband =
+                   (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+               /* Loop through each band adding each of the channels */
+               for (ch = 0; ch < eeprom_ch_count; ch++) {
+                       /* Set up driver's info for lower half */
+                       il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+                                             &eeprom_ch_info[ch],
+                                             IEEE80211_CHAN_NO_HT40PLUS);
+
+                       /* Set up driver's info for upper half */
+                       il_mod_ht40_chan_info(il, ieeeband,
+                                             eeprom_ch_idx[ch] + 4,
+                                             &eeprom_ch_info[ch],
+                                             IEEE80211_CHAN_NO_HT40MINUS);
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+       kfree(il->channel_info);
+       il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+                   u16 channel)
+{
+       int i;
+
+       switch (band) {
+       case IEEE80211_BAND_5GHZ:
+               for (i = 14; i < il->channel_count; i++) {
+                       if (il->channel_info[i].channel == channel)
+                               return &il->channel_info[i];
+               }
+               break;
+       case IEEE80211_BAND_2GHZ:
+               if (channel >= 1 && channel <= 14)
+                       return &il->channel_info[channel - 1];
+               break;
+       default:
+               BUG();
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct il_power_vec_entry {
+       struct il_powertable_cmd cmd;
+       u8 no_dtim;             /* number of DTIMs to skip */
+};
+
+static void
+il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+       memset(cmd, 0, sizeof(*cmd));
+
+       if (il->power_data.pci_pm)
+               cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+       D_POWER("Sleep command for CAM\n");
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+       D_POWER("Sending power/sleep command\n");
+       D_POWER("Flags value = 0x%08X\n", cmd->flags);
+       D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+       D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+       D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+               le32_to_cpu(cmd->sleep_interval[0]),
+               le32_to_cpu(cmd->sleep_interval[1]),
+               le32_to_cpu(cmd->sleep_interval[2]),
+               le32_to_cpu(cmd->sleep_interval[3]),
+               le32_to_cpu(cmd->sleep_interval[4]));
+
+       return il_send_cmd_pdu(il, C_POWER_TBL,
+                              sizeof(struct il_powertable_cmd), cmd);
+}
+
+int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+       int ret;
+       bool update_chains;
+
+       lockdep_assert_held(&il->mutex);
+
+       /* Don't update the RX chain when chain noise calibration is running */
+       update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+           il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+       if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+               return 0;
+
+       if (!il_is_ready_rf(il))
+               return -EIO;
+
+       /* On scan completion sleep_cmd_next is used, so keep it updated */
+       memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+       if (test_bit(S_SCANNING, &il->status) && !force) {
+               D_INFO("Defer power set mode while scanning\n");
+               return 0;
+       }
+
+       if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+               set_bit(S_POWER_PMI, &il->status);
+
+       ret = il_set_power(il, cmd);
+       if (!ret) {
+               if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+                       clear_bit(S_POWER_PMI, &il->status);
+
+               if (il->cfg->ops->lib->update_chain_flags && update_chains)
+                       il->cfg->ops->lib->update_chain_flags(il);
+               else if (il->cfg->ops->lib->update_chain_flags)
+                       D_POWER("Cannot update the power, chain noise "
+                               "calibration running: %d\n",
+                               il->chain_noise_data.state);
+
+               memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+       } else
+               IL_ERR("set power fail, ret = %d", ret);
+
+       return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+       struct il_powertable_cmd cmd;
+
+       il_power_sleep_cam_cmd(il, &cmd);
+       return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+       u16 lctl = il_pcie_link_ctl(il);
+
+       il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+       il->power_data.debug_sleep_level_override = -1;
+
+       memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req.  This should be set long enough to hear probe responses
+ * from more than one AP.  */
+#define IL_ACTIVE_DWELL_TIME_24    (30)        /* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24   (20)        /* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52   (10)
+#define IL_PASSIVE_DWELL_BASE      (100)
+#define IL_CHANNEL_TUNE_TIME       5
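+
+/*
+ * Illustrative example of how these values combine: an active scan on
+ * 2.4 GHz with two direct probes dwells 30 + 3 * (2 + 1) = 39 ms per
+ * channel, while an unassociated passive scan dwells 100 + 20 = 120 ms.
+ */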
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+       int ret;
+       struct il_rx_pkt *pkt;
+       struct il_host_cmd cmd = {
+               .id = C_SCAN_ABORT,
+               .flags = CMD_WANT_SKB,
+       };
+
+       /* Exit instantly with an error when the device is not ready
+        * to receive the scan abort command or is not currently
+        * performing a hardware scan */
+       if (!test_bit(S_READY, &il->status) ||
+           !test_bit(S_GEO_CONFIGURED, &il->status) ||
+           !test_bit(S_SCAN_HW, &il->status) ||
+           test_bit(S_FW_ERROR, &il->status) ||
+           test_bit(S_EXIT_PENDING, &il->status))
+               return -EIO;
+
+       ret = il_send_cmd_sync(il, &cmd);
+       if (ret)
+               return ret;
+
+       pkt = (struct il_rx_pkt *)cmd.reply_page;
+       if (pkt->u.status != CAN_ABORT_STATUS) {
+               /* The scan abort will return 1 for success or
+                * 2 for "failure".  A failure condition can be
+                * due to simply not being in an active scan which
+                * can occur if we send the scan abort before the
+                * microcode has notified us that a scan is
+                * completed. */
+               D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+               ret = -EIO;
+       }
+
+       il_free_pages(il, cmd.reply_page);
+       return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+       /* check if scan was requested from mac80211 */
+       if (il->scan_request) {
+               D_SCAN("Complete scan in mac80211\n");
+               ieee80211_scan_completed(il->hw, aborted);
+       }
+
+       il->scan_vif = NULL;
+       il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+       lockdep_assert_held(&il->mutex);
+
+       if (!test_bit(S_SCANNING, &il->status)) {
+               D_SCAN("Forcing scan end while not scanning\n");
+               return;
+       }
+
+       D_SCAN("Forcing scan end\n");
+       clear_bit(S_SCANNING, &il->status);
+       clear_bit(S_SCAN_HW, &il->status);
+       clear_bit(S_SCAN_ABORTING, &il->status);
+       il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       if (!test_bit(S_SCANNING, &il->status)) {
+               D_SCAN("Not performing scan to abort\n");
+               return;
+       }
+
+       if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+               D_SCAN("Scan abort in progress\n");
+               return;
+       }
+
+       ret = il_send_scan_abort(il);
+       if (ret) {
+               D_SCAN("Send scan abort failed %d\n", ret);
+               il_force_scan_end(il);
+       } else
+               D_SCAN("Successfully send scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+       D_SCAN("Queuing abort scan\n");
+       queue_work(il->workqueue, &il->abort_scan);
+       return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+       unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+       lockdep_assert_held(&il->mutex);
+
+       D_SCAN("Scan cancel timeout\n");
+
+       il_do_scan_abort(il);
+
+       while (time_before_eq(jiffies, timeout)) {
+               if (!test_bit(S_SCAN_HW, &il->status))
+                       break;
+               msleep(20);
+       }
+
+       return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_scanreq_notification *notif =
+           (struct il_scanreq_notification *)pkt->u.raw;
+
+       D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_scanstart_notification *notif =
+           (struct il_scanstart_notification *)pkt->u.raw;
+       il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+       D_SCAN("Scan start: " "%d [802.11%s] "
+              "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+              notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+              le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_scanresults_notification *notif =
+           (struct il_scanresults_notification *)pkt->u.raw;
+
+       D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+              "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+              le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+              le32_to_cpu(notif->stats[0]),
+              le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+       D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+              scan_notif->scanned_channels, scan_notif->tsf_low,
+              scan_notif->tsf_high, scan_notif->status);
+
+       /* The HW is no longer scanning */
+       clear_bit(S_SCAN_HW, &il->status);
+
+       D_SCAN("Scan on %sGHz took %dms\n",
+              (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+              jiffies_to_msecs(jiffies - il->scan_start));
+
+       queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+       /* scan handlers */
+       il->handlers[C_SCAN] = il_hdl_scan;
+       il->handlers[N_SCAN_START] = il_hdl_scan_start;
+       il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+       il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+inline u16
+il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+                        u8 n_probes)
+{
+       if (band == IEEE80211_BAND_5GHZ)
+               return IL_ACTIVE_DWELL_TIME_52 +
+                   IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+       else
+               return IL_ACTIVE_DWELL_TIME_24 +
+                   IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+                         struct ieee80211_vif *vif)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+       u16 value;
+
+       u16 passive = IL_PASSIVE_DWELL_BASE +
+           ((band == IEEE80211_BAND_2GHZ) ?
+            IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_TIME_52);
+
+       if (il_is_any_associated(il)) {
+               /*
+                * If we're associated, we clamp the maximum passive
+                * dwell time to be 98% of the smallest beacon interval
+                * (minus 2 * channel tune time)
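+                * For example (illustrative): a 100 ms beacon interval
+                * gives 100 * 98 / 100 - 2 * 5 = 88 ms, capping the
+                * 120 ms default passive dwell on 2.4 GHz.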
+                */
+               value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+               if (value > IL_PASSIVE_DWELL_BASE || !value)
+                       value = IL_PASSIVE_DWELL_BASE;
+               value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+               passive = min(value, passive);
+       }
+
+       return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+       u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+       if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
+               il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+       if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
+               il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+       int ret;
+
+       lockdep_assert_held(&il->mutex);
+
+       if (WARN_ON(!il->cfg->ops->utils->request_scan))
+               return -EOPNOTSUPP;
+
+       cancel_delayed_work(&il->scan_check);
+
+       if (!il_is_ready_rf(il)) {
+               IL_WARN("Request scan called when driver not ready.\n");
+               return -EIO;
+       }
+
+       if (test_bit(S_SCAN_HW, &il->status)) {
+               D_SCAN("Multiple concurrent scan requests in parallel.\n");
+               return -EBUSY;
+       }
+
+       if (test_bit(S_SCAN_ABORTING, &il->status)) {
+               D_SCAN("Scan request while abort pending.\n");
+               return -EBUSY;
+       }
+
+       D_SCAN("Starting scan...\n");
+
+       set_bit(S_SCANNING, &il->status);
+       il->scan_start = jiffies;
+
+       ret = il->cfg->ops->utils->request_scan(il, vif);
+       if (ret) {
+               clear_bit(S_SCANNING, &il->status);
+               return ret;
+       }
+
+       queue_delayed_work(il->workqueue, &il->scan_check,
+                          IL_SCAN_CHECK_WATCHDOG);
+
+       return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+              struct cfg80211_scan_request *req)
+{
+       struct il_priv *il = hw->priv;
+       int ret;
+
+       D_MAC80211("enter\n");
+
+       if (req->n_channels == 0)
+               return -EINVAL;
+
+       mutex_lock(&il->mutex);
+
+       if (test_bit(S_SCANNING, &il->status)) {
+               D_SCAN("Scan already in progress.\n");
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+
+       /* mac80211 will only ask for one band at a time */
+       il->scan_request = req;
+       il->scan_vif = vif;
+       il->scan_band = req->channels[0]->band;
+
+       ret = il_scan_initiate(il, vif);
+
+       D_MAC80211("leave\n");
+
+out_unlock:
+       mutex_unlock(&il->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+       struct il_priv *il =
+           container_of(data, struct il_priv, scan_check.work);
+
+       D_SCAN("Scan check work\n");
+
+       /* Since we got here, the firmware has not finished the scan and
+        * is most likely in bad shape, so we don't bother to send an
+        * abort command; just force scan completion towards mac80211 */
+       mutex_lock(&il->mutex);
+       il_force_scan_end(il);
+       mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IE for probe request
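+ *
+ * Builds a wildcard probe request: the 24 byte management header with
+ * broadcast DA/BSSID, a zero-length SSID IE and then the caller's IEs.
+ * Returns the total frame length, or 0 when @left cannot even hold the
+ * header and the empty SSID IE.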
+ */
+
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+                 const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+       int len = 0;
+       u8 *pos = NULL;
+
+       /* Make sure there is enough space for the probe request,
+        * two mandatory IEs and the data */
+       left -= 24;
+       if (left < 0)
+               return 0;
+
+       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+       memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+       memcpy(frame->sa, ta, ETH_ALEN);
+       memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+       frame->seq_ctrl = 0;
+
+       len += 24;
+
+       /* ...next IE... */
+       pos = &frame->u.probe_req.variable[0];
+
+       /* fill in our indirect SSID IE */
+       left -= 2;
+       if (left < 0)
+               return 0;
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = 0;
+
+       len += 2;
+
+       if (WARN_ON(left < ie_len))
+               return len;
+
+       if (ies && ie_len) {
+               memcpy(pos, ies, ie_len);
+               len += ie_len;
+       }
+
+       return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+       struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+       D_SCAN("Abort scan work\n");
+
+       /* We keep the scan_check work queued in case the firmware does not
+        * report back the scan completed notification */
+       mutex_lock(&il->mutex);
+       il_scan_cancel_timeout(il, 200);
+       mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+       struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+       bool aborted;
+
+       D_SCAN("Completed scan.\n");
+
+       cancel_delayed_work(&il->scan_check);
+
+       mutex_lock(&il->mutex);
+
+       aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+       if (aborted)
+               D_SCAN("Aborted scan completed.\n");
+
+       if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+               D_SCAN("Scan already completed.\n");
+               goto out_settings;
+       }
+
+       il_complete_scan(il, aborted);
+
+out_settings:
+       /* Can we still talk to firmware ? */
+       if (!il_is_ready_rf(il))
+               goto out;
+
+       /*
+        * We do not commit power settings while scan is pending,
+        * do it now if the settings changed.
+        */
+       il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+       il_set_tx_power(il, il->tx_power_next, false);
+
+       il->cfg->ops->utils->post_scan(il);
+
+out:
+       mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+       INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+       INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+       INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+       cancel_work_sync(&il->abort_scan);
+       cancel_work_sync(&il->scan_completed);
+
+       if (cancel_delayed_work_sync(&il->scan_check)) {
+               mutex_lock(&il->mutex);
+               il_force_scan_end(il);
+               mutex_unlock(&il->mutex);
+       }
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+
+       if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+               IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+                      sta_id, il->stations[sta_id].sta.sta.addr);
+
+       if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+               D_ASSOC("STA id %u addr %pM already present"
+                       " in uCode (according to driver)\n", sta_id,
+                       il->stations[sta_id].sta.sta.addr);
+       } else {
+               il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+               D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+                       il->stations[sta_id].sta.sta.addr);
+       }
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+                       struct il_rx_pkt *pkt, bool sync)
+{
+       u8 sta_id = addsta->sta.sta_id;
+       unsigned long flags;
+       int ret = -EIO;
+
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+               return ret;
+       }
+
+       D_INFO("Processing response for adding station %u\n", sta_id);
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       switch (pkt->u.add_sta.status) {
+       case ADD_STA_SUCCESS_MSK:
+               D_INFO("C_ADD_STA PASSED\n");
+               il_sta_ucode_activate(il, sta_id);
+               ret = 0;
+               break;
+       case ADD_STA_NO_ROOM_IN_TBL:
+               IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+               break;
+       case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+               IL_ERR("Adding station %d failed, no block ack resource.\n",
+                      sta_id);
+               break;
+       case ADD_STA_MODIFY_NON_EXIST_STA:
+               IL_ERR("Attempting to modify non-existing station %d\n",
+                      sta_id);
+               break;
+       default:
+               D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+               break;
+       }
+
+       D_INFO("%s station id %u addr %pM\n",
+              il->stations[sta_id].sta.mode ==
+              STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+              il->stations[sta_id].sta.sta.addr);
+
+       /*
+        * XXX: The MAC address in the command buffer is often changed from
+        * the original sent to the device. That is, the MAC address
+        * written to the command buffer often is not the same MAC address
+        * read from the command buffer when the command returns. This
+        * issue has not yet been resolved and this debugging is left to
+        * observe the problem.
+        */
+       D_INFO("%s station according to cmd buffer %pM\n",
+              il->stations[sta_id].sta.mode ==
+              STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+                   struct il_rx_pkt *pkt)
+{
+       struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+       il_process_add_sta_resp(il, addsta, pkt, false);
+
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+       struct il_rx_pkt *pkt = NULL;
+       int ret = 0;
+       u8 data[sizeof(*sta)];
+       struct il_host_cmd cmd = {
+               .id = C_ADD_STA,
+               .flags = flags,
+               .data = data,
+       };
+       u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+       D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+              flags & CMD_ASYNC ? "a" : "");
+
+       if (flags & CMD_ASYNC)
+               cmd.callback = il_add_sta_callback;
+       else {
+               cmd.flags |= CMD_WANT_SKB;
+               might_sleep();
+       }
+
+       cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+       ret = il_send_cmd(il, &cmd);
+
+       if (ret || (flags & CMD_ASYNC))
+               return ret;
+
+       if (ret == 0) {
+               pkt = (struct il_rx_pkt *)cmd.reply_page;
+               ret = il_process_add_sta_resp(il, sta, pkt, true);
+       }
+       il_free_pages(il, cmd.reply_page);
+
+       return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
+                     struct il_rxon_context *ctx)
+{
+       struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+       __le32 sta_flags;
+       u8 mimo_ps_mode;
+
+       if (!sta || !sta_ht_inf->ht_supported)
+               goto done;
+
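+       /* The SM power save mode is a two bit field (bits 2..3) of the
+        * HT capability info, hence the shift by two below. */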
+       mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+       D_ASSOC("spatial multiplexing power save mode: %s\n",
+               (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
+               (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+               "disabled");
+
+       sta_flags = il->stations[idx].sta.station_flags;
+
+       sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+       switch (mimo_ps_mode) {
+       case WLAN_HT_CAP_SM_PS_STATIC:
+               sta_flags |= STA_FLG_MIMO_DIS_MSK;
+               break;
+       case WLAN_HT_CAP_SM_PS_DYNAMIC:
+               sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+               break;
+       case WLAN_HT_CAP_SM_PS_DISABLED:
+               break;
+       default:
+               IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+               break;
+       }
+
+       sta_flags |=
+           cpu_to_le32((u32) sta_ht_inf->
+                       ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+       sta_flags |=
+           cpu_to_le32((u32) sta_ht_inf->
+                       ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+       if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+               sta_flags |= STA_FLG_HT40_EN_MSK;
+       else
+               sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+       il->stations[idx].sta.station_flags = sta_flags;
+done:
+       return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8
+il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+               const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+       struct il_station_entry *station;
+       int i;
+       u8 sta_id = IL_INVALID_STATION;
+       u16 rate;
+
+       if (is_ap)
+               sta_id = ctx->ap_sta_id;
+       else if (is_broadcast_ether_addr(addr))
+               sta_id = ctx->bcast_sta_id;
+       else
+               for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+                       if (!compare_ether_addr
+                           (il->stations[i].sta.sta.addr, addr)) {
+                               sta_id = i;
+                               break;
+                       }
+
+                       if (!il->stations[i].used &&
+                           sta_id == IL_INVALID_STATION)
+                               sta_id = i;
+               }
+
+       /*
+        * These two conditions have the same outcome, but keep them
+        * separate
+        */
+       if (unlikely(sta_id == IL_INVALID_STATION))
+               return sta_id;
+
+       /*
+        * uCode is not able to deal with multiple requests to add a
+        * station. Keep track if one is in progress so that we do not send
+        * another.
+        */
+       if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+               D_INFO("STA %d already in process of being added.\n", sta_id);
+               return sta_id;
+       }
+
+       if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+           (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+           !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+               D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+                       sta_id, addr);
+               return sta_id;
+       }
+
+       station = &il->stations[sta_id];
+       station->used = IL_STA_DRIVER_ACTIVE;
+       D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+       il->num_stations++;
+
+       /* Set up the C_ADD_STA command to send to device */
+       memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+       memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+       station->sta.mode = 0;
+       station->sta.sta.sta_id = sta_id;
+       station->sta.station_flags = ctx->station_flags;
+       station->ctxid = ctx->ctxid;
+
+       if (sta) {
+               struct il_station_priv_common *sta_priv;
+
+               sta_priv = (void *)sta->drv_priv;
+               sta_priv->ctx = ctx;
+       }
+
+       /*
+        * OK to call unconditionally, since local stations (IBSS BSSID
+        * STA and broadcast STA) pass in a NULL sta, and mac80211
+        * doesn't allow HT IBSS.
+        */
+       il_set_ht_add_station(il, sta_id, sta, ctx);
+
+       /* 3945 only */
+       rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+       /* Turn on both antennas for the station... */
+       station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+       return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common - Prepare a station entry and add it to the device
+ */
+int
+il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+                     const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
+                     u8 *sta_id_r)
+{
+       unsigned long flags_spin;
+       int ret = 0;
+       u8 sta_id;
+       struct il_addsta_cmd sta_cmd;
+
+       *sta_id_r = 0;
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Unable to prepare station %pM for addition\n", addr);
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+               return -EINVAL;
+       }
+
+       /*
+        * uCode is not able to deal with multiple requests to add a
+        * station. Keep track if one is in progress so that we do not send
+        * another.
+        */
+       if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+               D_INFO("STA %d already in process of being added.\n", sta_id);
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+               return -EEXIST;
+       }
+
+       if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+           (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+               D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+                       sta_id, addr);
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+               return -EEXIST;
+       }
+
+       il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+              sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+       /* Add station to device's station table */
+       ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+       if (ret) {
+               spin_lock_irqsave(&il->sta_lock, flags_spin);
+               IL_ERR("Adding station %pM failed.\n",
+                      il->stations[sta_id].sta.sta.addr);
+               il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+               il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+       }
+       *sta_id_r = sta_id;
+       return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+       /* uCode must be active and the driver must be inactive */
+       if ((il->stations[sta_id].
+            used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+           IL_STA_UCODE_ACTIVE)
+               IL_ERR("removed non active STA %u\n", sta_id);
+
+       il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+       memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+       D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
+                      bool temporary)
+{
+       struct il_rx_pkt *pkt;
+       int ret;
+
+       unsigned long flags_spin;
+       struct il_rem_sta_cmd rm_sta_cmd;
+
+       struct il_host_cmd cmd = {
+               .id = C_REM_STA,
+               .len = sizeof(struct il_rem_sta_cmd),
+               .flags = CMD_SYNC,
+               .data = &rm_sta_cmd,
+       };
+
+       memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+       rm_sta_cmd.num_sta = 1;
+       memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+       cmd.flags |= CMD_WANT_SKB;
+
+       ret = il_send_cmd(il, &cmd);
+
+       if (ret)
+               return ret;
+
+       pkt = (struct il_rx_pkt *)cmd.reply_page;
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+               ret = -EIO;
+       }
+
+       if (!ret) {
+               switch (pkt->u.rem_sta.status) {
+               case REM_STA_SUCCESS_MSK:
+                       if (!temporary) {
+                               spin_lock_irqsave(&il->sta_lock, flags_spin);
+                               il_sta_ucode_deactivate(il, sta_id);
+                               spin_unlock_irqrestore(&il->sta_lock,
+                                                      flags_spin);
+                       }
+                       D_ASSOC("C_REM_STA PASSED\n");
+                       break;
+               default:
+                       ret = -EIO;
+                       IL_ERR("C_REM_STA failed\n");
+                       break;
+               }
+       }
+       il_free_pages(il, cmd.reply_page);
+
+       return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
+{
+       unsigned long flags;
+
+       if (!il_is_ready(il)) {
+               D_INFO("Unable to remove station %pM, device not ready.\n",
+                      addr);
+               /*
+                * It is typical for stations to be removed when we are
+                * going down. Return success since device will be down
+                * soon anyway
+                */
+               return 0;
+       }
+
+       D_ASSOC("Removing STA from driver:%d  %pM\n", sta_id, addr);
+
+       if (WARN_ON(sta_id == IL_INVALID_STATION))
+               return -EINVAL;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+
+       if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+               D_INFO("Removing %pM but non DRIVER active\n", addr);
+               goto out_err;
+       }
+
+       if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+               D_INFO("Removing %pM but non UCODE active\n", addr);
+               goto out_err;
+       }
+
+       if (il->stations[sta_id].used & IL_STA_LOCAL) {
+               kfree(il->stations[sta_id].lq);
+               il->stations[sta_id].lq = NULL;
+       }
+
+       il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+       il->num_stations--;
+
+       BUG_ON(il->num_stations < 0);
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+
+       return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       int i;
+       unsigned long flags_spin;
+       bool cleared = false;
+
+       D_INFO("Clearing ucode stations in driver\n");
+
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if (ctx && ctx->ctxid != il->stations[i].ctxid)
+                       continue;
+
+               if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+                       D_INFO("Clearing ucode active for station %d\n", i);
+                       il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+                       cleared = true;
+               }
+       }
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+       if (!cleared)
+               D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * This function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       struct il_addsta_cmd sta_cmd;
+       struct il_link_quality_cmd lq;
+       unsigned long flags_spin;
+       int i;
+       bool found = false;
+       int ret;
+       bool send_lq;
+
+       if (!il_is_ready(il)) {
+               D_INFO("Not ready yet, not restoring any stations.\n");
+               return;
+       }
+
+       D_ASSOC("Restoring all known stations ... start.\n");
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if (ctx->ctxid != il->stations[i].ctxid)
+                       continue;
+               if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+                   !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+                       D_ASSOC("Restoring sta %pM\n",
+                               il->stations[i].sta.sta.addr);
+                       il->stations[i].sta.mode = 0;
+                       il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+                       found = true;
+               }
+       }
+
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+                       memcpy(&sta_cmd, &il->stations[i].sta,
+                              sizeof(struct il_addsta_cmd));
+                       send_lq = false;
+                       if (il->stations[i].lq) {
+                               memcpy(&lq, il->stations[i].lq,
+                                      sizeof(struct il_link_quality_cmd));
+                               send_lq = true;
+                       }
+                       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+                       ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+                       if (ret) {
+                               spin_lock_irqsave(&il->sta_lock, flags_spin);
+                               IL_ERR("Adding station %pM failed.\n",
+                                      il->stations[i].sta.sta.addr);
+                               il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+                               il->stations[i].used &=
+                                   ~IL_STA_UCODE_INPROGRESS;
+                               spin_unlock_irqrestore(&il->sta_lock,
+                                                      flags_spin);
+                       }
+                       /*
+                        * Rate scaling has already been initialized, send
+                        * current LQ command
+                        */
+                       if (send_lq)
+                               il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+                       spin_lock_irqsave(&il->sta_lock, flags_spin);
+                       il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+               }
+       }
+
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+       if (!found)
+               D_INFO("Restoring all known stations"
+                      " .... no stations to be restored.\n");
+       else
+               D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+       int i;
+
+       for (i = 0; i < il->sta_key_max_num; i++)
+               if (!test_and_set_bit(i, &il->ucode_key_table))
+                       return i;
+
+       return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if (!(il->stations[i].used & IL_STA_BCAST))
+                       continue;
+
+               il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+               il->num_stations--;
+               BUG_ON(il->num_stations < 0);
+               kfree(il->stations[i].lq);
+               il->stations[i].lq = NULL;
+       }
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+       int i;
+       D_RATE("lq station id 0x%x\n", lq->sta_id);
+       D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+              lq->general_params.dual_stream_ant_msk);
+
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+               D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending an LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
+                    struct il_link_quality_cmd *lq)
+{
+       int i;
+
+       if (ctx->ht.enabled)
+               return true;
+
+       D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+               if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+                       D_INFO("idx %d of LQ expects HT channel\n", i);
+                       return false;
+               }
+       }
+       return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after the station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * In the special case in which init is set, we clear the state indicating
+ * that station creation is in progress once the command completes.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+              struct il_link_quality_cmd *lq, u8 flags, bool init)
+{
+       int ret = 0;
+       unsigned long flags_spin;
+
+       struct il_host_cmd cmd = {
+               .id = C_TX_LINK_QUALITY_CMD,
+               .len = sizeof(struct il_link_quality_cmd),
+               .flags = flags,
+               .data = lq,
+       };
+
+       if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+               return -EINVAL;
+
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+               return -EINVAL;
+       }
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+       il_dump_lq_cmd(il, lq);
+       BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+       if (il_is_lq_table_valid(il, ctx, lq))
+               ret = il_send_cmd(il, &cmd);
+       else
+               ret = -EINVAL;
+
+       if (cmd.flags & CMD_ASYNC)
+               return ret;
+
+       if (init) {
+               D_INFO("init LQ command complete,"
+                      " clearing sta addition status for sta %d\n",
+                      lq->sta_id);
+               spin_lock_irqsave(&il->sta_lock, flags_spin);
+               il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 struct ieee80211_sta *sta)
+{
+       struct il_priv *il = hw->priv;
+       struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+       int ret;
+
+       D_INFO("received request to remove station %pM\n", sta->addr);
+       mutex_lock(&il->mutex);
+       D_INFO("proceeding to remove station %pM\n", sta->addr);
+       ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+       if (ret)
+               IL_ERR("Error removing station %pM\n", sta->addr);
+       mutex_unlock(&il->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt.  The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in il->rxq->rx_free.  When
+ *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the il->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   il->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the il->rxq.  The driver 'processed' idx is updated.
+ * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
+ *   IDX is not incremented and il->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set, it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc()   Allocates rx_free
+ * il_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE idx.  If insufficient rx_free buffers
+ *                            are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx()         Detach il_rx_bufs from pool up to the
+ *                            READ IDX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls il_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
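+/*
+ * Illustrative example of the idx arithmetic described above (values are
+ * assumed for the sake of the example, not taken from hardware): with
+ * RX_QUEUE_SIZE == 256, read == 10 and write == 250, il_rx_queue_space()
+ * below computes s = 10 - 250 = -240, then s += 256 -> 16, minus the
+ * 2-slot guard -> 14 free slots.
+ */
+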
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+       int s = q->read - q->write;
+       if (s <= 0)
+               s += RX_QUEUE_SIZE;
+       /* keep some buffer to not confuse full and empty queue */
+       s -= 2;
+       if (s < 0)
+               s = 0;
+       return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+       unsigned long flags;
+       u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+       u32 reg;
+
+       spin_lock_irqsave(&q->lock, flags);
+
+       if (q->need_update == 0)
+               goto exit_unlock;
+
+       /* If power-saving is in use, make sure device is awake */
+       if (test_bit(S_POWER_PMI, &il->status)) {
+               reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+                              reg);
+                       il_set_bit(il, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       goto exit_unlock;
+               }
+
+               q->write_actual = (q->write & ~0x7);
+               il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+               /* Else device is assumed to be awake */
+       } else {
+               /* Device expects a multiple of 8 */
+               q->write_actual = (q->write & ~0x7);
+               il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+       }
+
+       q->need_update = 0;
+
+exit_unlock:
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
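+
+/*
+ * Note on the write pointer rounding above: the device expects the write
+ * pointer as a multiple of 8, so e.g. a driver write idx of 13 is reported
+ * to the hardware as write_actual == 8 (13 & ~0x7).
+ */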
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+       struct il_rx_queue *rxq = &il->rxq;
+       struct device *dev = &il->pci_dev->dev;
+       int i;
+
+       spin_lock_init(&rxq->lock);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+
+       /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+       rxq->bd =
+           dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+                              GFP_KERNEL);
+       if (!rxq->bd)
+               goto err_bd;
+
+       rxq->rb_stts =
+           dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+                              &rxq->rb_stts_dma, GFP_KERNEL);
+       if (!rxq->rb_stts)
+               goto err_rb;
+
+       /* Fill the rx_used queue with _all_ of the Rx buffers */
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+       /* Set us so that we have processed and used all buffers, but have
+        * not restocked the Rx queue with fresh buffers */
+       rxq->read = rxq->write = 0;
+       rxq->write_actual = 0;
+       rxq->free_count = 0;
+       rxq->need_update = 0;
+       return 0;
+
+err_rb:
+       dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                         rxq->bd_dma);
+err_bd:
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+       if (!report->state) {
+               D_11H("Spectrum Measure Notification: Start\n");
+               return;
+       }
+
+       memcpy(&il->measure_report, report, sizeof(*report));
+       il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+                     u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+       u16 fc = le16_to_cpu(hdr->frame_control);
+
+       /*
+        * All contexts have the same setting here due to it being
+        * a module parameter, so OK to check any context.
+        */
+       if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+               return 0;
+
+       if (!(fc & IEEE80211_FCTL_PROTECTED))
+               return 0;
+
+       D_RX("decrypt_res:0x%x\n", decrypt_res);
+       switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+       case RX_RES_STATUS_SEC_TYPE_TKIP:
+               /* The uCode has got a bad phase 1 key, so it pushes the
+                * packet up to the driver; decryption will be done in SW. */
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_BAD_KEY_TTAK)
+                       break;
+
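+               /* fall through */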
+       case RX_RES_STATUS_SEC_TYPE_WEP:
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_BAD_ICV_MIC) {
+                       /* bad ICV, the packet is destroyed since the
+                        * decryption is done in place, drop it */
+                       D_RX("Packet destroyed\n");
+                       return -1;
+               }
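+               /* fall through */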
+       case RX_RES_STATUS_SEC_TYPE_CCMP:
+               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+                   RX_RES_STATUS_DECRYPT_OK) {
+                       D_RX("hw decrypt successfully!!!\n");
+                       stats->flag |= RX_FLAG_DECRYPTED;
+               }
+               break;
+
+       default:
+               break;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+       u32 reg = 0;
+       int txq_id = txq->q.id;
+
+       if (txq->need_update == 0)
+               return;
+
+       /* if we're trying to save power */
+       if (test_bit(S_POWER_PMI, &il->status)) {
+               /* wake up nic if it's powered down ...
+                * uCode will wake up, and interrupt us again, so next
+                * time we'll skip this part. */
+               reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+                              txq_id, reg);
+                       il_set_bit(il, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       return;
+               }
+
+               il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+               /*
+                * else not in power-save mode,
+                * uCode will never sleep when we're
+                * trying to tx (during RFKILL, we're not trying to tx).
+                */
+       } else
+               _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+       txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct il_queue *q = &txq->q;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->write_ptr != q->read_ptr) {
+               il->cfg->ops->lib->txq_free_tfd(il, txq);
+               q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct device *dev = &il->pci_dev->dev;
+       int i;
+
+       il_tx_queue_unmap(il, txq_id);
+
+       /* De-alloc array of command/tx buffers */
+       for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+               kfree(txq->cmd[i]);
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->q.n_bd)
+               dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+                                 txq->tfds, txq->q.dma_addr);
+
+       /* De-alloc array of per-TFD driver data */
+       kfree(txq->txb);
+       txq->txb = NULL;
+
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
+       /* 0-fill queue descriptor structure */
+       memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+       struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+       struct il_queue *q = &txq->q;
+       int i;
+
+       if (q->n_bd == 0)
+               return;
+
+       while (q->read_ptr != q->write_ptr) {
+               i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+               if (txq->meta[i].flags & CMD_MAPPED) {
+                       pci_unmap_single(il->pci_dev,
+                                        dma_unmap_addr(&txq->meta[i], mapping),
+                                        dma_unmap_len(&txq->meta[i], len),
+                                        PCI_DMA_BIDIRECTIONAL);
+                       txq->meta[i].flags = 0;
+               }
+
+               q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+       }
+
+       i = q->n_win;
+       if (txq->meta[i].flags & CMD_MAPPED) {
+               pci_unmap_single(il->pci_dev,
+                                dma_unmap_addr(&txq->meta[i], mapping),
+                                dma_unmap_len(&txq->meta[i], len),
+                                PCI_DMA_BIDIRECTIONAL);
+               txq->meta[i].flags = 0;
+       }
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+       struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+       struct device *dev = &il->pci_dev->dev;
+       int i;
+
+       il_cmd_queue_unmap(il);
+
+       /* De-alloc array of command/tx buffers */
+       for (i = 0; i <= TFD_CMD_SLOTS; i++)
+               kfree(txq->cmd[i]);
+
+       /* De-alloc circular buffer of TFDs */
+       if (txq->q.n_bd)
+               dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+                                 txq->tfds, txq->q.dma_addr);
+
+       /* deallocate arrays */
+       kfree(txq->cmd);
+       kfree(txq->meta);
+       txq->cmd = NULL;
+       txq->meta = NULL;
+
+       /* 0-fill queue descriptor structure */
+       memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For a Tx queue, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, the free space becomes < low mark, the Tx queue is
+ * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
+ * becomes > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
+
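+/*
+ * Illustrative example for il_queue_space() below (the numbers are assumed
+ * for the example only): with n_bd == 256, n_win == 256, read_ptr == 5 and
+ * write_ptr == 10, s = 5 - 10 = -5, then s += 256 -> 251, minus the
+ * 2-entry reserve -> 249 free entries.
+ */
+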
+int
+il_queue_space(const struct il_queue *q)
+{
+       int s = q->read_ptr - q->write_ptr;
+
+       if (q->read_ptr > q->write_ptr)
+               s -= q->n_bd;
+
+       if (s <= 0)
+               s += q->n_win;
+       /* keep some reserve to not confuse empty and full situations */
+       s -= 2;
+       if (s < 0)
+               s = 0;
+       return s;
+}
+EXPORT_SYMBOL(il_queue_space);
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
+             u32 id)
+{
+       q->n_bd = count;
+       q->n_win = slots_num;
+       q->id = id;
+
+       /* count must be power-of-two size, otherwise il_queue_inc_wrap
+        * and il_queue_dec_wrap are broken. */
+       BUG_ON(!is_power_of_2(count));
+
+       /* slots_num must be power-of-two size, otherwise
+        * il_get_cmd_idx is broken. */
+       BUG_ON(!is_power_of_2(slots_num));
+
+       q->low_mark = q->n_win / 4;
+       if (q->low_mark < 4)
+               q->low_mark = 4;
+
+       q->high_mark = q->n_win / 8;
+       if (q->high_mark < 2)
+               q->high_mark = 2;
+
+       q->write_ptr = q->read_ptr = 0;
+
+       return 0;
+}
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+       struct device *dev = &il->pci_dev->dev;
+       size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+       /* Driver private data, only for Tx (not command) queues,
+        * not shared with device. */
+       if (id != il->cmd_queue) {
+               txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+                                  GFP_KERNEL);
+               if (!txq->txb) {
+                       IL_ERR("kmalloc for auxiliary BD "
+                              "structures failed\n");
+                       goto error;
+               }
+       } else {
+               txq->txb = NULL;
+       }
+
+       /* Circular buffer of transmit frame descriptors (TFDs),
+        * shared with device */
+       txq->tfds =
+           dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+       if (!txq->tfds) {
+               IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
+               goto error;
+       }
+       txq->q.id = id;
+
+       return 0;
+
+error:
+       kfree(txq->txb);
+       txq->txb = NULL;
+
+       return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+                u32 txq_id)
+{
+       int i, len;
+       int ret;
+       int actual_slots = slots_num;
+
+       /*
+        * Alloc buffer array for commands (Tx or other types of commands).
+        * For the command queue (#4/#9), allocate command space + one big
+        * command for scan, since the scan command is very large; the system will
+        * not have two scans at the same time, so only one is needed.
+        * For normal Tx queues (all other queues), no super-size command
+        * space is needed.
+        */
+       if (txq_id == il->cmd_queue)
+               actual_slots++;
+
+       txq->meta =
+           kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+       txq->cmd =
+           kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+
+       if (!txq->meta || !txq->cmd)
+               goto out_free_arrays;
+
+       len = sizeof(struct il_device_cmd);
+       for (i = 0; i < actual_slots; i++) {
+               /* only happens for cmd queue */
+               if (i == slots_num)
+                       len = IL_MAX_CMD_SIZE;
+
+               txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+               if (!txq->cmd[i])
+                       goto err;
+       }
+
+       /* Alloc driver data array and TFD circular buffer */
+       ret = il_tx_queue_alloc(il, txq, txq_id);
+       if (ret)
+               goto err;
+
+       txq->need_update = 0;
+
+       /*
+        * For the default queues 0-3, set up the swq_id
+        * already -- all others need to get one later
+        * (if they need one at all).
+        */
+       if (txq_id < 4)
+               il_set_swq_id(txq, txq_id, txq_id);
+
+       /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+        * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
+       BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+       /* Initialize queue's high/low-water marks, and head/tail idxes */
+       il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       il->cfg->ops->lib->txq_init(il, txq);
+
+       return 0;
+err:
+       for (i = 0; i < actual_slots; i++)
+               kfree(txq->cmd[i]);
+out_free_arrays:
+       kfree(txq->meta);
+       kfree(txq->cmd);
+
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+                 u32 txq_id)
+{
+       int actual_slots = slots_num;
+
+       if (txq_id == il->cmd_queue)
+               actual_slots++;
+
+       memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+
+       txq->need_update = 0;
+
+       /* Initialize queue's high/low-water marks, and head/tail idxes */
+       il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+       /* Tell device where to find queue */
+       il->cfg->ops->lib->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the idx (> 0) of the command in the
+ * command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+       struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+       struct il_queue *q = &txq->q;
+       struct il_device_cmd *out_cmd;
+       struct il_cmd_meta *out_meta;
+       dma_addr_t phys_addr;
+       unsigned long flags;
+       int len;
+       u32 idx;
+       u16 fix_size;
+
+       cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+       fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+       /* If any of the command structures ends up being larger than
+        * TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
+        * we will need to increase the size of the TFD entries.
+        * Also check that the command buffer does not exceed the size
+        * of device_cmd and max_cmd_size. */
+       BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+              !(cmd->flags & CMD_SIZE_HUGE));
+       BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+       if (il_is_rfkill(il) || il_is_ctkill(il)) {
+               IL_WARN("Not sending command - %s KILL\n",
+                       il_is_rfkill(il) ? "RF" : "CT");
+               return -EIO;
+       }
+
+       spin_lock_irqsave(&il->hcmd_lock, flags);
+
+       if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+               spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+               IL_ERR("Restarting adapter due to command queue full\n");
+               queue_work(il->workqueue, &il->restart);
+               return -ENOSPC;
+       }
+
+       idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+       out_cmd = txq->cmd[idx];
+       out_meta = &txq->meta[idx];
+
+       if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+               spin_unlock_irqrestore(&il->hcmd_lock, flags);
+               return -ENOSPC;
+       }
+
+       memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+       out_meta->flags = cmd->flags | CMD_MAPPED;
+       if (cmd->flags & CMD_WANT_SKB)
+               out_meta->source = cmd;
+       if (cmd->flags & CMD_ASYNC)
+               out_meta->callback = cmd->callback;
+
+       out_cmd->hdr.cmd = cmd->id;
+       memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+       /* At this point, the out_cmd now has all of the incoming cmd
+        * information */
+
+       out_cmd->hdr.flags = 0;
+       out_cmd->hdr.sequence =
+           cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+       if (cmd->flags & CMD_SIZE_HUGE)
+               out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+       len = sizeof(struct il_device_cmd);
+       if (idx == TFD_CMD_SLOTS)
+               len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       switch (out_cmd->hdr.cmd) {
+       case C_TX_LINK_QUALITY_CMD:
+       case C_SENSITIVITY:
+               D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+                         "%d bytes at %d[%d]:%d\n",
+                         il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+                         le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+                         q->write_ptr, idx, il->cmd_queue);
+               break;
+       default:
+               D_HC("Sending command %s (#%x), seq: 0x%04X, "
+                    "%d bytes at %d[%d]:%d\n",
+                    il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+                    le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+                    idx, il->cmd_queue);
+       }
+#endif
+       txq->need_update = 1;
+
+       if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+               /* Set up entry in queue's byte count circular buffer */
+               il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+
+       phys_addr =
+           pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+                          PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_addr_set(out_meta, mapping, phys_addr);
+       dma_unmap_len_set(out_meta, len, fix_size);
+
+       il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
+                                                1, U32_PAD(cmd->len));
+
+       /* Increment and update queue's write idx */
+       q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+       il_txq_update_write_ptr(il, txq);
+
+       spin_unlock_irqrestore(&il->hcmd_lock, flags);
+       return idx;
+}
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms.  If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+       struct il_tx_queue *txq = &il->txq[txq_id];
+       struct il_queue *q = &txq->q;
+       int nfreed = 0;
+
+       if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+               IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+                      "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+                      q->write_ptr, q->read_ptr);
+               return;
+       }
+
+       for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+            q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+               if (nfreed++ > 0) {
+                       IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+                              q->write_ptr, q->read_ptr);
+                       queue_work(il->workqueue, &il->restart);
+               }
+
+       }
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+       int txq_id = SEQ_TO_QUEUE(sequence);
+       int idx = SEQ_TO_IDX(sequence);
+       int cmd_idx;
+       bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+       struct il_device_cmd *cmd;
+       struct il_cmd_meta *meta;
+       struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+       unsigned long flags;
+
+       /* If a Tx command is being handled and it isn't in the actual
+        * command queue, then a command routing bug has been introduced
+        * in the queue management code. */
+       if (WARN
+           (txq_id != il->cmd_queue,
+            "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+            txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+            il->txq[il->cmd_queue].q.write_ptr)) {
+               il_print_hex_error(il, pkt, 32);
+               return;
+       }
+
+       cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+       cmd = txq->cmd[cmd_idx];
+       meta = &txq->meta[cmd_idx];
+
+       txq->time_stamp = jiffies;
+
+       pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+                        dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+       /* Input error checking is done when commands are added to queue. */
+       if (meta->flags & CMD_WANT_SKB) {
+               meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+               rxb->page = NULL;
+       } else if (meta->callback)
+               meta->callback(il, cmd, pkt);
+
+       spin_lock_irqsave(&il->hcmd_lock, flags);
+
+       il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+       if (!(meta->flags & CMD_ASYNC)) {
+               clear_bit(S_HCMD_ACTIVE, &il->status);
+               D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+                      il_get_cmd_string(cmd->hdr.cmd));
+               wake_up(&il->wait_command_queue);
+       }
+
+       /* Mark as unmapped */
+       meta->flags = 0;
+
+       spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, the uCode will do kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * If bt_coex_active is set to false, the uCode will ignore BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to this
+ * WiFi/BT co-existence problem. The possible behaviors are:
+ *   Able to scan and find all the available APs
+ *   Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
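+
+/*
+ * Illustrative usage (the module name "iwlegacy" is assumed here; adjust
+ * it to whatever the common module is actually called on your build):
+ *
+ *   modprobe iwlegacy bt_coex_active=0
+ *
+ * The current value can be read back from
+ * /sys/module/<module>/parameters/bt_coex_active (read-only, S_IRUGO).
+ */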
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+/* This function both allocates and initializes hw and il. */
+struct ieee80211_hw *
+il_alloc_all(struct il_cfg *cfg)
+{
+       struct il_priv *il;
+       /* mac80211 allocates memory for this device instance, including
+        *   space for this driver's private structure */
+       struct ieee80211_hw *hw;
+
+       hw = ieee80211_alloc_hw(sizeof(struct il_priv),
+                               cfg->ops->ieee80211_ops);
+       if (hw == NULL) {
+               pr_err("%s: Can not allocate network device\n", cfg->name);
+               goto out;
+       }
+
+       il = hw->priv;
+       il->hw = hw;
+
+out:
+       return hw;
+}
+EXPORT_SYMBOL(il_alloc_all);
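+
+/*
+ * Illustrative call sequence (sketch only; the surrounding probe code and
+ * the "cfg" pointer are assumed, not taken from this file):
+ *
+ *     struct ieee80211_hw *hw = il_alloc_all(cfg);
+ *     struct il_priv *il;
+ *
+ *     if (!hw)
+ *             return -ENOMEM;
+ *     il = hw->priv;
+ */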
+
+#define MAX_BIT_RATE_40_MHZ 150        /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+                   struct ieee80211_sta_ht_cap *ht_info,
+                   enum ieee80211_band band)
+{
+       u16 max_bit_rate = 0;
+       u8 rx_chains_num = il->hw_params.rx_chains_num;
+       u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+       ht_info->cap = 0;
+       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+       ht_info->ht_supported = true;
+
+       ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+       max_bit_rate = MAX_BIT_RATE_20_MHZ;
+       if (il->hw_params.ht40_channel & BIT(band)) {
+               ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+               ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+               ht_info->mcs.rx_mask[4] = 0x01;
+               max_bit_rate = MAX_BIT_RATE_40_MHZ;
+       }
+
+       if (il->cfg->mod_params->amsdu_size_8K)
+               ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+       ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+       ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+       ht_info->mcs.rx_mask[0] = 0xFF;
+       if (rx_chains_num >= 2)
+               ht_info->mcs.rx_mask[1] = 0xFF;
+       if (rx_chains_num >= 3)
+               ht_info->mcs.rx_mask[2] = 0xFF;
+
+       /* Highest supported Rx data rate */
+       max_bit_rate *= rx_chains_num;
+       WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+       ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+       /* Tx MCS capabilities */
+       ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       if (tx_chains_num != rx_chains_num) {
+               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+               ht_info->mcs.tx_params |=
+                   ((tx_chains_num -
+                     1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+       }
+}
+
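+/*
+ * Worked example for il_init_ht_hw_capab() above (chain counts assumed for
+ * illustration): on an HT40-capable band with rx_chains_num == 2,
+ * max_bit_rate starts at 150 Mbps and is scaled by the chain count, so
+ * mcs.rx_highest is reported as 300 Mbps; a single RX chain limited to
+ * 20 MHz would report 72 Mbps.
+ */
+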
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+       struct il_channel_info *ch;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *channels;
+       struct ieee80211_channel *geo_ch;
+       struct ieee80211_rate *rates;
+       int i = 0;
+       s8 max_tx_power = 0;
+
+       if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+           il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+               D_INFO("Geography modes already initialized.\n");
+               set_bit(S_GEO_CONFIGURED, &il->status);
+               return 0;
+       }
+
+       channels =
+           kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+                   GFP_KERNEL);
+       if (!channels)
+               return -ENOMEM;
+
+       rates =
+           kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+                   GFP_KERNEL);
+       if (!rates) {
+               kfree(channels);
+               return -ENOMEM;
+       }
+
+       /* 5.2GHz channels start after the 2.4GHz channels */
+       sband = &il->bands[IEEE80211_BAND_5GHZ];
+       sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+       /* just OFDM */
+       sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+       sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+       if (il->cfg->sku & IL_SKU_N)
+               il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+       sband = &il->bands[IEEE80211_BAND_2GHZ];
+       sband->channels = channels;
+       /* OFDM & CCK */
+       sband->bitrates = rates;
+       sband->n_bitrates = RATE_COUNT_LEGACY;
+
+       if (il->cfg->sku & IL_SKU_N)
+               il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+       il->ieee_channels = channels;
+       il->ieee_rates = rates;
+
+       for (i = 0; i < il->channel_count; i++) {
+               ch = &il->channel_info[i];
+
+               if (!il_is_channel_valid(ch))
+                       continue;
+
+               sband = &il->bands[ch->band];
+
+               geo_ch = &sband->channels[sband->n_channels++];
+
+               geo_ch->center_freq =
+                   ieee80211_channel_to_frequency(ch->channel, ch->band);
+               geo_ch->max_power = ch->max_power_avg;
+               geo_ch->max_antenna_gain = 0xff;
+               geo_ch->hw_value = ch->channel;
+
+               if (il_is_channel_valid(ch)) {
+                       if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+                               geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+                       if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+                               geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+                       if (ch->flags & EEPROM_CHANNEL_RADAR)
+                               geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+                       geo_ch->flags |= ch->ht40_extension_channel;
+
+                       if (ch->max_power_avg > max_tx_power)
+                               max_tx_power = ch->max_power_avg;
+               } else {
+                       geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+               }
+
+               D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+                      geo_ch->center_freq,
+                      il_is_channel_a_band(ch) ? "5.2" : "2.4",
+                      geo_ch->flags & IEEE80211_CHAN_DISABLED ?
+                      "restricted" : "valid",
+                      geo_ch->flags);
+       }
+
+       il->tx_power_device_lmt = max_tx_power;
+       il->tx_power_user_lmt = max_tx_power;
+       il->tx_power_next = max_tx_power;
+
+       if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+           (il->cfg->sku & IL_SKU_A)) {
+               IL_INFO("Incorrectly detected BG card as ABG. "
+                       "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+                       il->pci_dev->device, il->pci_dev->subsystem_device);
+               il->cfg->sku &= ~IL_SKU_A;
+       }
+
+       IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+               il->bands[IEEE80211_BAND_2GHZ].n_channels,
+               il->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+       set_bit(S_GEO_CONFIGURED, &il->status);
+
+       return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+       kfree(il->ieee_channels);
+       kfree(il->ieee_rates);
+       clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+                       u16 channel, u8 extension_chan_offset)
+{
+       const struct il_channel_info *ch_info;
+
+       ch_info = il_get_channel_info(il, band, channel);
+       if (!il_is_channel_valid(ch_info))
+               return false;
+
+       if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+               return !(ch_info->ht40_extension_channel &
+                        IEEE80211_CHAN_NO_HT40PLUS);
+       else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+               return !(ch_info->ht40_extension_channel &
+                        IEEE80211_CHAN_NO_HT40MINUS);
+
+       return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+                     struct ieee80211_sta_ht_cap *ht_cap)
+{
+       if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+               return false;
+
+       /*
+        * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+        * the bit will not be set in the pure 40MHz case.
+        */
+       if (ht_cap && !ht_cap->ht_supported)
+               return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       if (il->disable_ht40)
+               return false;
+#endif
+
+       return il_is_channel_extension(il, il->band,
+                                      le16_to_cpu(ctx->staging.channel),
+                                      ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+       u16 new_val;
+       u16 beacon_factor;
+
+       /*
+        * If mac80211 hasn't given us a beacon interval, program
+        * the default into the device.
+        */
+       if (!beacon_val)
+               return DEFAULT_BEACON_INTERVAL;
+
+       /*
+        * If the beacon interval we obtained from the peer
+        * is too large, we'll have to wake up more often
+        * (and in the IBSS case, we'll beacon too much)
+        *
+        * For example, if max_beacon_val is 4096, and the
+        * requested beacon interval is 7000, we'll have to
+        * use 3500 to be able to wake up on the beacons.
+        *
+        * This could badly influence beacon detection stats.
+        */
+
+       beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+       new_val = beacon_val / beacon_factor;
+
+       if (!new_val)
+               new_val = max_beacon_val;
+
+       return new_val;
+}
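+
+/*
+ * Following the example in the comment above: with max_beacon_val == 4096
+ * and beacon_val == 7000, beacon_factor = (7000 + 4096) / 4096 = 2, so the
+ * programmed interval becomes 7000 / 2 = 3500.
+ */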
+
+int
+il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       u64 tsf;
+       s32 interval_tm, rem;
+       struct ieee80211_conf *conf = NULL;
+       u16 beacon_int;
+       struct ieee80211_vif *vif = ctx->vif;
+
+       conf = &il->hw->conf;
+
+       lockdep_assert_held(&il->mutex);
+
+       memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+       ctx->timing.timestamp = cpu_to_le64(il->timestamp);
+       ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+       beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+       /*
+        * TODO: For IBSS we need to get atim_win from mac80211,
+        *       for now just always use 0
+        */
+       ctx->timing.atim_win = 0;
+
+       beacon_int =
+           il_adjust_beacon_interval(beacon_int,
+                                     il->hw_params.max_beacon_itrvl *
+                                     TIME_UNIT);
+       ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+       tsf = il->timestamp;    /* tsf is modified by do_div: copy it */
+       interval_tm = beacon_int * TIME_UNIT;
+       rem = do_div(tsf, interval_tm);
+       ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+       ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+       D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+               le16_to_cpu(ctx->timing.beacon_interval),
+               le32_to_cpu(ctx->timing.beacon_init_val),
+               le16_to_cpu(ctx->timing.atim_win));
+
+       return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
+                              &ctx->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+                    int hw_decrypt)
+{
+       struct il_rxon_cmd *rxon = &ctx->staging;
+
+       if (hw_decrypt)
+               rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+       else
+               rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* validate RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       struct il_rxon_cmd *rxon = &ctx->staging;
+       bool error = false;
+
+       if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+               if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+                       IL_WARN("check 2.4G: wrong narrow\n");
+                       error = true;
+               }
+               if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+                       IL_WARN("check 2.4G: wrong radar\n");
+                       error = true;
+               }
+       } else {
+               if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+                       IL_WARN("check 5.2G: not short slot!\n");
+                       error = true;
+               }
+               if (rxon->flags & RXON_FLG_CCK_MSK) {
+                       IL_WARN("check 5.2G: CCK!\n");
+                       error = true;
+               }
+       }
+       if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+               IL_WARN("mac/bssid mcast!\n");
+               error = true;
+       }
+
+       /* make sure basic rates 6Mbps and 1Mbps are supported */
+       if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+           (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+               IL_WARN("neither 1 nor 6 are basic\n");
+               error = true;
+       }
+
+       if (le16_to_cpu(rxon->assoc_id) > 2007) {
+               IL_WARN("aid > 2007\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+           (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+               IL_WARN("CCK and short slot\n");
+               error = true;
+       }
+
+       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+           (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+               IL_WARN("CCK and auto detect\n");
+               error = true;
+       }
+
+       if ((rxon->flags &
+            (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+           RXON_FLG_TGG_PROTECT_MSK) {
+               IL_WARN("TGg but no auto-detect\n");
+               error = true;
+       }
+
+       if (error)
+               IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+
+       if (error) {
+               IL_ERR("Invalid RXON\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       const struct il_rxon_cmd *staging = &ctx->staging;
+       const struct il_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)                                                      \
+       if ((cond)) {                                                   \
+               D_INFO("need full RXON - " #cond "\n"); \
+               return 1;                                               \
+       }
+
+#define CHK_NEQ(c1, c2)                                                \
+       if ((c1) != (c2)) {                                     \
+               D_INFO("need full RXON - "      \
+                              #c1 " != " #c2 " - %d != %d\n",  \
+                              (c1), (c2));                     \
+               return 1;                                       \
+       }
+
+       /* These items are only settable from the full RXON command */
+       CHK(!il_is_associated_ctx(ctx));
+       CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+       CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+       CHK(compare_ether_addr
+           (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+       CHK_NEQ(staging->dev_type, active->dev_type);
+       CHK_NEQ(staging->channel, active->channel);
+       CHK_NEQ(staging->air_propagation, active->air_propagation);
+       CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+               active->ofdm_ht_single_stream_basic_rates);
+       CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+               active->ofdm_ht_dual_stream_basic_rates);
+       CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+       /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+        * be updated with the RXON_ASSOC command -- however only some
+        * flag transitions are allowed using RXON_ASSOC */
+
+       /* Check if we are not switching bands */
+       CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+               active->flags & RXON_FLG_BAND_24G_MSK);
+
+       /* Check if we are switching association toggle */
+       CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+               active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+       return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       /*
+        * Assign the lowest rate -- should really get this from
+        * the beacon skb from mac80211.
+        */
+       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+               return RATE_1M_PLCP;
+       else
+               return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
+               struct il_rxon_context *ctx)
+{
+       struct il_rxon_cmd *rxon = &ctx->staging;
+
+       if (!ctx->ht.enabled) {
+               rxon->flags &=
+                   ~(RXON_FLG_CHANNEL_MODE_MSK |
+                     RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+                     | RXON_FLG_HT_PROT_MSK);
+               return;
+       }
+
+       rxon->flags |=
+           cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+       /* Set up channel bandwidth:
+        * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+       /* clear the HT channel mode before setting the new mode */
+       rxon->flags &=
+           ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+       if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+               /* pure ht40 */
+               if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+                       rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+                       /* Note: control channel is opposite of extension channel */
+                       switch (ctx->ht.extension_chan_offset) {
+                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                               rxon->flags &=
+                                   ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                               rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               break;
+                       }
+               } else {
+                       /* Note: control channel is opposite of extension channel */
+                       switch (ctx->ht.extension_chan_offset) {
+                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+                               rxon->flags &=
+                                   ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+                               rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+                               break;
+                       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+                       default:
+                               /* channel location only valid if in Mixed mode */
+                               IL_ERR("invalid extension channel offset\n");
+                               break;
+                       }
+               }
+       } else {
+               rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+       }
+
+       if (il->cfg->ops->hcmd->set_rxon_chain)
+               il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+       D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+               "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+               ctx->ht.protection, ctx->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+       _il_set_rxon_ht(il, ht_conf, &il->ctx);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return a valid, unused channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+{
+       const struct il_channel_info *ch_info;
+       int i;
+       u8 channel = 0;
+       u8 min, max;
+
+       if (band == IEEE80211_BAND_5GHZ) {
+               min = 14;
+               max = il->channel_count;
+       } else {
+               min = 0;
+               max = 14;
+       }
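+       /* Assumption: il->channel_info[] lists the (up to) 14 2.4 GHz
+        * channels first, followed by the 5 GHz channels, so the index
+        * ranges above pick from a single band. */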
+
+       for (i = min; i < max; i++) {
+               channel = il->channel_info[i].channel;
+               if (channel == le16_to_cpu(il->ctx.staging.channel))
+                       continue;
+
+               ch_info = il_get_channel_info(il, band, channel);
+               if (il_is_channel_valid(ch_info))
+                       break;
+       }
+
+       return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+                   struct il_rxon_context *ctx)
+{
+       enum ieee80211_band band = ch->band;
+       u16 channel = ch->hw_value;
+
+       if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+               return 0;
+
+       ctx->staging.channel = cpu_to_le16(channel);
+       if (band == IEEE80211_BAND_5GHZ)
+               ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+       else
+               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+       il->band = band;
+
+       D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+       return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+                     enum ieee80211_band band, struct ieee80211_vif *vif)
+{
+       if (band == IEEE80211_BAND_5GHZ) {
+               ctx->staging.flags &=
+                   ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+                     RXON_FLG_CCK_MSK);
+               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+       } else {
+               /* Copied from il_post_associate() */
+               if (vif && vif->bss_conf.use_short_slot)
+                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+               ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+               ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+       }
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void
+il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       const struct il_channel_info *ch_info;
+
+       memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+       if (!ctx->vif) {
+               ctx->staging.dev_type = ctx->unused_devtype;
+       } else
+               switch (ctx->vif->type) {
+
+               case NL80211_IFTYPE_STATION:
+                       ctx->staging.dev_type = ctx->station_devtype;
+                       ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+                       break;
+
+               case NL80211_IFTYPE_ADHOC:
+                       ctx->staging.dev_type = ctx->ibss_devtype;
+                       ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+                       ctx->staging.filter_flags =
+                           RXON_FILTER_BCON_AWARE_MSK |
+                           RXON_FILTER_ACCEPT_GRP_MSK;
+                       break;
+
+               default:
+                       IL_ERR("Unsupported interface type %d\n",
+                              ctx->vif->type);
+                       break;
+               }
+
+#if 0
+       /* TODO:  Figure out when short_preamble would be set and cache from
+        * that */
+       if (!hw_to_local(il->hw)->short_preamble)
+               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       else
+               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+       ch_info =
+           il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+
+       if (!ch_info)
+               ch_info = &il->channel_info[0];
+
+       ctx->staging.channel = cpu_to_le16(ch_info->channel);
+       il->band = ch_info->band;
+
+       il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+
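+       /* Each bit in the basic-rates fields below selects one legacy rate,
+        * counted from IL_FIRST_OFDM_RATE / IL_FIRST_CCK_RATE; the 0xFF and
+        * 0xF masks keep the 8 OFDM and 4 CCK rate bits respectively. */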
+       ctx->staging.ofdm_basic_rates =
+           (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+       ctx->staging.cck_basic_rates =
+           (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+       /* clear both MIX and PURE40 mode flag */
+       ctx->staging.flags &=
+           ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+       if (ctx->vif)
+               memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+       ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+       ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+       const struct ieee80211_supported_band *hw = NULL;
+       struct ieee80211_rate *rate;
+       int i;
+
+       hw = il_get_hw_mode(il, il->band);
+       if (!hw) {
+               IL_ERR("Failed to set rate: unable to get hw mode\n");
+               return;
+       }
+
+       il->active_rate = 0;
+
+       for (i = 0; i < hw->n_bitrates; i++) {
+               rate = &(hw->bitrates[i]);
+               if (rate->hw_value < RATE_COUNT_LEGACY)
+                       il->active_rate |= (1 << rate->hw_value);
+       }
+
+       D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+       il->ctx.staging.cck_basic_rates =
+           (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+       il->ctx.staging.ofdm_basic_rates =
+           (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+       struct il_rxon_context *ctx = &il->ctx;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+               ieee80211_chswitch_done(ctx->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_csa_notification *csa = &(pkt->u.csa_notif);
+
+       struct il_rxon_context *ctx = &il->ctx;
+       struct il_rxon_cmd *rxon = (void *)&ctx->active;
+
+       if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+               return;
+
+       if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+               rxon->channel = csa->channel;
+               ctx->staging.channel = csa->channel;
+               D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+               il_chswitch_done(il, true);
+       } else {
+               IL_ERR("CSA notif (fail) : channel %d\n",
+                      le16_to_cpu(csa->channel));
+               il_chswitch_done(il, false);
+       }
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       struct il_rxon_cmd *rxon = &ctx->staging;
+
+       D_RADIO("RX CONFIG:\n");
+       il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+       D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+       D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+       D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+       D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+       D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+       D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+       D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+       D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+       D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+       /* Set the FW error flag -- cleared on il_down */
+       set_bit(S_FW_ERROR, &il->status);
+
+       /* Cancel currently queued command. */
+       clear_bit(S_HCMD_ACTIVE, &il->status);
+
+       IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+       il->cfg->ops->lib->dump_nic_error_log(il);
+       if (il->cfg->ops->lib->dump_fh)
+               il->cfg->ops->lib->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+       if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+               il_print_rx_config_cmd(il, &il->ctx);
+#endif
+
+       wake_up(&il->wait_command_queue);
+
+       /* Keep the restart process from trying to send host
+        * commands by clearing the INIT status bit */
+       clear_bit(S_READY, &il->status);
+
+       if (!test_bit(S_EXIT_PENDING, &il->status)) {
+               IL_DBG(IL_DL_FW_ERRORS,
+                      "Restarting adapter due to uCode error.\n");
+
+               if (il->cfg->mod_params->restart_fw)
+                       queue_work(il->workqueue, &il->restart);
+       }
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+il_apm_stop_master(struct il_priv *il)
+{
+       int ret = 0;
+
+       /* stop device's busmaster DMA activity */
+       il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+       ret =
+           _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+                        CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+       if (ret)
+               IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+       D_INFO("stop master\n");
+
+       return ret;
+}
+
+void
+il_apm_stop(struct il_priv *il)
+{
+       D_INFO("Stop card, put in low power state\n");
+
+       /* Stop device's DMA activity */
+       il_apm_stop_master(il);
+
+       /* Reset the entire device */
+       il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+       udelay(10);
+
+       /*
+        * Clear "initialization complete" bit to move adapter from
+        * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+        */
+       il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+       int ret = 0;
+       u16 lctl;
+
+       D_INFO("Init card's basic functions\n");
+
+       /*
+        * Use "set_bit" below rather than "write", to preserve any hardware
+        * bits already set by default after reset.
+        */
+
+       /* Disable L0S exit timer (platform NMI Work/Around) */
+       il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+                  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+       /*
+        * Disable L0s without affecting L1;
+        *  don't wait for ICH L0s (ICH bug W/A)
+        */
+       il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+                  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+       /* Set FH wait threshold to maximum (HW error during stress W/A) */
+       il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+       /*
+        * Enable HAP INTA (interrupt from management bus) to
+        * wake device's PCI Express link L1a -> L0s
+        * NOTE:  This is a no-op for 3945 (non-existent bit)
+        */
+       il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+                  CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+       /*
+        * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+        * Check if BIOS (or OS) enabled L1-ASPM on this device.
+        * If so (likely), disable L0S, so device moves directly L0->L1;
+        *    costs negligible amount of power savings.
+        * If not (unlikely), enable L0S, so there is at least some
+        *    power savings, even without L1.
+        */
+       if (il->cfg->base_params->set_l0s) {
+               lctl = il_pcie_link_ctl(il);
+               if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+                   PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+                       /* L1-ASPM enabled; disable(!) L0S  */
+                       il_set_bit(il, CSR_GIO_REG,
+                                  CSR_GIO_REG_VAL_L0S_ENABLED);
+                       D_POWER("L1 Enabled; Disabling L0S\n");
+               } else {
+                       /* L1-ASPM disabled; enable(!) L0S */
+                       il_clear_bit(il, CSR_GIO_REG,
+                                    CSR_GIO_REG_VAL_L0S_ENABLED);
+                       D_POWER("L1 Disabled; Enabling L0S\n");
+               }
+       }
+
+       /* Configure analog phase-lock-loop before activating to D0A */
+       if (il->cfg->base_params->pll_cfg_val)
+               il_set_bit(il, CSR_ANA_PLL_CFG,
+                          il->cfg->base_params->pll_cfg_val);
+
+       /*
+        * Set "initialization complete" bit to move adapter from
+        * D0U* --> D0A* (powered-up active) state.
+        */
+       il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       /*
+        * Wait for clock stabilization; once stabilized, access to
+        * device-internal resources is supported, e.g. il_wr_prph()
+        * and accesses to uCode SRAM.
+        */
+       ret =
+           _il_poll_bit(il, CSR_GP_CNTRL,
+                        CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                        CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+       if (ret < 0) {
+               D_INFO("Failed to init the card\n");
+               goto out;
+       }
+
+       /*
+        * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+        * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+        *
+        * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+        * do not disable clocks.  This preserves any hardware bits already
+        * set by default in "CLK_CTRL_REG" after reset.
+        */
+       if (il->cfg->base_params->use_bsm)
+               il_wr_prph(il, APMG_CLK_EN_REG,
+                          APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+       else
+               il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+       udelay(20);
+
+       /* Disable L1-Active */
+       il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+                        APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+       return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+       int ret;
+       s8 prev_tx_power;
+       bool defer;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       lockdep_assert_held(&il->mutex);
+
+       if (il->tx_power_user_lmt == tx_power && !force)
+               return 0;
+
+       if (!il->cfg->ops->lib->send_tx_power)
+               return -EOPNOTSUPP;
+
+       /* 0 dBm means 1 milliwatt */
+       if (tx_power < 0) {
+               IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+               return -EINVAL;
+       }
+
+       if (tx_power > il->tx_power_device_lmt) {
+               IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+                       tx_power, il->tx_power_device_lmt);
+               return -EINVAL;
+       }
+
+       if (!il_is_ready_rf(il))
+               return -EIO;
+
+       /* scan complete and commit_rxon use the tx_power_next value,
+        * so it always needs to be updated with the newest request */
+       il->tx_power_next = tx_power;
+
+       /* do not set tx power when scanning or channel changing */
+       defer = test_bit(S_SCANNING, &il->status) ||
+           memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+       if (defer && !force) {
+               D_INFO("Deferring tx power set\n");
+               return 0;
+       }
+
+       prev_tx_power = il->tx_power_user_lmt;
+       il->tx_power_user_lmt = tx_power;
+
+       ret = il->cfg->ops->lib->send_tx_power(il);
+
+       /* if we fail to set tx_power, restore the original tx power */
+       if (ret) {
+               il->tx_power_user_lmt = prev_tx_power;
+               il->tx_power_next = prev_tx_power;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+       struct il_bt_cmd bt_cmd = {
+               .lead_time = BT_LEAD_TIME_DEF,
+               .max_kill = BT_MAX_KILL_DEF,
+               .kill_ack_mask = 0,
+               .kill_cts_mask = 0,
+       };
+
+       if (!bt_coex_active)
+               bt_cmd.flags = BT_COEX_DISABLE;
+       else
+               bt_cmd.flags = BT_COEX_ENABLE;
+
+       D_INFO("BT coex %s\n",
+              (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+       if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+               IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+       struct il_stats_cmd stats_cmd = {
+               .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+       };
+
+       if (flags & CMD_ASYNC)
+               return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+                                            &stats_cmd, NULL);
+       else
+               return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+                                      &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+       D_RX("sleep mode: %d, src: %d\n",
+            sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+       u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+       D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+               il_get_cmd_string(pkt->hdr.cmd));
+       il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+       struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+       IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+              "seq 0x%04X ser 0x%08X\n",
+              le32_to_cpu(pkt->u.err_resp.error_type),
+              il_get_cmd_string(pkt->u.err_resp.cmd_id),
+              pkt->u.err_resp.cmd_id,
+              le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+              le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+       memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+              const struct ieee80211_tx_queue_params *params)
+{
+       struct il_priv *il = hw->priv;
+       unsigned long flags;
+       int q;
+
+       D_MAC80211("enter\n");
+
+       if (!il_is_ready_rf(il)) {
+               D_MAC80211("leave - RF not ready\n");
+               return -EIO;
+       }
+
+       if (queue >= AC_NUM) {
+               D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+               return 0;
+       }
+
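+       /* Map the mac80211 queue index onto the firmware AC index: the
+        * order is simply reversed (mac80211 queue 0 ends up in the last
+        * AC slot), presumably because the two sides number the access
+        * categories in opposite directions. */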
+       q = AC_NUM - 1 - queue;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+           cpu_to_le16(params->cw_min);
+       il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+           cpu_to_le16(params->cw_max);
+       il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
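+       /* params->txop comes from mac80211 in units of 32 usec; the
+        * multiplication below converts it to plain usec (assuming that is
+        * the unit the uCode expects for edca_txop). */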
+       il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+           cpu_to_le16((params->txop * 32));
+
+       il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       D_MAC80211("leave\n");
+       return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       struct il_priv *il = hw->priv;
+
+       return il->ibss_manager == IL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       il_connection_init_rx_config(il, ctx);
+
+       if (il->cfg->ops->hcmd->set_rxon_chain)
+               il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+       return il_commit_rxon(il, ctx);
+}
+
+static int
+il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       struct ieee80211_vif *vif = ctx->vif;
+       int err;
+
+       lockdep_assert_held(&il->mutex);
+
+       /*
+        * This variable will be correct only when there's just
+        * a single context, but all code using it is for hardware
+        * that supports only one context.
+        */
+       il->iw_mode = vif->type;
+
+       ctx->is_active = true;
+
+       err = il_set_mode(il, ctx);
+       if (err) {
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+               return err;
+       }
+
+       return 0;
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct il_priv *il = hw->priv;
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+       int err;
+       u32 modes;
+
+       D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+       mutex_lock(&il->mutex);
+
+       if (!il_is_ready_rf(il)) {
+               IL_WARN("Try to add interface when device not ready\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* check if busy context is exclusive */
+       if (il->ctx.vif &&
+           (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
+       if (!(modes & BIT(vif->type))) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       vif_priv->ctx = &il->ctx;
+       il->ctx.vif = vif;
+
+       err = il_setup_interface(il, &il->ctx);
+       if (err) {
+               il->ctx.vif = NULL;
+               il->iw_mode = NL80211_IFTYPE_STATION;
+       }
+
+out:
+       mutex_unlock(&il->mutex);
+
+       D_MAC80211("leave\n");
+       return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+                     bool mode_change)
+{
+       struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+       lockdep_assert_held(&il->mutex);
+
+       if (il->scan_vif == vif) {
+               il_scan_cancel_timeout(il, 200);
+               il_force_scan_end(il);
+       }
+
+       if (!mode_change) {
+               il_set_mode(il, ctx);
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+       }
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct il_priv *il = hw->priv;
+       struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+       D_MAC80211("enter\n");
+
+       mutex_lock(&il->mutex);
+
+       WARN_ON(ctx->vif != vif);
+       ctx->vif = NULL;
+
+       il_teardown_interface(il, vif, false);
+
+       memset(il->bssid, 0, ETH_ALEN);
+       mutex_unlock(&il->mutex);
+
+       D_MAC80211("leave\n");
+
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+       if (!il->txq)
+               il->txq =
+                   kzalloc(sizeof(struct il_tx_queue) *
+                           il->cfg->base_params->num_of_queues, GFP_KERNEL);
+       if (!il->txq) {
+               IL_ERR("Not enough memory for txq\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_txq_mem(struct il_priv *il)
+{
+       kfree(il->txq);
+       il->txq = NULL;
+}
+EXPORT_SYMBOL(il_txq_mem);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+
+#define IL_TRAFFIC_DUMP_SIZE   (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
+
+void
+il_reset_traffic_log(struct il_priv *il)
+{
+       il->tx_traffic_idx = 0;
+       il->rx_traffic_idx = 0;
+       if (il->tx_traffic)
+               memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+       if (il->rx_traffic)
+               memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+}
+
+int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+       u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
+
+       if (il_debug_level & IL_DL_TX) {
+               if (!il->tx_traffic) {
+                       il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+                       if (!il->tx_traffic)
+                               return -ENOMEM;
+               }
+       }
+       if (il_debug_level & IL_DL_RX) {
+               if (!il->rx_traffic) {
+                       il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+                       if (!il->rx_traffic)
+                               return -ENOMEM;
+               }
+       }
+       il_reset_traffic_log(il);
+       return 0;
+}
+EXPORT_SYMBOL(il_alloc_traffic_mem);
+
+void
+il_free_traffic_mem(struct il_priv *il)
+{
+       kfree(il->tx_traffic);
+       il->tx_traffic = NULL;
+
+       kfree(il->rx_traffic);
+       il->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(il_free_traffic_mem);
+
+void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+                        struct ieee80211_hdr *header)
+{
+       __le16 fc;
+       u16 len;
+
+       if (likely(!(il_debug_level & IL_DL_TX)))
+               return;
+
+       if (!il->tx_traffic)
+               return;
+
+       fc = header->frame_control;
+       if (ieee80211_is_data(fc)) {
+               len =
+                   (length >
+                    IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+               memcpy((il->tx_traffic +
+                       (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+                      len);
+               il->tx_traffic_idx =
+                   (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+       }
+}
+EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
+
+void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+                        struct ieee80211_hdr *header)
+{
+       __le16 fc;
+       u16 len;
+
+       if (likely(!(il_debug_level & IL_DL_RX)))
+               return;
+
+       if (!il->rx_traffic)
+               return;
+
+       fc = header->frame_control;
+       if (ieee80211_is_data(fc)) {
+               len =
+                   (length >
+                    IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+               memcpy((il->rx_traffic +
+                       (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+                      len);
+               il->rx_traffic_idx =
+                   (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+       }
+}
+EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
+
+const char *
+il_get_mgmt_string(int cmd)
+{
+       switch (cmd) {
+               IL_CMD(MANAGEMENT_ASSOC_REQ);
+               IL_CMD(MANAGEMENT_ASSOC_RESP);
+               IL_CMD(MANAGEMENT_REASSOC_REQ);
+               IL_CMD(MANAGEMENT_REASSOC_RESP);
+               IL_CMD(MANAGEMENT_PROBE_REQ);
+               IL_CMD(MANAGEMENT_PROBE_RESP);
+               IL_CMD(MANAGEMENT_BEACON);
+               IL_CMD(MANAGEMENT_ATIM);
+               IL_CMD(MANAGEMENT_DISASSOC);
+               IL_CMD(MANAGEMENT_AUTH);
+               IL_CMD(MANAGEMENT_DEAUTH);
+               IL_CMD(MANAGEMENT_ACTION);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+
+const char *
+il_get_ctrl_string(int cmd)
+{
+       switch (cmd) {
+               IL_CMD(CONTROL_BACK_REQ);
+               IL_CMD(CONTROL_BACK);
+               IL_CMD(CONTROL_PSPOLL);
+               IL_CMD(CONTROL_RTS);
+               IL_CMD(CONTROL_CTS);
+               IL_CMD(CONTROL_ACK);
+               IL_CMD(CONTROL_CFEND);
+               IL_CMD(CONTROL_CFENDACK);
+       default:
+               return "UNKNOWN";
+
+       }
+}
+
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+       memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+       memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats() records all
+ * MGMT, CTRL and DATA packets that pass through, for both TX and RX.
+ * Use debugfs to display the tx/rx_stats.
+ * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT and CTRL
+ * information is recorded, but DATA packets are still counted because
+ * il_led.c needs to control the LED blinking based on the number of
+ * tx and rx data frames.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+       struct traffic_stats *stats;
+
+       if (is_tx)
+               stats = &il->tx_stats;
+       else
+               stats = &il->rx_stats;
+
+       if (ieee80211_is_mgmt(fc)) {
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+                       stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+                       stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+                       stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+                       stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+                       stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_BEACON):
+                       stats->mgmt[MANAGEMENT_BEACON]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ATIM):
+                       stats->mgmt[MANAGEMENT_ATIM]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+                       stats->mgmt[MANAGEMENT_DISASSOC]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+                       stats->mgmt[MANAGEMENT_AUTH]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+                       stats->mgmt[MANAGEMENT_DEAUTH]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ACTION):
+                       stats->mgmt[MANAGEMENT_ACTION]++;
+                       break;
+               }
+       } else if (ieee80211_is_ctl(fc)) {
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+                       stats->ctrl[CONTROL_BACK_REQ]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_BACK):
+                       stats->ctrl[CONTROL_BACK]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+                       stats->ctrl[CONTROL_PSPOLL]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_RTS):
+                       stats->ctrl[CONTROL_RTS]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CTS):
+                       stats->ctrl[CONTROL_CTS]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_ACK):
+                       stats->ctrl[CONTROL_ACK]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CFEND):
+                       stats->ctrl[CONTROL_CFEND]++;
+                       break;
+               case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+                       stats->ctrl[CONTROL_CFENDACK]++;
+                       break;
+               }
+       } else {
+               /* data */
+               stats->data_cnt++;
+               stats->data_bytes += len;
+       }
+}
+EXPORT_SYMBOL(il_update_stats);
+#endif
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+       struct il_force_reset *force_reset;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return -EINVAL;
+
+       force_reset = &il->force_reset;
+       force_reset->reset_request_count++;
+       if (!external) {
+               if (force_reset->last_force_reset_jiffies &&
+                   time_after(force_reset->last_force_reset_jiffies +
+                              force_reset->reset_duration, jiffies)) {
+                       D_INFO("force reset rejected\n");
+                       force_reset->reset_reject_count++;
+                       return -EAGAIN;
+               }
+       }
+       force_reset->reset_success_count++;
+       force_reset->last_force_reset_jiffies = jiffies;
+
+       /*
+        * If the request is external (e.g. from debugfs), always perform
+        * it regardless of the module parameter setting.
+        * If the request is internal (uCode error or driver-detected
+        * failure), the fw_restart module parameter needs to be checked
+        * before performing the firmware reload.
+        */
+
+       if (!external && !il->cfg->mod_params->restart_fw) {
+               D_INFO("Cancel firmware reload based on "
+                      "module parameter setting\n");
+               return 0;
+       }
+
+       IL_ERR("On demand firmware reload\n");
+
+       /* Set the FW error flag -- cleared on il_down */
+       set_bit(S_FW_ERROR, &il->status);
+       wake_up(&il->wait_command_queue);
+       /*
+        * Keep the restart process from trying to send host
+        * commands by clearing the INIT status bit
+        */
+       clear_bit(S_READY, &il->status);
+       queue_work(il->workqueue, &il->restart);
+
+       return 0;
+}
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       enum nl80211_iftype newtype, bool newp2p)
+{
+       struct il_priv *il = hw->priv;
+       struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+       u32 modes;
+       int err;
+
+       newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+       mutex_lock(&il->mutex);
+
+       if (!ctx->vif || !il_is_ready_rf(il)) {
+               /*
+                * Huh? But wait ... this can maybe happen when
+                * we're in the middle of a firmware restart!
+                */
+               err = -EBUSY;
+               goto out;
+       }
+
+       modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+       if (!(modes & BIT(newtype))) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
+           (il->ctx.exclusive_interface_modes & BIT(newtype))) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* success */
+       il_teardown_interface(il, vif, true);
+       vif->type = newtype;
+       vif->p2p = newp2p;
+       err = il_setup_interface(il, ctx);
+       WARN_ON(err);
+       /*
+        * We've switched internally, but submitting to the
+        * device may have failed for some reason. Mask this
+        * error, because otherwise mac80211 will not switch
+        * (and set the interface type back) and we'll be
+        * out of sync with it.
+        */
+       err = 0;
+
+out:
+       mutex_unlock(&il->mutex);
+       return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it has not
+ * changed during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+       struct il_tx_queue *txq = &il->txq[cnt];
+       struct il_queue *q = &txq->q;
+       unsigned long timeout;
+       int ret;
+
+       if (q->read_ptr == q->write_ptr) {
+               txq->time_stamp = jiffies;
+               return 0;
+       }
+
+       timeout =
+           txq->time_stamp +
+           msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+
+       if (time_after(jiffies, timeout)) {
+               IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+                      il->cfg->base_params->wd_timeout);
+               ret = il_force_reset(il, false);
+               return (ret == -EAGAIN) ? 0 : 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout assures that we
+ * discover a hung queue between timeout and 1.25 * timeout.
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
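+
+/* Example (illustrative numbers only): with wd_timeout = 2000 ms the
+ * watchdog runs every IL_WD_TICK(2000) = 500 ms, so a queue whose
+ * time_stamp stops advancing is caught between 2000 and 2500 ms after
+ * its last update. */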
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stuck condition;
+ * if one is hung we reset the firmware. If everything is fine we just
+ * rearm the timer.
+ */
+void
+il_bg_watchdog(unsigned long data)
+{
+       struct il_priv *il = (struct il_priv *)data;
+       int cnt;
+       unsigned long timeout;
+
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       timeout = il->cfg->base_params->wd_timeout;
+       if (timeout == 0)
+               return;
+
+       /* monitor and check for stuck cmd queue */
+       if (il_check_stuck_queue(il, il->cmd_queue))
+               return;
+
+       /* monitor and check for other stuck queues */
+       if (il_is_any_associated(il)) {
+               for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+                       /* skip as we already checked the command queue */
+                       if (cnt == il->cmd_queue)
+                               continue;
+                       if (il_check_stuck_queue(il, cnt))
+                               return;
+               }
+       }
+
+       mod_timer(&il->watchdog,
+                 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+       unsigned int timeout = il->cfg->base_params->wd_timeout;
+
+       if (timeout)
+               mod_timer(&il->watchdog,
+                         jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+       else
+               del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * Extended beacon time format:
+ * the time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
+ */
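+/* Worked example (assuming TIME_UNIT is 1024 usec, i.e. one TU): with a
+ * beacon_interval of 100 TU the interval is 102400 usec, so usec = 250000
+ * gives quot = 2 beacons and rem = 45200 usec, packed as
+ * (quot << beacon_time_tsf_bits) + rem (masking ignored for brevity). */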
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+       u32 quot;
+       u32 rem;
+       u32 interval = beacon_interval * TIME_UNIT;
+
+       if (!interval || !usec)
+               return 0;
+
+       quot =
+           (usec /
+            interval) & (il_beacon_time_mask_high(il,
+                                                  il->hw_params.
+                                                  beacon_time_tsf_bits) >> il->
+                         hw_params.beacon_time_tsf_bits);
+       rem =
+           (usec % interval) & il_beacon_time_mask_low(il,
+                                                       il->hw_params.
+                                                       beacon_time_tsf_bits);
+
+       return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
+
+/* base is usually what we get from the uCode with each received frame;
+ * it is the same as the HW timer counter counting down
+ */
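+/* Reading of the code below: the extended (beacon count) parts of base and
+ * addon are summed directly, while the internal parts are combined as a
+ * down-counting difference, wrapping into the next beacon interval (and
+ * bumping the beacon count) when addon_low >= base_low. */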
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+                  u32 beacon_interval)
+{
+       u32 base_low = base & il_beacon_time_mask_low(il,
+                                                     il->hw_params.
+                                                     beacon_time_tsf_bits);
+       u32 addon_low = addon & il_beacon_time_mask_low(il,
+                                                       il->hw_params.
+                                                       beacon_time_tsf_bits);
+       u32 interval = beacon_interval * TIME_UNIT;
+       u32 res = (base & il_beacon_time_mask_high(il,
+                                                  il->hw_params.
+                                                  beacon_time_tsf_bits)) +
+           (addon & il_beacon_time_mask_high(il,
+                                             il->hw_params.
+                                             beacon_time_tsf_bits));
+
+       if (base_low > addon_low)
+               res += base_low - addon_low;
+       else if (base_low < addon_low) {
+               res += interval + base_low - addon_low;
+               res += (1 << il->hw_params.beacon_time_tsf_bits);
+       } else
+               res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+       return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int
+il_pci_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct il_priv *il = pci_get_drvdata(pdev);
+
+       /*
+        * This function is called when the system goes into the suspend
+        * state. mac80211 will call il_mac_stop() from the mac80211 suspend
+        * function first, but since il_mac_stop() has no knowledge of who
+        * the caller is, it will not call apm_ops.stop() to stop the DMA
+        * operation. We call apm_ops.stop() here to make sure the DMA is
+        * stopped.
+        */
+       il_apm_stop(il);
+
+       return 0;
+}
+EXPORT_SYMBOL(il_pci_suspend);
+
+int
+il_pci_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct il_priv *il = pci_get_drvdata(pdev);
+       bool hw_rfkill = false;
+
+       /*
+        * We disable the RETRY_TIMEOUT register (0x41) to keep
+        * PCI Tx retries from interfering with C3 CPU state.
+        */
+       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+       il_enable_interrupts(il);
+
+       if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+               hw_rfkill = true;
+
+       if (hw_rfkill)
+               set_bit(S_RF_KILL_HW, &il->status);
+       else
+               clear_bit(S_RF_KILL_HW, &il->status);
+
+       wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+       return 0;
+}
+EXPORT_SYMBOL(il_pci_resume);
+
+const struct dev_pm_ops il_pm_ops = {
+       .suspend = il_pci_suspend,
+       .resume = il_pci_resume,
+       .freeze = il_pci_suspend,
+       .thaw = il_pci_resume,
+       .poweroff = il_pci_suspend,
+       .restore = il_pci_resume,
+};
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       if (test_bit(S_EXIT_PENDING, &il->status))
+               return;
+
+       if (!ctx->is_active)
+               return;
+
+       ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+       if (ctx->qos_data.qos_active)
+               ctx->qos_data.def_qos_parm.qos_flags |=
+                   QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+       if (ctx->ht.enabled)
+               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+       D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+             ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+
+       il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
+                             &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct il_priv *il = hw->priv;
+       const struct il_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = conf->channel;
+       struct il_ht_config *ht_conf = &il->current_ht_config;
+       struct il_rxon_context *ctx = &il->ctx;
+       unsigned long flags = 0;
+       int ret = 0;
+       u16 ch;
+       int scan_active = 0;
+       bool ht_changed = false;
+
+       if (WARN_ON(!il->cfg->ops->legacy))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&il->mutex);
+
+       D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+                  changed);
+
+       if (unlikely(test_bit(S_SCANNING, &il->status))) {
+               scan_active = 1;
+               D_MAC80211("scan active\n");
+       }
+
+       if (changed &
+           (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+               /* mac80211 uses static SMPS for non-HT, which is what we want */
+               il->current_ht_config.smps = conf->smps_mode;
+
+               /*
+                * Recalculate chain counts.
+                *
+                * If monitor mode is enabled then mac80211 will
+                * set up the SM PS mode to OFF if an HT channel is
+                * configured.
+                */
+               if (il->cfg->ops->hcmd->set_rxon_chain)
+                       il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+       }
+
+       /* during scanning mac80211 will delay channel setting until the
+        * scan finishes, with changed = 0
+        */
+       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+               if (scan_active)
+                       goto set_ch_out;
+
+               ch = channel->hw_value;
+               ch_info = il_get_channel_info(il, channel->band, ch);
+               if (!il_is_channel_valid(ch_info)) {
+                       D_MAC80211("leave - invalid channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
+               if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+                   !il_is_channel_ibss(ch_info)) {
+                       D_MAC80211("leave - not IBSS channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
+               spin_lock_irqsave(&il->lock, flags);
+
+               /* Configure HT40 channels */
+               if (ctx->ht.enabled != conf_is_ht(conf)) {
+                       ctx->ht.enabled = conf_is_ht(conf);
+                       ht_changed = true;
+               }
+               if (ctx->ht.enabled) {
+                       if (conf_is_ht40_minus(conf)) {
+                               ctx->ht.extension_chan_offset =
+                                   IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                               ctx->ht.is_40mhz = true;
+                       } else if (conf_is_ht40_plus(conf)) {
+                               ctx->ht.extension_chan_offset =
+                                   IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                               ctx->ht.is_40mhz = true;
+                       } else {
+                               ctx->ht.extension_chan_offset =
+                                   IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                               ctx->ht.is_40mhz = false;
+                       }
+               } else
+                       ctx->ht.is_40mhz = false;
+
+               /*
+                * Default to no protection. Protection mode will
+                * later be set from BSS config in il_ht_conf
+                */
+               ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+               /* if we are switching from HT to 2.4, clear any
+                * HT-related flags since 2.4 does not support HT */
+               if ((le16_to_cpu(ctx->staging.channel) != ch))
+                       ctx->staging.flags = 0;
+
+               il_set_rxon_channel(il, channel, ctx);
+               il_set_rxon_ht(il, ht_conf);
+
+               il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+               spin_unlock_irqrestore(&il->lock, flags);
+
+               if (il->cfg->ops->legacy->update_bcast_stations)
+                       ret = il->cfg->ops->legacy->update_bcast_stations(il);
+
+set_ch_out:
+               /* The list of supported rates and rate mask can be different
+                * for each band; since the band may have changed, reset
+                * the rate mask to what mac80211 lists */
+               il_set_rate(il);
+       }
+
+       if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+               ret = il_power_update_mode(il, false);
+               if (ret)
+                       D_MAC80211("Error setting sleep level\n");
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+                          conf->power_level);
+
+               il_set_tx_power(il, conf->power_level, false);
+       }
+
+       if (!il_is_ready(il)) {
+               D_MAC80211("leave - not ready\n");
+               goto out;
+       }
+
+       if (scan_active)
+               goto out;
+
+       if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+               il_commit_rxon(il, ctx);
+       else
+               D_INFO("Not re-sending same RXON configuration.\n");
+       if (ht_changed)
+               il_update_qos(il, ctx);
+
+out:
+       D_MAC80211("leave\n");
+       mutex_unlock(&il->mutex);
+       return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct il_priv *il = hw->priv;
+       unsigned long flags;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       if (WARN_ON(!il->cfg->ops->legacy))
+               return;
+
+       mutex_lock(&il->mutex);
+       D_MAC80211("enter\n");
+
+       spin_lock_irqsave(&il->lock, flags);
+       memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* new association -- get rid of the IBSS beacon skb */
+       if (il->beacon_skb)
+               dev_kfree_skb(il->beacon_skb);
+
+       il->beacon_skb = NULL;
+
+       il->timestamp = 0;
+
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       il_scan_cancel_timeout(il, 100);
+       if (!il_is_ready_rf(il)) {
+               D_MAC80211("leave - not ready\n");
+               mutex_unlock(&il->mutex);
+               return;
+       }
+
+       /* we are restarting the association process,
+        * so clear the RXON_FILTER_ASSOC_MSK bit
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       il_commit_rxon(il, ctx);
+
+       il_set_rate(il);
+
+       mutex_unlock(&il->mutex);
+
+       D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+       struct il_ht_config *ht_conf = &il->current_ht_config;
+       struct ieee80211_sta *sta;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+       D_ASSOC("enter:\n");
+
+       if (!ctx->ht.enabled)
+               return;
+
+       ctx->ht.protection =
+           bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+       ctx->ht.non_gf_sta_present =
+           !!(bss_conf->
+              ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+       ht_conf->single_chain_sufficient = false;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               rcu_read_lock();
+               sta = ieee80211_find_sta(vif, bss_conf->bssid);
+               if (sta) {
+                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+                       int maxstreams;
+
+                       maxstreams =
+                           (ht_cap->mcs.
+                            tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+                           >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+                       maxstreams += 1;
+
+                       if (ht_cap->mcs.rx_mask[1] == 0 &&
+                           ht_cap->mcs.rx_mask[2] == 0)
+                               ht_conf->single_chain_sufficient = true;
+                       if (maxstreams <= 1)
+                               ht_conf->single_chain_sufficient = true;
+               } else {
+                       /*
+                        * If at all, this can only happen through a race
+                        * when the AP disconnects us while we're still
+                        * setting up the connection; in that case mac80211
+                        * will soon tell us about it.
+                        */
+                       ht_conf->single_chain_sufficient = true;
+               }
+               rcu_read_unlock();
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ht_conf->single_chain_sufficient = true;
+               break;
+       default:
+               break;
+       }
+
+       D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+       struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+       /*
+        * inform the ucode that there is no longer an
+        * association and that no more packets should be
+        * sent
+        */
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+       ctx->staging.assoc_id = 0;
+       il_commit_rxon(il, ctx);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       struct il_priv *il = hw->priv;
+       unsigned long flags;
+       __le64 timestamp;
+       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+       if (!skb)
+               return;
+
+       D_MAC80211("enter\n");
+
+       lockdep_assert_held(&il->mutex);
+
+       if (!il->beacon_ctx) {
+               IL_ERR("update beacon but no beacon context!\n");
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       if (il->beacon_skb)
+               dev_kfree_skb(il->beacon_skb);
+
+       il->beacon_skb = skb;
+
+       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+       il->timestamp = le64_to_cpu(timestamp);
+
+       D_MAC80211("leave\n");
+       spin_unlock_irqrestore(&il->lock, flags);
+
+       if (!il_is_ready_rf(il)) {
+               D_MAC80211("leave - RF not ready\n");
+               return;
+       }
+
+       il->cfg->ops->legacy->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                       struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+       struct il_priv *il = hw->priv;
+       struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+       int ret;
+
+       if (WARN_ON(!il->cfg->ops->legacy))
+               return;
+
+       D_MAC80211("changes = 0x%X\n", changes);
+
+       mutex_lock(&il->mutex);
+
+       if (!il_is_alive(il)) {
+               mutex_unlock(&il->mutex);
+               return;
+       }
+
+       if (changes & BSS_CHANGED_QOS) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&il->lock, flags);
+               ctx->qos_data.qos_active = bss_conf->qos;
+               il_update_qos(il, ctx);
+               spin_unlock_irqrestore(&il->lock, flags);
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               /*
+                * the add_interface code must make sure we only ever
+                * have a single interface that could be beaconing at
+                * any time.
+                */
+               if (vif->bss_conf.enable_beacon)
+                       il->beacon_ctx = ctx;
+               else
+                       il->beacon_ctx = NULL;
+       }
+
+       if (changes & BSS_CHANGED_BSSID) {
+               D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+               /*
+                * If there is currently a HW scan going on in the
+                * background then we need to cancel it else the RXON
+                * below/in post_associate will fail.
+                */
+               if (il_scan_cancel_timeout(il, 100)) {
+                       IL_WARN("Aborted scan still in progress after 100ms\n");
+                       D_MAC80211("leaving - scan abort failed.\n");
+                       mutex_unlock(&il->mutex);
+                       return;
+               }
+
+               /* mac80211 only sets assoc when in STATION mode */
+               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+                       memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+                              ETH_ALEN);
+
+                       /* currently needed in a few places */
+                       memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+               } else {
+                       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+               }
+
+       }
+
+       /*
+        * This needs to be after setting the BSSID in case
+        * mac80211 decides to do both changes at once because
+        * it will invoke post_associate.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+               il_beacon_update(hw, vif);
+
+       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+               D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+               if (bss_conf->use_short_preamble)
+                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+       }
+
+       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+               D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+               if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+               if (bss_conf->use_cts_prot)
+                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+               else
+                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+       }
+
+       if (changes & BSS_CHANGED_BASIC_RATES) {
+               /* XXX use this information
+                *
+                * To do that, remove code from il_set_rate() and put something
+                * like this here:
+                *
+                if (A-band)
+                ctx->staging.ofdm_basic_rates =
+                bss_conf->basic_rates;
+                else
+                ctx->staging.ofdm_basic_rates =
+                bss_conf->basic_rates >> 4;
+                ctx->staging.cck_basic_rates =
+                bss_conf->basic_rates & 0xF;
+                */
+       }
+
+       if (changes & BSS_CHANGED_HT) {
+               il_ht_conf(il, vif);
+
+               if (il->cfg->ops->hcmd->set_rxon_chain)
+                       il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+       }
+
+       if (changes & BSS_CHANGED_ASSOC) {
+               D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+               if (bss_conf->assoc) {
+                       il->timestamp = bss_conf->timestamp;
+
+                       if (!il_is_rfkill(il))
+                               il->cfg->ops->legacy->post_associate(il);
+               } else
+                       il_set_no_assoc(il, vif);
+       }
+
+       if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+               D_MAC80211("Changes (%#x) while associated\n", changes);
+               ret = il_send_rxon_assoc(il, ctx);
+               if (!ret) {
+                       /* Sync active_rxon with latest change. */
+                       memcpy((void *)&ctx->active, &ctx->staging,
+                              sizeof(struct il_rxon_cmd));
+               }
+       }
+
+       if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               if (vif->bss_conf.enable_beacon) {
+                       memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+                              ETH_ALEN);
+                       memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+                       il->cfg->ops->legacy->config_ap(il);
+               } else
+                       il_set_no_assoc(il, vif);
+       }
+
+       if (changes & BSS_CHANGED_IBSS) {
+               ret = il->cfg->ops->legacy->manage_ibss_station(il, vif,
+                                                       bss_conf->ibss_joined);
+               if (ret)
+                       IL_ERR("failed to %s IBSS station %pM\n",
+                              bss_conf->ibss_joined ? "add" : "remove",
+                              bss_conf->bssid);
+       }
+
+       mutex_unlock(&il->mutex);
+
+       D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+       struct il_priv *il = data;
+       u32 inta, inta_mask;
+       u32 inta_fh;
+       unsigned long flags;
+       if (!il)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&il->lock, flags);
+
+       /* Disable (but don't clear!) interrupts here to avoid
+        *    back-to-back ISRs and sporadic interrupts from our NIC.
+        * If we have something to service, the tasklet will re-enable ints.
+        * If we *don't* have something, we'll re-enable before leaving here. */
+       inta_mask = _il_rd(il, CSR_INT_MASK);   /* just for debug */
+       _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+       /* Discover which interrupts are active/pending */
+       inta = _il_rd(il, CSR_INT);
+       inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+       /* Ignore interrupt if there's nothing in NIC to service.
+        * This may be due to IRQ shared with another device,
+        * or due to sporadic interrupts thrown from our NIC. */
+       if (!inta && !inta_fh) {
+               D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+               goto none;
+       }
+
+       if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+               /* Hardware disappeared. It might have already raised
+                * an interrupt */
+               IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+               goto unplugged;
+       }
+
+       D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+             inta_fh);
+
+       inta &= ~CSR_INT_BIT_SCD;
+
+       /* il_irq_tasklet() will service interrupts and re-enable them */
+       if (likely(inta || inta_fh))
+               tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+       spin_unlock_irqrestore(&il->lock, flags);
+       return IRQ_HANDLED;
+
+none:
+       /* re-enable interrupts here since we don't have anything to service. */
+       /* only Re-enable if disabled by irq */
+       if (test_bit(S_INT_ENABLED, &il->status))
+               il_enable_interrupts(il);
+       spin_unlock_irqrestore(&il->lock, flags);
+       return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ *  il_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
+ *  function.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+                    __le16 fc, __le32 *tx_flags)
+{
+       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+               *tx_flags |= TX_CMD_FLG_RTS_MSK;
+               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+               if (!ieee80211_is_mgmt(fc))
+                       return;
+
+               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+               case cpu_to_le16(IEEE80211_STYPE_AUTH):
+               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
+                       break;
+               }
+       } else if (info->control.rates[0].flags &
+                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
+               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+               *tx_flags |= TX_CMD_FLG_CTS_MSK;
+               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+       }
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644 (file)
index 0000000..1bc0b02
--- /dev/null
@@ -0,0 +1,3424 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>         /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n)             ((4-(n))&0x3)
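+
+/*
+ * For example (illustration only): U32_PAD(5) == 3 and U32_PAD(8) == 0,
+ * i.e. the macro yields the number of bytes needed to pad a length up to
+ * the next 32-bit boundary.
+ */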
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon stats being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE            2304U
+#define MAX_MPDU_SIZE            2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define        DEFAULT_SHORT_RETRY_LIMIT 7U
+#define        DEFAULT_LONG_RETRY_LIMIT  4U
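+
+/*
+ * A minimal sketch (illustration only, not used by the driver) of how the
+ * rules above translate into a per-frame decision, where len is the MPDU
+ * length in bytes and thresh the configured RTS threshold:
+ *
+ *     if (thresh == 0)
+ *             use_rts = true;
+ *     else if (thresh > MAX_MSDU_SIZE)
+ *             use_rts = false;
+ *     else
+ *             use_rts = (len > thresh);
+ */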
+
+struct il_rx_buf {
+       dma_addr_t page_dma;
+       struct page *page;
+       struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+       /* only for SYNC commands, iff the reply skb is wanted */
+       struct il_host_cmd *source;
+       /*
+        * only for ASYNC commands
+        * (which is somewhat stupid -- look at common.c for instance
+        * which duplicates a bunch of code because the callback isn't
+        * invoked for SYNC commands; if it were, and its result were
+        * passed through, this would be simpler...)
+        */
+       void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+                         struct il_rx_pkt *pkt);
+
+       /* The CMD_SIZE_HUGE flag bit indicates that the command
+        * structure is stored at the end of the shared queue memory. */
+       u32 flags;
+
+        DEFINE_DMA_UNMAP_ADDR(mapping);
+        DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+       int n_bd;               /* number of BDs in this queue */
+       int write_ptr;          /* 1-st empty entry (idx) host_w */
+       int read_ptr;           /* last used entry (idx) host_r */
+       /* use for monitoring and recovering the stuck queue */
+       dma_addr_t dma_addr;    /* physical addr for BD's */
+       int n_win;              /* safe queue win */
+       u32 id;
+       int low_mark;           /* low watermark, resume queue if free
+                                * space more than this */
+       int high_mark;          /* high watermark, stop queue if free
+                                * space less than this */
+};
+
+/* One for each TFD */
+struct il_tx_info {
+       struct sk_buff *skb;
+       struct il_rxon_context *ctx;
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates the queue uses high-throughput aggregation (HT AGG)
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+       struct il_queue q;
+       void *tfds;
+       struct il_device_cmd **cmd;
+       struct il_cmd_meta *meta;
+       struct il_tx_info *txb;
+       unsigned long time_stamp;
+       u8 need_update;
+       u8 sched_retry;
+       u8 active;
+       u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT       5000    /* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT          10      /* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT      1000    /* number of attempts (not time) */
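+
+/*
+ * A minimal sketch (illustration only; the real code lives in common.c) of
+ * the read procedure described above, for one 16-bit word at EEPROM byte
+ * offset addr:
+ *
+ *     _il_wr(il, CSR_EEPROM_REG, addr << 1);
+ *     for (t = 0; t < IL_EEPROM_ACCESS_TIMEOUT; t += 10) {
+ *             r = _il_rd(il, CSR_EEPROM_REG);
+ *             if (r & CSR_EEPROM_REG_READ_VALID_MSK)
+ *                     return (u16)(r >> 16);
+ *             udelay(10);
+ *     }
+ */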
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ *        It only indicates that 20 MHz channel use is supported; HT40 channel
+ *        usage is indicated by a separate set of regulatory flags for each
+ *        HT40 channel pair.
+ *
+ * NOTE:  Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+       EEPROM_CHANNEL_VALID = (1 << 0),        /* usable for this SKU/geo */
+       EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+       /* Bit 2 Reserved */
+       EEPROM_CHANNEL_ACTIVE = (1 << 3),       /* active scanning allowed */
+       EEPROM_CHANNEL_RADAR = (1 << 4),        /* radar detection required */
+       EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+       /* Bit 6 Reserved (was Narrow Channel) */
+       EEPROM_CHANNEL_DFS = (1 << 7),  /* dynamic freq selection candidate */
+};
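+
+/*
+ * For example (illustration only), a channel whose flags contain
+ * (EEPROM_CHANNEL_VALID | EEPROM_CHANNEL_IBSS | EEPROM_CHANNEL_ACTIVE)
+ * and do not contain EEPROM_CHANNEL_RADAR may be used to start an IBSS
+ * or AP network; if any of the first three is missing, or RADAR is set,
+ * it may not.
+ */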
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+       u8 flags;               /* EEPROM_CHANNEL_* flags copied from EEPROM */
+       s8 max_power_avg;       /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION     (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS      (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS          (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS   (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION    (5)
+#define EEPROM_4965_EEPROM_VERSION     (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6)        /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8)        /* 48  bytes */
+#define EEPROM_4965_BOARD_REVISION             (2*0x4F)        /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA                  (2*0x56+1)      /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna).  EEPROM contains:
+ *
+ * 1)  Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2)  Gain table idx used to achieve the target measurement power.
+ *     This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4)  RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+       u8 temperature;         /* Device temperature (Celsius) */
+       u8 gain_idx;            /* Index into gain table */
+       u8 actual_pow;          /* Measured RF output power, half-dBm */
+       s8 pa_det;              /* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel.  EEPROM contains:
+ *
+ * 1)  Channel number measured
+ *
+ * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
+ *     (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+       u8 ch_num;
+       struct il_eeprom_calib_measure
+           measurements[EEPROM_TX_POWER_TX_CHAINS]
+           [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1)  First and last channels within range of the subband.  "0" values
+ *     indicate that this sample set is not being used.
+ *
+ * 2)  Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+       u8 ch_from;             /* channel number of lowest channel in subband */
+       u8 ch_to;               /* channel number of highest channel in subband */
+       struct il_eeprom_calib_ch_info ch1;
+       struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info.  EEPROM contains:
+ *
+ * 1)  Factory-measured saturation power levels (maximum levels at which
+ *     tx power amplifier can output a signal without too much distortion).
+ *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
+ *     values apply to all channels within each of the bands.
+ *
+ * 2)  Factory-measured power supply voltage level.  This is assumed to be
+ *     constant (i.e. same value applies to all channels/bands) while the
+ *     factory measurements are being made.
+ *
+ * 3)  Up to 8 sets of factory-measured txpower calibration values.
+ *     These are for different frequency ranges, since txpower gain
+ *     characteristics of the analog radio circuitry vary with frequency.
+ *
+ *     Not all sets need to be filled with data;
+ *     struct il_eeprom_calib_subband_info contains range of channels
+ *     (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+       u8 saturation_power24;  /* half-dBm (e.g. "34" = 17 dBm) */
+       u8 saturation_power52;  /* half-dBm */
+       __le16 voltage;         /* signed */
+       struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID                    (2*0x08)   /* 2 bytes */
+#define EEPROM_MAC_ADDRESS                  (2*0x15)   /* 6  bytes */
+#define EEPROM_BOARD_REVISION               (2*0x35)   /* 2  bytes */
+#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1) /* 9  bytes */
+#define EEPROM_VERSION                      (2*0x44)   /* 2  bytes */
+#define EEPROM_SKU_CAP                      (2*0x45)   /* 2  bytes */
+#define EEPROM_OEM_MODE                     (2*0x46)   /* 2  bytes */
+#define EEPROM_WOWLAN_MODE                  (2*0x47)   /* 2  bytes */
+#define EEPROM_RADIO_CONFIG                 (2*0x48)   /* 2  bytes */
+#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)   /* 2  bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)  /* bits 0-1   */
+#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3)  /* bits 2-3   */
+#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3)  /* bits 4-5   */
+#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3)  /* bits 6-7   */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF)  /* bits 8-11  */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF)  /* bits 12-15 */
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
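+
+/*
+ * Usage example (illustration only): if radio_cfg holds the 16-bit word
+ * read from EEPROM_RADIO_CONFIG, then EEPROM_RF_CFG_TYPE_MSK(radio_cfg)
+ * extracts the RF type from bits 0-1 and EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg)
+ * the TX antenna bitmap from bits 8-11.
+ */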
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID            (2*0x60)   /* 4  bytes */
+#define EEPROM_REGULATORY_BAND_1            (2*0x62)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)   /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2            (2*0x71)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)   /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3            (2*0x7F)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)   /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4            (2*0x8C)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)   /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5            (2*0x98)   /* 2  bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)   /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
+ *        control channel to which to tune.  RXON also specifies whether the
+ *        control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)  /* 14 bytes */
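+
+/*
+ * Worked example (illustration only): for HT40 channel "1 (5)" above, the
+ * lower 20 MHz half is channel 1 (2412 MHz), the upper half is channel 5
+ * (2432 MHz), and the 40 MHz channel as a whole is centered on channel 3
+ * (2422 MHz), 10 MHz above the lower half's center.
+ */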
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)  /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40                 (0)
+
+struct il_eeprom_ops {
+       const u32 regulatory_bands[7];
+       int (*acquire_semaphore) (struct il_priv *il);
+       void (*release_semaphore) (struct il_priv *il);
+};
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+                                                 enum ieee80211_band band,
+                                                 u16 channel);
+
+#define IL_NUM_SCAN_RATES         (2)
+
+struct il4965_channel_tgd_info {
+       u8 type;
+       s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+       s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+       /* maximum power level to prevent clipping for each rate, derived by
+        *   us from this band's saturation power in EEPROM */
+       const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+       struct il3945_tx_power tpc;     /* actual radio and DSP gain settings */
+       s8 power_table_idx;     /* actual (compensated) idx into gain table */
+       s8 base_power_idx;      /* gain idx for power at factory temp. */
+       s8 requested_power;     /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+       struct il3945_tx_power tpc;     /* actual radio and DSP gain settings */
+       s8 power_table_idx;     /* actual (compensated) idx into gain table */
+       s8 requested_power;     /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ *     with one another!
+ */
+struct il_channel_info {
+       struct il4965_channel_tgd_info tgd;
+       struct il4965_channel_tgh_info tgh;
+       struct il_eeprom_channel eeprom;        /* EEPROM regulatory limit */
+       struct il_eeprom_channel ht40_eeprom;   /* EEPROM regulatory limit for
+                                                * HT40 channel */
+
+       u8 channel;             /* channel number */
+       u8 flags;               /* flags copied from EEPROM */
+       s8 max_power_avg;       /* (dBm) regul. eeprom, normal Tx, any rate */
+       s8 curr_txpow;          /* (dBm) regulatory/spectrum/user (not h/w) limit */
+       s8 min_power;           /* always 0 */
+       s8 scan_power;          /* (dBm) regul. eeprom, direct scans, any rate */
+
+       u8 group_idx;           /* 0-4, maps channel to group1/2/3/4/5 */
+       u8 band_idx;            /* 0-4, maps channel to band1/2/3/4/5 */
+       enum ieee80211_band band;
+
+       /* HT40 channel info */
+       s8 ht40_max_power_avg;  /* (dBm) regul. eeprom, normal Tx, any rate */
+       u8 ht40_flags;          /* flags copied from EEPROM */
+       u8 ht40_extension_channel;      /* HT_IE_EXT_CHANNEL_* */
+
+       /* Radio/DSP gain settings for each "normal" data Tx rate.
+        * These include, in addition to RF and DSP gain, a few fields for
+        *   remembering/modifying gain settings (idxes). */
+       struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+       /* Radio/DSP gain settings for each scan rate, for directed scans. */
+       struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK          0       /* shared */
+#define IL_TX_FIFO_BE          1
+#define IL_TX_FIFO_VI          2       /* shared */
+#define IL_TX_FIFO_VO          3
+#define IL_TX_FIFO_UNUSED      -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES      10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM       4
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+       union {
+               struct ieee80211_hdr frame;
+               struct il_tx_beacon_cmd beacon;
+               u8 raw[IEEE80211_FRAME_LEN];
+               u8 cmd[360];
+       } u;
+       struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
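+
+/*
+ * Example (illustration only): a sequence-control field of 0x1234 carries
+ * fragment number 4 and sequence number SEQ_TO_SN(0x1234) == 0x123;
+ * MAX_SN is therefore 4095 (0xFFF).
+ */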
+
+enum {
+       CMD_SYNC = 0,
+       CMD_SIZE_NORMAL = 0,
+       CMD_NO_SKB = 0,
+       CMD_SIZE_HUGE = (1 << 0),
+       CMD_ASYNC = (1 << 1),
+       CMD_WANT_SKB = (1 << 2),
+       CMD_MAPPED = (1 << 3),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+       struct il_cmd_header hdr;       /* uCode API */
+       union {
+               u32 flags;
+               u8 val8;
+               u16 val16;
+               u32 val32;
+               struct il_tx_cmd tx;
+               u8 payload[DEF_CMD_PAYLOAD_SIZE];
+       } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+       const void *data;
+       unsigned long reply_page;
+       void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+                         struct il_rx_pkt *pkt);
+       u32 flags;
+       u16 len;
+       u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+       __le32 *bd;
+       dma_addr_t bd_dma;
+       struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+       struct il_rx_buf *queue[RX_QUEUE_SIZE];
+       u32 read;
+       u32 write;
+       u32 free_count;
+       u32 write_actual;
+       struct list_head rx_free;
+       struct list_head rx_used;
+       int need_update;
+       struct il_rb_status *rb_stts;
+       dma_addr_t rb_stts_dma;
+       spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN         8
+
+#define MAX_TID_COUNT        9
+
+#define IL_INVALID_RATE     0xFF
+#define IL_INVALID_VALUE    -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: bitmap, one bit for each frame pending ACK in Tx win
+ * @rate_n_flags: Rate at which Tx was attempted
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA).  This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+       u16 txq_id;
+       u16 frame_count;
+       u16 wait_for_ba;
+       u16 start_idx;
+       u64 bitmap;
+       u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+       u8 state;
+};
+
+struct il_tid_data {
+       u16 seq_number;         /* 4965 only */
+       u16 tfds_in_queue;
+       struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+       u32 cipher;
+       int keylen;
+       u8 keyidx;
+       u8 key[32];
+};
+
+union il_ht_rate_supp {
+       u16 rates;
+       struct {
+               u8 siso_rate;
+               u8 mimo_rate;
+       };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
+
+struct il_ht_config {
+       bool single_chain_sufficient;
+       enum ieee80211_smps_mode smps;  /* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+       int qos_active;
+       struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+       struct il_addsta_cmd sta;
+       struct il_tid_data tid[MAX_TID_COUNT];
+       u8 used, ctxid;
+       struct il_hw_key keyinfo;
+       struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+       struct il_rxon_context *ctx;
+       u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+       struct il_rxon_context *ctx;
+       u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+       void *v_addr;           /* access by driver */
+       dma_addr_t p_addr;      /* access by card's busmaster DMA */
+       u32 len;                /* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+       __le32 ver;             /* major/minor/API/serial */
+       struct {
+               __le32 inst_size;       /* bytes of runtime code */
+               __le32 data_size;       /* bytes of runtime data */
+               __le32 init_size;       /* bytes of init code */
+               __le32 init_data_size;  /* bytes of init data */
+               __le32 boot_size;       /* bytes of bootstrap code */
+               u8 data[0];     /* in same order as sizes */
+       } v1;
+};
+
+struct il4965_ibss_seq {
+       u8 mac[ETH_ALEN];
+       u16 seq_num;
+       u16 frag_num;
+       unsigned long packet_time;
+       struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+       u16 min_nrg_cck;
+       u16 max_nrg_cck;
+
+       u16 nrg_th_cck;
+       u16 nrg_th_ofdm;
+
+       u16 auto_corr_min_ofdm;
+       u16 auto_corr_min_ofdm_mrc;
+       u16 auto_corr_min_ofdm_x1;
+       u16 auto_corr_min_ofdm_mrc_x1;
+
+       u16 auto_corr_max_ofdm;
+       u16 auto_corr_max_ofdm_mrc;
+       u16 auto_corr_max_ofdm_x1;
+       u16 auto_corr_max_ofdm_mrc_x1;
+
+       u16 auto_corr_max_cck;
+       u16 auto_corr_max_cck_mrc;
+       u16 auto_corr_min_cck;
+       u16 auto_corr_min_cck_mrc;
+
+       u16 barker_corr_th_min;
+       u16 barker_corr_th_min_mrc;
+       u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+/**
+ * struct il_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations:
+ * @ht40_channel: bitmap of bands in which 40 MHz width is possible:
+ * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @struct il_sensitivity_ranges: range of sensitivity values
+ */
+struct il_hw_params {
+       u8 max_txq_num;
+       u8 dma_chnl_num;
+       u16 scd_bc_tbls_size;
+       u32 tfd_size;
+       u8 tx_chains_num;
+       u8 rx_chains_num;
+       u8 valid_tx_ant;
+       u8 valid_rx_ant;
+       u16 max_rxq_size;
+       u16 max_rxq_log;
+       u32 rx_page_order;
+       u32 rx_wrt_ptr_reg;
+       u8 max_stations;
+       u8 ht40_channel;
+       u8 max_beacon_itrvl;    /* in 1024 ms */
+       u32 max_inst_size;
+       u32 max_data_size;
+       u32 max_bsm_size;
+       u32 ct_kill_threshold;  /* value in hw-dependent units */
+       u16 beacon_time_tsf_bits;
+       const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE:  The implementation of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * il_         <-- Is part of iwlwifi
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il4965_bg_      <-- Called from work queue context
+ * il4965_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+extern int il_queue_space(const struct il_queue *q);
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+       return q->write_ptr >= q->read_ptr ?
+           (i >= q->read_ptr && i < q->write_ptr) :
+           !(i < q->read_ptr && i >= q->write_ptr);
+}
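+
+/*
+ * Example (illustration only): in a wrapped queue with read_ptr == 250 and
+ * write_ptr == 5, il_queue_used() reports entries 250..n_bd-1 and 0..4 as
+ * used and entries 5..249 as free.
+ */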
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+       /*
+        * This is for init calibration result and scan command which
+        * required buffer > TFD_MAX_PAYLOAD_SIZE,
+        * the big buffer at end of command array
+        */
+       if (is_huge)
+               return q->n_win;        /* must be power of 2 */
+
+       /* Otherwise, use normal size buffers */
+       return idx & (q->n_win - 1);
+}
+
+struct il_dma_ptr {
+       dma_addr_t dma;
+       void *addr;
+       size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO     0
+#define IL_OPERATION_MODE_HT_ONLY  1
+#define IL_OPERATION_MODE_MIXED    2
+#define IL_OPERATION_MODE_20MHZ    3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE           0xFFFF
+#define IL4965_CAL_NUM_BEACONS         20
+#define IL_CAL_NUM_BEACONS             16
+#define MAXIMUM_ALLOWED_PATHLOSS       15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM  50
+#define MIN_FA_OFDM  5
+#define MAX_FA_CCK   50
+#define MIN_FA_CCK   5
+
+#define AUTO_CORR_STEP_OFDM       1
+
+#define AUTO_CORR_STEP_CCK     3
+#define AUTO_CORR_MAX_TH_CCK   160
+
+#define NRG_DIFF               2
+#define NRG_STEP_CCK           2
+#define NRG_MARGIN             8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
+
+#define CHAIN_A             0
+#define CHAIN_B             1
+#define CHAIN_C             2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER                        0xFF00
+#define IN_BAND_FILTER                 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE    0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L     20
+#define NUM_RX_CHAINS           3
+
+enum il4965_false_alarm_state {
+       IL_FA_TOO_MANY = 0,
+       IL_FA_TOO_FEW = 1,
+       IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+       IL_CHAIN_NOISE_ALIVE = 0,       /* must be 0 */
+       IL_CHAIN_NOISE_ACCUMULATE,
+       IL_CHAIN_NOISE_CALIBRATED,
+       IL_CHAIN_NOISE_DONE,
+};
+
+enum il4965_calib_enabled_state {
+       IL_CALIB_DISABLED = 0,  /* must be 0 */
+       IL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum il_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum il_calib {
+       IL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct il_calib_result {
+       void *buf;
+       size_t buf_len;
+};
+
+enum ucode_type {
+       UCODE_NONE = 0,
+       UCODE_INIT,
+       UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+       u32 auto_corr_ofdm;
+       u32 auto_corr_ofdm_mrc;
+       u32 auto_corr_ofdm_x1;
+       u32 auto_corr_ofdm_mrc_x1;
+       u32 auto_corr_cck;
+       u32 auto_corr_cck_mrc;
+
+       u32 last_bad_plcp_cnt_ofdm;
+       u32 last_fa_cnt_ofdm;
+       u32 last_bad_plcp_cnt_cck;
+       u32 last_fa_cnt_cck;
+
+       u32 nrg_curr_state;
+       u32 nrg_prev_state;
+       u32 nrg_value[10];
+       u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+       u32 nrg_silence_ref;
+       u32 nrg_energy_idx;
+       u32 nrg_silence_idx;
+       u32 nrg_th_cck;
+       s32 nrg_auto_corr_silence_diff;
+       u32 num_in_cck_no_fa;
+       u32 nrg_th_ofdm;
+
+       u16 barker_corr_th_min;
+       u16 barker_corr_th_min_mrc;
+       u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+       u32 active_chains;
+       u32 chain_noise_a;
+       u32 chain_noise_b;
+       u32 chain_noise_c;
+       u32 chain_signal_a;
+       u32 chain_signal_b;
+       u32 chain_signal_c;
+       u16 beacon_count;
+       u8 disconn_array[NUM_RX_CHAINS];
+       u8 delta_gain_code[NUM_RX_CHAINS];
+       u8 radio_write;
+       u8 state;
+};
+
+#define        EEPROM_SEM_TIMEOUT 10   /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000    /* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES     (256)
+#define IL_TRAFFIC_ENTRY_SIZE  (64)
+
+enum {
+       MEASUREMENT_READY = (1 << 0),
+       MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+       u32 hw;
+       u32 sw;
+       u32 err_code;
+       u32 sch;
+       u32 alive;
+       u32 rfkill;
+       u32 ctkill;
+       u32 wakeup;
+       u32 rx;
+       u32 handlers[IL_CN_MAX];
+       u32 tx;
+       u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+       MANAGEMENT_ASSOC_REQ = 0,
+       MANAGEMENT_ASSOC_RESP,
+       MANAGEMENT_REASSOC_REQ,
+       MANAGEMENT_REASSOC_RESP,
+       MANAGEMENT_PROBE_REQ,
+       MANAGEMENT_PROBE_RESP,
+       MANAGEMENT_BEACON,
+       MANAGEMENT_ATIM,
+       MANAGEMENT_DISASSOC,
+       MANAGEMENT_AUTH,
+       MANAGEMENT_DEAUTH,
+       MANAGEMENT_ACTION,
+       MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+       CONTROL_BACK_REQ = 0,
+       CONTROL_BACK,
+       CONTROL_PSPOLL,
+       CONTROL_RTS,
+       CONTROL_CTS,
+       CONTROL_ACK,
+       CONTROL_CFEND,
+       CONTROL_CFENDACK,
+       CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       u32 mgmt[MANAGEMENT_MAX];
+       u32 ctrl[CONTROL_MAX];
+       u32 data_cnt;
+       u64 data_bytes;
+#endif
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IL_HOST_INT_TIMEOUT_MAX        (0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF        (0x40)
+#define IL_HOST_INT_TIMEOUT_MIN        (0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX  (0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF  (0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN  (0x0)
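+
+/*
+ * E.g. (illustration only) IL_HOST_INT_TIMEOUT_DEF == 0x40 == 64 units of
+ * 32 usec, which gives the 2048 usec default mentioned above, and
+ * IL_HOST_INT_CALIB_TIMEOUT_DEF == 0x10 == 16 units == 512 usec.
+ */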
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IL_DEF_WD_TIMEOUT      (2000)
+#define IL_LONG_WD_TIMEOUT     (10000)
+#define IL_MAX_WD_TIMEOUT      (120000)
+
+struct il_force_reset {
+       int reset_request_count;
+       int reset_success_count;
+       int reset_reject_count;
+       unsigned long reset_duration;
+       unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting  */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0  - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS     24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS     22
+
+struct il_rxon_context {
+       struct ieee80211_vif *vif;
+
+       const u8 *ac_to_fifo;
+       const u8 *ac_to_queue;
+       u8 mcast_queue;
+
+       /*
+        * We could use the vif to indicate active, but we
+        * also need it to stay active while disabling, after
+        * the vif has already been removed for type setting.
+        */
+       bool always_active, is_active;
+
+       bool ht_need_multiple_chains;
+
+       int ctxid;
+
+       u32 interface_modes, exclusive_interface_modes;
+       u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+       /*
+        * We declare this const so it can only be
+        * changed via explicit cast within the
+        * routines that actually update the physical
+        * hardware.
+        */
+       const struct il_rxon_cmd active;
+       struct il_rxon_cmd staging;
+
+       struct il_rxon_time_cmd timing;
+
+       struct il_qos_info qos_data;
+
+       u8 bcast_sta_id, ap_sta_id;
+
+       u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+       u8 qos_cmd;
+       u8 wep_key_cmd;
+
+       struct il_wep_key wep_keys[WEP_KEYS_MAX];
+       u8 key_mapping_keys;
+
+       __le32 station_flags;
+
+       struct {
+               bool non_gf_sta_present;
+               u8 protection;
+               bool enabled, is_40mhz;
+               u8 extension_chan_offset;
+       } ht;
+};
+
+struct il_power_mgr {
+       struct il_powertable_cmd sleep_cmd;
+       struct il_powertable_cmd sleep_cmd_next;
+       int debug_sleep_level_override;
+       bool pci_pm;
+};
+
+struct il_priv {
+
+       /* ieee device used by generic ieee processing code */
+       struct ieee80211_hw *hw;
+       struct ieee80211_channel *ieee_channels;
+       struct ieee80211_rate *ieee_rates;
+       struct il_cfg *cfg;
+
+       /* temporary frame storage list */
+       struct list_head free_frames;
+       int frames_count;
+
+       enum ieee80211_band band;
+       int alloc_rxb_page;
+
+       void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+                                    struct il_rx_buf *rxb);
+
+       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+       /* spectrum measurement report caching */
+       struct il_spectrum_notification measure_report;
+       u8 measurement_status;
+
+       /* ucode beacon time */
+       u32 ucode_beacon_time;
+       int missed_beacon_threshold;
+
+       /* track IBSS manager (last beacon) status */
+       u32 ibss_manager;
+
+       /* force reset */
+       struct il_force_reset force_reset;
+
+       /* we allocate array of il_channel_info for NIC's valid channels.
+        *    Access via channel # using indirect idx array */
+       struct il_channel_info *channel_info;   /* channel info array */
+       u8 channel_count;       /* # of channels */
+
+       /* thermal calibration */
+       s32 temperature;        /* degrees Kelvin */
+       s32 last_temperature;
+
+       /* init calibration results */
+       struct il_calib_result calib_results[IL_CALIB_MAX];
+
+       /* Scan related variables */
+       unsigned long scan_start;
+       unsigned long scan_start_tsf;
+       void *scan_cmd;
+       enum ieee80211_band scan_band;
+       struct cfg80211_scan_request *scan_request;
+       struct ieee80211_vif *scan_vif;
+       u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+       u8 mgmt_tx_ant;
+
+       /* spinlock */
+       spinlock_t lock;        /* protect general shared data */
+       spinlock_t hcmd_lock;   /* protect hcmd */
+       spinlock_t reg_lock;    /* protect hw register access */
+       struct mutex mutex;
+
+       /* basic pci-network driver stuff */
+       struct pci_dev *pci_dev;
+
+       /* pci hardware address support */
+       void __iomem *hw_base;
+       u32 hw_rev;
+       u32 hw_wa_rev;
+       u8 rev_id;
+
+       /* command queue number */
+       u8 cmd_queue;
+
+       /* max number of station keys */
+       u8 sta_key_max_num;
+
+       /* EEPROM MAC addresses */
+       struct mac_address addresses[1];
+
+       /* uCode images, save to reload in case of failure */
+       int fw_idx;             /* firmware we're trying to load */
+       u32 ucode_ver;          /* version of ucode, copy of
+                                  il_ucode.ver */
+       struct fw_desc ucode_code;      /* runtime inst */
+       struct fw_desc ucode_data;      /* runtime data original */
+       struct fw_desc ucode_data_backup;       /* runtime data save/restore */
+       struct fw_desc ucode_init;      /* initialization inst */
+       struct fw_desc ucode_init_data; /* initialization data */
+       struct fw_desc ucode_boot;      /* bootstrap inst */
+       enum ucode_type ucode_type;
+       u8 ucode_write_complete;        /* the image write is complete */
+       char firmware_name[25];
+
+       struct il_rxon_context ctx;
+
+       __le16 switch_channel;
+
+       /* 1st responses from initialize and runtime uCode images.
+        * _4965's initialize alive response contains some calibration data. */
+       struct il_init_alive_resp card_alive_init;
+       struct il_alive_resp card_alive;
+
+       u16 active_rate;
+
+       u8 start_calib;
+       struct il_sensitivity_data sensitivity_data;
+       struct il_chain_noise_data chain_noise_data;
+       __le16 sensitivity_tbl[HD_TBL_SIZE];
+
+       struct il_ht_config current_ht_config;
+
+       /* Rate scaling data */
+       u8 retry_rate;
+
+       wait_queue_head_t wait_command_queue;
+
+       int activity_timer_active;
+
+       /* Rx and Tx DMA processing queues */
+       struct il_rx_queue rxq;
+       struct il_tx_queue *txq;
+       unsigned long txq_ctx_active_msk;
+       struct il_dma_ptr kw;   /* keep warm address */
+       struct il_dma_ptr scd_bc_tbls;
+
+       u32 scd_base_addr;      /* scheduler sram base address */
+
+       unsigned long status;
+
+       /* counts mgmt, ctl, and data packets */
+       struct traffic_stats tx_stats;
+       struct traffic_stats rx_stats;
+
+       /* counts interrupts */
+       struct isr_stats isr_stats;
+
+       struct il_power_mgr power_data;
+
+       /* context information */
+       u8 bssid[ETH_ALEN];     /* used only on 3945 but filled by core */
+
+       /* station table variables */
+
+       /* Note: if lock and sta_lock are needed, lock must be acquired first */
+       spinlock_t sta_lock;
+       int num_stations;
+       struct il_station_entry stations[IL_STATION_COUNT];
+       unsigned long ucode_key_table;
+
+       /* queue refcounts */
+#define IL_MAX_HW_QUEUES       32
+       unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+       /* for each AC */
+       atomic_t queue_stop_count[4];
+
+       /* Indication if ieee80211_ops->open has been called */
+       u8 is_open;
+
+       u8 mac80211_registered;
+
+       /* eeprom -- this is in the card's little endian byte order */
+       u8 *eeprom;
+       struct il_eeprom_calib_info *calib_info;
+
+       enum nl80211_iftype iw_mode;
+
+       /* Last Rx'd beacon timestamp */
+       u64 timestamp;
+
+       union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+               struct {
+                       void *shared_virt;
+                       dma_addr_t shared_phys;
+
+                       struct delayed_work thermal_periodic;
+                       struct delayed_work rfkill_poll;
+
+                       struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+                       struct il3945_notif_stats accum_stats;
+                       struct il3945_notif_stats delta_stats;
+                       struct il3945_notif_stats max_delta;
+#endif
+
+                       u32 sta_supp_rates;
+                       int last_rx_rssi;       /* From Rx packet stats */
+
+                       /* Rx'd packet timing information */
+                       u32 last_beacon_time;
+                       u64 last_tsf;
+
+                       /*
+                        * each calibration channel group in the
+                        * EEPROM has a derived clip setting for
+                        * each rate.
+                        */
+                       const struct il3945_clip_group clip_groups[5];
+
+               } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+               struct {
+                       struct il_rx_phy_res last_phy_res;
+                       bool last_phy_res_valid;
+
+                       struct completion firmware_loading_complete;
+
+                       /*
+                        * chain noise reset and gain commands are the
+                        * two extra calibration commands that follow the
+                        * standard phy calibration commands
+                        */
+                       u8 phy_calib_chain_noise_reset_cmd;
+                       u8 phy_calib_chain_noise_gain_cmd;
+
+                       struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+                       struct il_notif_stats accum_stats;
+                       struct il_notif_stats delta_stats;
+                       struct il_notif_stats max_delta;
+#endif
+
+               } _4965;
+#endif
+       };
+
+       struct il_hw_params hw_params;
+
+       u32 inta_mask;
+
+       struct workqueue_struct *workqueue;
+
+       struct work_struct restart;
+       struct work_struct scan_completed;
+       struct work_struct rx_replenish;
+       struct work_struct abort_scan;
+
+       struct il_rxon_context *beacon_ctx;
+       struct sk_buff *beacon_skb;
+
+       struct work_struct tx_flush;
+
+       struct tasklet_struct irq_tasklet;
+
+       struct delayed_work init_alive_start;
+       struct delayed_work alive_start;
+       struct delayed_work scan_check;
+
+       /* TX Power */
+       s8 tx_power_user_lmt;
+       s8 tx_power_device_lmt;
+       s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+       /* debugging info */
+       u32 debug_level;        /* per device debugging will override global
+                                  il_debug_level if set */
+#endif                         /* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       /* debugfs */
+       u16 tx_traffic_idx;
+       u16 rx_traffic_idx;
+       u8 *tx_traffic;
+       u8 *rx_traffic;
+       struct dentry *debugfs_dir;
+       u32 dbgfs_sram_offset, dbgfs_sram_len;
+       bool disable_ht40;
+#endif                         /* CONFIG_IWLEGACY_DEBUGFS */
+
+       struct work_struct txpower_work;
+       u32 disable_sens_cal;
+       u32 disable_chain_noise_cal;
+       u32 disable_tx_power_cal;
+       struct work_struct run_time_calib_work;
+       struct timer_list stats_periodic;
+       struct timer_list watchdog;
+       bool hw_ready;
+
+       struct led_classdev led;
+       unsigned long blink_on, blink_off;
+       bool led_registered;
+};                             /*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+       set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+       clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline struct ieee80211_hdr *
+il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
+{
+       if (il->txq[txq_id].txb[idx].skb)
+               return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
+                   data;
+       return NULL;
+}
+
+static inline struct il_rxon_context *
+il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+       struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+       return vif_priv->ctx;
+}
+
+#define for_each_context(il, _ctx) \
+       for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+       return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+       return il_is_associated(il);
+}
+
+static inline int
+il_is_associated_ctx(struct il_rxon_context *ctx)
+{
+       return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+       if (ch_info == NULL)
+               return 0;
+       return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+       return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+       return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+       return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+       return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+       __free_pages(page, il->hw_params.rx_page_order);
+       il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+       free_pages(page, il->hw_params.rx_page_order);
+       il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+       .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+       .driver_data = (kernel_ulong_t)&(cfg)
+
+#define TIME_UNIT              1024
+
+#define IL_SKU_G       0x1
+#define IL_SKU_A       0x2
+#define IL_SKU_N       0x8
+
+#define IL_CMD(x) case x: return #x
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000)   /* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+struct il_hcmd_ops {
+       int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
+       int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
+       void (*set_rxon_chain) (struct il_priv *il,
+                               struct il_rxon_context *ctx);
+};
+
+struct il_hcmd_utils_ops {
+       u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+       u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+       int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+       void (*post_scan) (struct il_priv *il);
+};
+
+struct il_apm_ops {
+       int (*init) (struct il_priv *il);
+       void (*config) (struct il_priv *il);
+};
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+       ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos);
+       ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos);
+       ssize_t(*general_stats_read) (struct file *file,
+                                     char __user *user_buf, size_t count,
+                                     loff_t *ppos);
+};
+#endif
+
+struct il_temp_ops {
+       void (*temperature) (struct il_priv *il);
+};
+
+struct il_lib_ops {
+       /* set hw dependent parameters */
+       int (*set_hw_params) (struct il_priv *il);
+       /* Handling TX */
+       void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+                                        struct il_tx_queue *txq,
+                                        u16 byte_cnt);
+       int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+                                     struct il_tx_queue *txq, dma_addr_t addr,
+                                     u16 len, u8 reset, u8 pad);
+       void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+       int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+       /* setup Rx handler */
+       void (*handler_setup) (struct il_priv *il);
+       /* alive notification after init uCode load */
+       void (*init_alive_start) (struct il_priv *il);
+       /* check validity of rtc data address */
+       int (*is_valid_rtc_data_addr) (u32 addr);
+       /* 1st ucode load */
+       int (*load_ucode) (struct il_priv *il);
+
+       void (*dump_nic_error_log) (struct il_priv *il);
+       int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+       int (*set_channel_switch) (struct il_priv *il,
+                                  struct ieee80211_channel_switch *ch_switch);
+       /* power management */
+       struct il_apm_ops apm_ops;
+
+       /* power */
+       int (*send_tx_power) (struct il_priv *il);
+       void (*update_chain_flags) (struct il_priv *il);
+
+       /* eeprom operations */
+       struct il_eeprom_ops eeprom_ops;
+
+       /* temperature */
+       struct il_temp_ops temp_ops;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+       struct il_debugfs_ops debugfs_ops;
+#endif
+
+};
+
+struct il_led_ops {
+       int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_legacy_ops {
+       void (*post_associate) (struct il_priv *il);
+       void (*config_ap) (struct il_priv *il);
+       /* station management */
+       int (*update_bcast_stations) (struct il_priv *il);
+       int (*manage_ibss_station) (struct il_priv *il,
+                                   struct ieee80211_vif *vif, bool add);
+};
+
+struct il_ops {
+       const struct il_lib_ops *lib;
+       const struct il_hcmd_ops *hcmd;
+       const struct il_hcmd_utils_ops *utils;
+       const struct il_led_ops *led;
+       const struct il_nic_ops *nic;
+       const struct il_legacy_ops *legacy;
+       const struct ieee80211_ops *ieee80211_ops;
+};
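+
+/*
+ * Illustrative sketch (not part of this patch): how a device family
+ * would typically wire its callbacks into an il_ops instance that its
+ * il_cfg then points at.  Every "il_example_*" symbol below is a
+ * hypothetical placeholder, shown only to make the layering concrete:
+ *
+ *     static const struct il_lib_ops il_example_lib_ops = {
+ *             .set_hw_params = il_example_set_hw_params,
+ *             .load_ucode = il_example_load_ucode,
+ *             .apm_ops = { .init = il_apm_init },
+ *     };
+ *
+ *     static const struct il_ops il_example_ops = {
+ *             .lib = &il_example_lib_ops,
+ *             .hcmd = &il_example_hcmd_ops,
+ *             .utils = &il_example_hcmd_utils_ops,
+ *             .legacy = &il_example_legacy_ops,
+ *             .ieee80211_ops = &il_example_mac_ops,
+ *     };
+ */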
+
+struct il_mod_params {
+       int sw_crypto;          /* def: 0 = using hardware encryption */
+       int disable_hw_scan;    /* def: 0 = use h/w scan */
+       int num_of_queues;      /* def: HW dependent */
+       int disable_11n;        /* def: 0 = 11n capabilities enabled */
+       int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
+       int antenna;            /* def: 0 = both antennas (use diversity) */
+       int restart_fw;         /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate on the led on/off time per HW according
+ *     to the deviation to achieve the desired led frequency.
+ *     The detailed algorithm is described in common.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode in kelvin
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ *     sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ *     chain noise calibration operation
+ */
+struct il_base_params {
+       int eeprom_size;
+       int num_of_queues;      /* def: HW dependent */
+       int num_of_ampdu_queues;        /* def: HW dependent */
+       /* for il_apm_init() */
+       u32 pll_cfg_val;
+       bool set_l0s;
+       bool use_bsm;
+
+       u16 led_compensation;
+       int chain_noise_num_beacons;
+       unsigned int wd_timeout;
+       bool temperature_kelvin;
+       const bool ucode_tracing;
+       const bool sensitivity_calib_by_driver;
+       const bool chain_noise_calib_by_driver;
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY       (0<<1)
+#define IL_LED_LINK           (1<<1)
+
+/*
+ * LED mode
+ *    IL_LED_DEFAULT:  use device default
+ *    IL_LED_RF_STATE: turn LED on/off based on RF state
+ *                     LED ON  = RF ON
+ *                     LED OFF = RF OFF
+ *    IL_LED_BLINK:    adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+       IL_LED_DEFAULT,
+       IL_LED_RF_STATE,
+       IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *     (.ucode) will be added to filename before loading from disk. The
+ *     filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ *     Driver interacts with Firmware API version >= 2.
+ * } else {
+ *     Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through utilizing the
+ * il_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API
+ * versions.
+ *
+ */
+struct il_cfg {
+       /* params specific to an individual device within a device family */
+       const char *name;
+       const char *fw_name_pre;
+       const unsigned int ucode_api_max;
+       const unsigned int ucode_api_min;
+       u8 valid_tx_ant;
+       u8 valid_rx_ant;
+       unsigned int sku;
+       u16 eeprom_ver;
+       u16 eeprom_calib_ver;
+       const struct il_ops *ops;
+       /* module based parameters which can be set from modprobe cmd */
+       const struct il_mod_params *mod_params;
+       /* params not likely to change within a device family */
+       struct il_base_params *base_params;
+       /* params likely to change within a device family */
+       u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+       enum il_led_mode led_mode;
+};
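+
+/*
+ * Illustrative sketch (not part of this patch): the shape of a device
+ * configuration and of a matching PCI id table entry built with
+ * IL_PCI_DEVICE.  All values and "il_example_*" symbols below are
+ * hypothetical placeholders; with these values the driver would first
+ * request "example-fw-2.ucode" and fall back to "example-fw-1.ucode":
+ *
+ *     static struct il_cfg il_example_cfg = {
+ *             .name = "Example Wireless NIC",
+ *             .fw_name_pre = "example-fw-",
+ *             .ucode_api_max = 2,
+ *             .ucode_api_min = 1,
+ *             .sku = IL_SKU_A | IL_SKU_G,
+ *             .ops = &il_example_ops,
+ *             .mod_params = &il_example_mod_params,
+ *             .base_params = &il_example_base_params,
+ *             .led_mode = IL_LED_RF_STATE,
+ *     };
+ *
+ *     static DEFINE_PCI_DEVICE_TABLE(il_example_hw_card_ids) = {
+ *             {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il_example_cfg)},
+ *             {0}
+ *     };
+ */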
+
+/***************************
+ *   L i b                 *
+ ***************************/
+
+struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+                         int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+                       struct il_rxon_context *ctx);
+void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+                          enum ieee80211_band band, struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+                          struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il,
+                                 struct il_rxon_context *ctx);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+                         u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                           enum nl80211_iftype newtype, bool newp2p);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_free_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_alloc_traffic_mem(struct il_priv *il);
+void il_free_traffic_mem(struct il_priv *il);
+void il_reset_traffic_log(struct il_priv *il);
+void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+                             struct ieee80211_hdr *header);
+void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+                             struct ieee80211_hdr *header);
+const char *il_get_mgmt_string(int cmd);
+const char *il_get_ctrl_string(int cmd);
+void il_clear_traffic_stats(struct il_priv *il);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+       return 0;
+}
+
+static inline void
+il_free_traffic_mem(struct il_priv *il)
+{
+}
+
+static inline void
+il_reset_traffic_log(struct il_priv *il)
+{
+}
+
+static inline void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+                        struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+                        struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+/* Handlers */
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+                    u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
+                      int slots_num, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct cfg80211_scan_request *req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+                     const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+                            u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+                             struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME       cpu_to_le16(10)     /* msec */
+#define IL_PLCP_QUIET_THRESH       cpu_to_le16(1)      /* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG         (HZ * 7)
+
+/*****************************************************
+ *   S e n d i n g     H o s t     C o m m a n d s   *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+                                const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+                         void (*callback) (struct il_priv *il,
+                                           struct il_device_cmd *cmd,
+                                           struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI                                              *
+ *****************************************************/
+
+static inline u16
+il_pcie_link_ctl(struct il_priv *il)
+{
+       int pos;
+       u16 pci_lnk_ctl;
+       pos = pci_pcie_cap(il->pci_dev);
+       pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+       return pci_lnk_ctl;
+}
+
+void il_bg_watchdog(unsigned long data);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+                         u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int il_pci_suspend(struct device *device);
+int il_pci_resume(struct device *device);
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS       (&il_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IL_LEGACY_PM_OPS       NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+*  Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+*  GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS   *****/
+
+#define S_HCMD_ACTIVE  0       /* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED  2
+#define S_RF_KILL_HW   3
+#define S_CT_KILL              4
+#define S_INIT         5
+#define S_ALIVE                6
+#define S_READY                7
+#define S_TEMPERATURE  8
+#define S_GEO_CONFIGURED       9
+#define S_EXIT_PENDING 10
+#define S_STATS                12
+#define S_SCANNING             13
+#define S_SCAN_ABORTING        14
+#define S_SCAN_HW              15
+#define S_POWER_PMI    16
+#define S_FW_ERROR             17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+       /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+        * set but EXIT_PENDING is not */
+       return test_bit(S_READY, &il->status) &&
+           test_bit(S_GEO_CONFIGURED, &il->status) &&
+           !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+       return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+       return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill_hw(struct il_priv *il)
+{
+       return test_bit(S_RF_KILL_HW, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+       return il_is_rfkill_hw(il);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+       return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+       if (il_is_rfkill(il))
+               return 0;
+
+       return il_is_ready(il);
+}
+
+extern void il_send_bt_config(struct il_priv *il);
+extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+static inline int
+il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+       return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+{
+       return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+                         __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+#include <linux/io.h>
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+       iowrite8(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+       iowrite32(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+       return ioread32(il->hw_base + ofs);
+}
+
+#define IL_POLL_INTERVAL 10    /* microseconds */
+static inline int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((_il_rd(il, addr) & mask) == (bits & mask))
+                       return t;
+               udelay(IL_POLL_INTERVAL);
+               t += IL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+       _il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       _il_set_bit(p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+       _il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&p->reg_lock, reg_flags);
+       _il_clear_bit(p, r, m);
+       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+
+static inline int
+_il_grab_nic_access(struct il_priv *il)
+{
+       int ret;
+       u32 val;
+
+       /* this bit wakes up the NIC */
+       _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+       /*
+        * These bits say the device is running, and should keep running for
+        * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+        * but they do not indicate that embedded SRAM is restored yet;
+        * 3945 and 4965 have volatile SRAM, and must save/restore contents
+        * to/from host DRAM when sleeping/waking for power-saving.
+        * Each direction takes approximately 1/4 millisecond; with this
+        * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+        * series of register accesses are expected (e.g. reading Event Log),
+        * to keep device from sleeping.
+        *
+        * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+        * SRAM is okay/restored.  We don't check that here because this call
+        * is just for hardware register access; but GP1 MAC_SLEEP check is a
+        * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+        *
+        */
+       ret =
+           _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+                        (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+                         CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+       if (ret < 0) {
+               val = _il_rd(il, CSR_GP_CNTRL);
+               IL_ERR("MAC is in deep sleep!  CSR_GP_CNTRL = 0x%08X\n", val);
+               _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+       _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
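+
+/*
+ * Illustrative sketch (not part of this patch): holding MAC_ACCESS_REQ
+ * across a short series of register reads, as recommended in the
+ * comment above, instead of grabbing/releasing NIC access per read.
+ * The helper name is hypothetical; compare with il_rd() below.
+ */
+static inline void
+il_example_rd_pair(struct il_priv *il, u32 ofs1, u32 ofs2, u32 *v1, u32 *v2)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+       *v1 = _il_rd(il, ofs1);
+       *v2 = _il_rd(il, ofs2);
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}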
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+       u32 value;
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+       value = _il_rd(il, reg);
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+       return value;
+
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       if (!_il_grab_nic_access(il)) {
+               _il_wr(il, reg, value);
+               _il_release_nic_access(il);
+       }
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_write_reg_buf(struct il_priv *il, u32 reg, u32 len, u32 * values)
+{
+       u32 count = sizeof(u32);
+
+       if (il != NULL && values != NULL) {
+               for (; 0 < len; len -= count, reg += count, values++)
+                       il_wr(il, reg, *values);
+       }
+}
+
+static inline int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((il_rd(il, addr) & mask) == mask)
+                       return t;
+               udelay(IL_POLL_INTERVAL);
+               t += IL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
+
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+       _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+       rmb();
+       return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+       unsigned long reg_flags;
+       u32 val;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+       val = _il_rd_prph(il, reg);
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+       return val;
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+       _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+       wmb();
+       _il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       if (!_il_grab_nic_access(il)) {
+               _il_wr_prph(il, addr, val);
+               _il_release_nic_access(il);
+       }
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define _il_set_bits_prph(il, reg, mask) \
+_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask))
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+       _il_set_bits_prph(il, reg, mask);
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define _il_set_bits_mask_prph(il, reg, bits, mask) \
+_il_wr_prph(il, reg,                           \
+                ((_il_rd_prph(il, reg) & mask) | bits))
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+       _il_set_bits_mask_prph(il, reg, bits, mask);
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+       unsigned long reg_flags;
+       u32 val;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+       val = _il_rd_prph(il, reg);
+       _il_wr_prph(il, reg, (val & ~mask));
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+       unsigned long reg_flags;
+       u32 value;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       _il_grab_nic_access(il);
+
+       _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+       rmb();
+       value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+       _il_release_nic_access(il);
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+       return value;
+}
+
+static inline void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       if (!_il_grab_nic_access(il)) {
+               _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+               wmb();
+               _il_wr(il, HBUS_TARG_MEM_WDAT, val);
+               _il_release_nic_access(il);
+       }
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_write_targ_mem_buf(struct il_priv *il, u32 addr, u32 len, u32 * values)
+{
+       unsigned long reg_flags;
+
+       spin_lock_irqsave(&il->reg_lock, reg_flags);
+       if (!_il_grab_nic_access(il)) {
+               _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+               wmb();
+               for (; 0 < len; len -= sizeof(u32), values++)
+                       _il_wr(il, HBUS_TARG_MEM_WDAT, *values);
+
+               _il_release_nic_access(il);
+       }
+       spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0)    /* driver entry is active */
+#define IL_STA_UCODE_ACTIVE  BIT(1)    /* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS  BIT(2)        /* ucode entry is in process of
+                                          being activated */
+#define IL_STA_LOCAL BIT(3)    /* station state not directed by mac80211;
+                                  (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4)    /* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+                         const u8 *addr, bool is_ap,
+                         struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+                  const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+                  struct il_link_quality_cmd *lq, u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: il_priv struct
+ *
+ * This is called during il_down() to make sure that, in case we're
+ * coming here from a hardware restart, mac80211 will be able to
+ * reconfigure stations -- if we're getting here in the normal down
+ * flow, the stations will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+       unsigned long flags;
+       struct il_rxon_context *ctx = &il->ctx;
+
+       spin_lock_irqsave(&il->sta_lock, flags);
+       memset(il->stations, 0, sizeof(il->stations));
+       il->num_stations = 0;
+
+       il->ucode_key_table = 0;
+
+       /*
+        * Remove all key information that is not stored as part
+        * of station information since mac80211 may not have had
+        * a chance to remove all the keys. When device is
+        * reconfigured by mac80211 after an error all keys will
+        * be reconfigured.
+        */
+       memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+       ctx->key_mapping_keys = 0;
+
+       spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+       if (WARN_ON(!sta))
+               return IL_INVALID_STATION;
+
+       return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: il_priv struct
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
+                      struct ieee80211_sta *sta)
+{
+       int sta_id;
+
+       if (!sta)
+               return context->bcast_sta_id;
+
+       sta_id = il_sta_id(sta);
+
+       /*
+        * mac80211 should not be passing a partially
+        * initialised station!
+        */
+       WARN_ON(sta_id == IL_INVALID_STATION);
+
+       return sta_id;
+}
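+
+/*
+ * Illustrative sketch (not part of this patch): typical use of
+ * il_sta_id_or_broadcast() on a TX-style path where mac80211 may pass
+ * a NULL station.  The helper name below is hypothetical.
+ */
+static inline int
+il_example_tx_sta_id(struct il_priv *il, struct ieee80211_sta *sta)
+{
+       /* with no station given, fall back to the broadcast station */
+       return il_sta_id_or_broadcast(il, &il->ctx, sta);
+}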
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+       return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+       return --idx & (n_bd - 1);
+}
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+       if (desc->v_addr)
+               dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+                                 desc->p_addr);
+       desc->v_addr = NULL;
+       desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+       if (!desc->len) {
+               desc->v_addr = NULL;
+               return -EINVAL;
+       }
+
+       desc->v_addr =
+           dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
+                              GFP_KERNEL);
+       return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+       BUG_ON(ac > 3);         /* only have 2 bits */
+       BUG_ON(hwq > 31);       /* only use 5 bits */
+
+       txq->swq_id = (hwq << 2) | ac;
+}
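+
+/*
+ * Illustrative sketch (not part of this patch): decoding the swq_id
+ * layout documented above.  The helper name is hypothetical; the real
+ * decode is open-coded in il_wake_queue() and il_stop_queue() below.
+ */
+static inline void
+il_example_get_swq_id(u8 swq_id, u8 *ac, u8 *hwq)
+{
+       *ac = swq_id & 3;               /* bits 0-1: AC queue */
+       *hwq = (swq_id >> 2) & 0x1f;    /* bits 2-6: HW queue ID */
+}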
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+       u8 queue = txq->swq_id;
+       u8 ac = queue & 3;
+       u8 hwq = (queue >> 2) & 0x1f;
+
+       if (test_and_clear_bit(hwq, il->queue_stopped))
+               if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+                       ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+       u8 queue = txq->swq_id;
+       u8 ac = queue & 3;
+       u8 hwq = (queue >> 2) & 0x1f;
+
+       if (!test_and_set_bit(hwq, il->queue_stopped))
+               if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+                       ieee80211_stop_queue(il->hw, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+       clear_bit(S_INT_ENABLED, &il->status);
+
+       /* disable interrupts from uCode/NIC to host */
+       _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+       /* acknowledge/clear/reset any interrupts still pending
+        * from uCode or flow handler (Rx/Tx DMA) */
+       _il_wr(il, CSR_INT, 0xffffffff);
+       _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+       _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+       set_bit(S_INT_ENABLED, &il->status);
+       _il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+       return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+       return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
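+
+/*
+ * Illustrative sketch (not part of this patch): splitting a beacon
+ * time value into its "high" and "low" parts with the two masks above,
+ * e.g. for tsf_bits == 22 the low part keeps bits [21:0] and the high
+ * part keeps bits [31:22].  The helper name is hypothetical.
+ */
+static inline void
+il_example_split_beacon_time(struct il_priv *il, u32 time, u16 tsf_bits,
+                            u32 *high, u32 *low)
+{
+       *low = time & il_beacon_time_mask_low(il, tsf_bits);
+       *high = time & il_beacon_time_mask_high(il, tsf_bits);
+}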
+
+/**
+ * struct il_rb_status - receive buffer status, host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ *                          in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ *                          which was transferred
+ */
+struct il_rb_status {
+       __le16 closed_rb_num;
+       __le16 closed_fr_num;
+       __le16 finished_rb_num;
+       __le16 finished_fr_num;
+       __le32 __unused;        /* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX      (256)
+#define TFD_QUEUE_SIZE_BC_DUP  (64)
+#define TFD_QUEUE_BC_SIZE      (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK        DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS          20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+       return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+
+/**
+ * struct il_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one transmit buffer
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer; every even
+ *     entry is unaligned on a 16 bit boundary
+ * @hi_n_len: bits 0-3 hold the [35:32] portion of the dma address,
+ *           bits 4-15 hold the length of the tx buffer
+ */
+struct il_tfd_tb {
+       __le32 lo;
+       __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ *          5   reserved
+ *          6-7 padding (not used)
+ * @ tbs[20]   transmit frame buffer descriptors
+ * @ __pad     padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM.  These buffers collectively contain the (one) frame described
+ * by the TFD.  Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
+ * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+       u8 __reserved1[3];
+       u8 num_tbs;
+       struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+       __le32 __pad;
+} __packed;
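+
+/*
+ * Illustrative sketch (not part of this patch): packing one DMA
+ * address/length pair into a TB using the bit layout described above.
+ * The helper name is hypothetical; the real work is done behind the
+ * lib_ops->txq_attach_buf_to_tfd() hook.
+ */
+static inline void
+il_example_fill_tb(struct il_tfd_tb *tb, dma_addr_t addr, u16 len)
+{
+       u16 hi_n_len = len << 4;                /* length lives in bits 4-15 */
+
+       tb->lo = cpu_to_le32((u32)addr);        /* low 32 bits of address */
+       hi_n_len |= il_get_dma_hi_addr(addr);   /* bits 35:32 of address */
+       tb->hi_n_len = cpu_to_le16(hi_n_len);
+}
+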
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT  0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN   0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN    0x02
+
+struct il_rate_info {
+       u8 plcp;                /* uCode API:  RATE_6M_PLCP, etc. */
+       u8 plcp_siso;           /* uCode API:  RATE_SISO_6M_PLCP, etc. */
+       u8 plcp_mimo2;          /* uCode API:  RATE_MIMO2_6M_PLCP, etc. */
+       u8 ieee;                /* MAC header:  RATE_6M_IEEE, etc. */
+       u8 prev_ieee;           /* previous rate in IEEE speeds */
+       u8 next_ieee;           /* next rate in IEEE speeds */
+       u8 prev_rs;             /* previous rate used in rs algo */
+       u8 next_rs;             /* next rate used in rs algo */
+       u8 prev_rs_tgg;         /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;         /* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+       u8 plcp;                /* uCode API:  RATE_6M_PLCP, etc. */
+       u8 ieee;                /* MAC header:  RATE_6M_IEEE, etc. */
+       u8 prev_ieee;           /* previous rate in IEEE speeds */
+       u8 next_ieee;           /* next rate in IEEE speeds */
+       u8 prev_rs;             /* previous rate used in rs algo */
+       u8 next_rs;             /* next rate used in rs algo */
+       u8 prev_rs_tgg;         /* previous rate used in TGG rs algo */
+       u8 next_rs_tgg;         /* next rate used in TGG rs algo */
+       u8 table_rs_idx;        /* idx in rate scale table cmd */
+       u8 prev_table_rs;       /* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+       RATE_1M_IDX = 0,
+       RATE_2M_IDX,
+       RATE_5M_IDX,
+       RATE_11M_IDX,
+       RATE_6M_IDX,
+       RATE_9M_IDX,
+       RATE_12M_IDX,
+       RATE_18M_IDX,
+       RATE_24M_IDX,
+       RATE_36M_IDX,
+       RATE_48M_IDX,
+       RATE_54M_IDX,
+       RATE_60M_IDX,
+       RATE_COUNT,
+       RATE_COUNT_LEGACY = RATE_COUNT - 1,     /* Excluding 60M */
+       RATE_COUNT_3945 = RATE_COUNT - 1,
+       RATE_INVM_IDX = RATE_COUNT,
+       RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+       RATE_6M_IDX_TBL = 0,
+       RATE_9M_IDX_TBL,
+       RATE_12M_IDX_TBL,
+       RATE_18M_IDX_TBL,
+       RATE_24M_IDX_TBL,
+       RATE_36M_IDX_TBL,
+       RATE_48M_IDX_TBL,
+       RATE_54M_IDX_TBL,
+       RATE_1M_IDX_TBL,
+       RATE_2M_IDX_TBL,
+       RATE_5M_IDX_TBL,
+       RATE_11M_IDX_TBL,
+       RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+       IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+       IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+       IL_LAST_OFDM_RATE = RATE_60M_IDX,
+       IL_FIRST_CCK_RATE = RATE_1M_IDX,
+       IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define        RATE_6M_MASK   (1 << RATE_6M_IDX)
+#define        RATE_9M_MASK   (1 << RATE_9M_IDX)
+#define        RATE_12M_MASK  (1 << RATE_12M_IDX)
+#define        RATE_18M_MASK  (1 << RATE_18M_IDX)
+#define        RATE_24M_MASK  (1 << RATE_24M_IDX)
+#define        RATE_36M_MASK  (1 << RATE_36M_IDX)
+#define        RATE_48M_MASK  (1 << RATE_48M_IDX)
+#define        RATE_54M_MASK  (1 << RATE_54M_IDX)
+#define RATE_60M_MASK  (1 << RATE_60M_IDX)
+#define        RATE_1M_MASK   (1 << RATE_1M_IDX)
+#define        RATE_2M_MASK   (1 << RATE_2M_IDX)
+#define        RATE_5M_MASK   (1 << RATE_5M_IDX)
+#define        RATE_11M_MASK  (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+       RATE_6M_PLCP = 13,
+       RATE_9M_PLCP = 15,
+       RATE_12M_PLCP = 5,
+       RATE_18M_PLCP = 7,
+       RATE_24M_PLCP = 9,
+       RATE_36M_PLCP = 11,
+       RATE_48M_PLCP = 1,
+       RATE_54M_PLCP = 3,
+       RATE_60M_PLCP = 3,      /*FIXME:RS:should be removed */
+       RATE_1M_PLCP = 10,
+       RATE_2M_PLCP = 20,
+       RATE_5M_PLCP = 55,
+       RATE_11M_PLCP = 110,
+       /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+       RATE_SISO_6M_PLCP = 0,
+       RATE_SISO_12M_PLCP = 1,
+       RATE_SISO_18M_PLCP = 2,
+       RATE_SISO_24M_PLCP = 3,
+       RATE_SISO_36M_PLCP = 4,
+       RATE_SISO_48M_PLCP = 5,
+       RATE_SISO_54M_PLCP = 6,
+       RATE_SISO_60M_PLCP = 7,
+       RATE_MIMO2_6M_PLCP = 0x8,
+       RATE_MIMO2_12M_PLCP = 0x9,
+       RATE_MIMO2_18M_PLCP = 0xa,
+       RATE_MIMO2_24M_PLCP = 0xb,
+       RATE_MIMO2_36M_PLCP = 0xc,
+       RATE_MIMO2_48M_PLCP = 0xd,
+       RATE_MIMO2_54M_PLCP = 0xe,
+       RATE_MIMO2_60M_PLCP = 0xf,
+       RATE_SISO_INVM_PLCP,
+       RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+       RATE_6M_IEEE = 12,
+       RATE_9M_IEEE = 18,
+       RATE_12M_IEEE = 24,
+       RATE_18M_IEEE = 36,
+       RATE_24M_IEEE = 48,
+       RATE_36M_IEEE = 72,
+       RATE_48M_IEEE = 96,
+       RATE_54M_IEEE = 108,
+       RATE_60M_IEEE = 120,
+       RATE_1M_IEEE = 2,
+       RATE_2M_IEEE = 4,
+       RATE_5M_IEEE = 11,
+       RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK    \
+       (RATE_1M_MASK          | \
+       RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK          \
+       (IL_CCK_BASIC_RATES_MASK  | \
+       RATE_5M_MASK          | \
+       RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK   \
+       (RATE_6M_MASK         | \
+       RATE_12M_MASK         | \
+       RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK         \
+       (IL_OFDM_BASIC_RATES_MASK | \
+       RATE_9M_MASK          | \
+       RATE_18M_MASK         | \
+       RATE_36M_MASK         | \
+       RATE_48M_MASK         | \
+       RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK         \
+       (IL_OFDM_BASIC_RATES_MASK | \
+        IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE    -1
+
+#define IL_MIN_RSSI_VAL                 -100
+#define IL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT        160
+#define IL_LEGACY_SUCCESS_LIMIT        480
+#define IL_LEGACY_TBL_COUNT            160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT   400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT   4500
+#define IL_NONE_LEGACY_TBL_COUNT       1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO               12800   /* 100% */
+#define RATE_SCALE_SWITCH              10880   /*  85% */
+#define RATE_HIGH_TH           10880   /*  85% */
+#define RATE_INCREASE_TH               6400    /*  50% */
+#define RATE_DECREASE_TH               1920    /*  15% */
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1      0
+#define IL_LEGACY_SWITCH_ANTENNA2      1
+#define IL_LEGACY_SWITCH_SISO          2
+#define IL_LEGACY_SWITCH_MIMO2_AB      3
+#define IL_LEGACY_SWITCH_MIMO2_AC      4
+#define IL_LEGACY_SWITCH_MIMO2_BC      5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1        0
+#define IL_SISO_SWITCH_ANTENNA2        1
+#define IL_SISO_SWITCH_MIMO2_AB        2
+#define IL_SISO_SWITCH_MIMO2_AC        3
+#define IL_SISO_SWITCH_MIMO2_BC        4
+#define IL_SISO_SWITCH_GI              5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1       0
+#define IL_MIMO2_SWITCH_ANTENNA2       1
+#define IL_MIMO2_SWITCH_SISO_A         2
+#define IL_MIMO2_SWITCH_SISO_B         3
+#define IL_MIMO2_SWITCH_SISO_C         4
+#define IL_MIMO2_SWITCH_GI             5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT                3       /* # possible actions */
+
+#define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD    0
+#define IL_AGG_LOAD_THRESHOLD  10
+#define IL_AGG_ALL_TID         0xff
+#define TID_QUEUE_CELL_SPACING 50      /*mS */
+#define TID_QUEUE_MAX_SIZE     20
+#define TID_ROUND_VALUE                5       /* mS */
+#define TID_MAX_LOAD_COUNT     8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+       LQ_NONE,
+       LQ_G,                   /* legacy types */
+       LQ_A,
+       LQ_SISO,                /* high-throughput types */
+       LQ_MIMO2,
+       LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define        ANT_NONE        0x0
+#define        ANT_A           BIT(0)
+#define        ANT_B           BIT(1)
+#define        ANT_AB          (ANT_A | ANT_B)
+#define ANT_C          BIT(2)
+#define        ANT_AC          (ANT_A | ANT_C)
+#define ANT_BC         (ANT_B | ANT_C)
+#define ANT_ABC                (ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE        12
+
+struct il_rate_mcs_info {
+       char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+       char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+       u64 data;               /* bitmap of successful frames */
+       s32 success_counter;    /* number of frames successful */
+       s32 success_ratio;      /* per-cent * 128  */
+       s32 counter;            /* number of frames attempted */
+       s32 average_tpt;        /* success ratio * expected throughput */
+       unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+       enum il_table_type lq_type;
+       u8 ant_type;
+       u8 is_SGI;              /* 1 = short guard interval */
+       u8 is_ht40;             /* 1 = 40 MHz channel width */
+       u8 is_dup;              /* 1 = duplicated data streams */
+       u8 action;              /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+       u8 max_search;          /* maximum number of tables we can search */
+       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
+       u32 current_rate;       /* rate_n_flags, uCode API format */
+       struct il_rate_scale_data win[RATE_COUNT];      /* rate histories */
+};
+
+struct il_traffic_load {
+       unsigned long time_stamp;       /* age of the oldest stats */
+       u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+                                                * slice */
+       u32 total;              /* total num of packets during the
+                                * last TID_MAX_TIME_DIFF */
+       u8 queue_count;         /* number of queues that have
+                                * been used since the last cleanup */
+       u8 head;                /* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+       u8 active_tbl;          /* idx of active table, range 0-1 */
+       u8 enable_counter;      /* indicates HT mode */
+       u8 stay_in_tbl;         /* 1: disallow, 0: allow search for new mode */
+       u8 search_better_tbl;   /* 1: currently trying alternate mode */
+       s32 last_tpt;
+
+       /* The following determine when to search for a new mode */
+       u32 table_count_limit;
+       u32 max_failure_limit;  /* # failed frames before new search */
+       u32 max_success_limit;  /* # successful frames before new search */
+       u32 table_count;
+       u32 total_failed;       /* total failed frames, any/all rates */
+       u32 total_success;      /* total successful frames, any/all rates */
+       u64 flush_timer;        /* time staying in mode before new search */
+
+       u8 action_counter;      /* # mode-switch actions tried */
+       u8 is_green;
+       u8 is_dup;
+       enum ieee80211_band band;
+
+       /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+       u32 supp_rates;
+       u16 active_legacy_rate;
+       u16 active_siso_rate;
+       u16 active_mimo2_rate;
+       s8 max_rate_idx;        /* Max rate set by user */
+       u8 missed_rate_counter;
+
+       struct il_link_quality_cmd lq;
+       struct il_scale_tbl_info lq_info[LQ_SIZE];      /* "active", "search" */
+       struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+       u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+       struct dentry *rs_sta_dbgfs_scale_table_file;
+       struct dentry *rs_sta_dbgfs_stats_table_file;
+       struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+       struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+       u32 dbg_fixed_rate;
+#endif
+       struct il_priv *drv;
+
+       /* used to be in sta_info */
+       int last_txrate_idx;
+       /* last tx rate_n_flags */
+       u32 last_rate_n_flags;
+       /* packets destined for this STA are aggregated */
+       u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+       struct il_station_priv_common common;
+       struct il_lq_sta lq_sta;
+       atomic_t pending_frames;
+       bool client;
+       bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+       return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+       if (mask & ANT_A)
+               return ANT_A;
+       if (mask & ANT_B)
+               return ANT_B;
+       return ANT_C;
+}
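
The two helpers above follow directly from the ANT_* bitmasks: one counts the set bits, the other returns the lowest antenna present. A small sanity-check sketch; example_check_antenna_helpers() is not part of the driver:

	/* Sketch only: expected results of the antenna helpers above. */
	static inline void example_check_antenna_helpers(void)
	{
		BUG_ON(il4965_num_of_ant(ANT_AB) != 2);
		BUG_ON(il4965_num_of_ant(ANT_ABC) != 3);
		BUG_ON(il4965_first_antenna(ANT_BC) != ANT_B);
		BUG_ON(il4965_first_antenna(ANT_C) != ANT_C);
	}
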
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network the
+ * station is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                               u8 sta_id);
+extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                               u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to make it a stand-alone module.  The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw.
+ *
+ */
+extern int il4965_rate_control_register(void);
+extern int il3945_rate_control_register(void);
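
A minimal sketch of the ordering the comment above asks for: register the rate-control callbacks first, then the hardware, and back the registration out if ieee80211_register_hw() fails. The surrounding function and the hw argument are illustrative, not the driver's actual probe path:

	/* Sketch only: rate control registered before ieee80211_register_hw(). */
	static int example_register_with_mac80211(struct ieee80211_hw *hw)
	{
		int err;

		err = il4965_rate_control_register();
		if (err)
			return err;

		err = ieee80211_register_hw(hw);
		if (err)
			il4965_rate_control_unregister();

		return err;
	}
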
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void il4965_rate_control_unregister(void);
+extern void il3945_rate_control_unregister(void);
+
+extern int il_power_update_mode(struct il_priv *il, bool force);
+extern void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. If set, that
+ * level is used; otherwise the global debug level, which can be set via a
+ * module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+       if (il->debug_level)
+               return il->debug_level;
+       else
+               return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+       return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len)                                 \
+do {                                                                   \
+       print_hex_dump(KERN_ERR, "iwl data: ",                          \
+                      DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);           \
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...)                                    \
+do {                                                                   \
+       if (il_get_debug_level(il) & level)                             \
+               dev_printk(KERN_ERR, &il->hw->wiphy->dev,               \
+                        "%c %s " fmt, in_interrupt() ? 'I' : 'U',      \
+                       __func__ , ## args);                            \
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len)                           \
+do {                                                                   \
+       if (il_get_debug_level(il) & level)                             \
+               print_hex_dump(KERN_DEBUG, "iwl data: ",                \
+                              DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);   \
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+       return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IL_xxxx_DEBUG() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ *     /sys/module/iwl4965/parameters/debug
+ *     /sys/module/iwl3945/parameters/debug
+ *     /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
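
As a concrete, hypothetical instance of the recipe above, a new classification could look like the following; IL_DL_EXAMPLE and D_EXAMPLE are made-up names, and the bit chosen must not collide with the values listed below (bit 27 is currently unused there):

	/* Sketch only: a hypothetical new debug classification. */
	#define IL_DL_EXAMPLE          (1 << 27)
	#define D_EXAMPLE(f, a...)     IL_DBG(IL_DL_EXAMPLE, f, ## a)

	/* then, in driver code: D_EXAMPLE("value = %d\n", val); */
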
+
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO             (1 << 0)
+#define IL_DL_MAC80211         (1 << 1)
+#define IL_DL_HCMD             (1 << 2)
+#define IL_DL_STATE            (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP          (1 << 4)
+#define IL_DL_HCMD_DUMP                (1 << 5)
+#define IL_DL_EEPROM           (1 << 6)
+#define IL_DL_RADIO            (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER            (1 << 8)
+#define IL_DL_TEMP             (1 << 9)
+#define IL_DL_NOTIF            (1 << 10)
+#define IL_DL_SCAN             (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC            (1 << 12)
+#define IL_DL_DROP             (1 << 13)
+#define IL_DL_TXPOWER          (1 << 14)
+#define IL_DL_AP               (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW               (1 << 16)
+#define IL_DL_RF_KILL          (1 << 17)
+#define IL_DL_FW_ERRORS                (1 << 18)
+#define IL_DL_LED              (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE             (1 << 20)
+#define IL_DL_CALIB            (1 << 21)
+#define IL_DL_WEP              (1 << 22)
+#define IL_DL_TX               (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX               (1 << 24)
+#define IL_DL_ISR              (1 << 25)
+#define IL_DL_HT               (1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H              (1 << 28)
+#define IL_DL_STATS            (1 << 29)
+#define IL_DL_TX_REPLY         (1 << 30)
+#define IL_DL_QOS              (1 << 31)
+
+#define D_INFO(f, a...)                IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...)    IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...)     IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...)                IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...)                IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...)          IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...)          IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...)         IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...)         IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...)         IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...)          IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...)     IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...)      IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...)       IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...)          IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...)     IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...)                IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...)          IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...)     IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...)                IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...)       IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...)       IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...)          IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...)       IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...)    IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...)         IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...)       IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...)       IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...)         IL_DBG(IL_DL_11H, f, ## a)
+
+#endif /* __il_core_h__ */
similarity index 84%
rename from drivers/net/wireless/iwlegacy/iwl-csr.h
rename to drivers/net/wireless/iwlegacy/csr.h
index 668a961..9138e15 100644 (file)
@@ -60,8 +60,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
-#define __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
+#define __il_csr_h__
 /*
  * CSR (control and status registers)
  *
@@ -70,9 +70,9 @@
  * low power states due to driver-invoked device resets
  * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
  *
- * Use iwl_write32() and iwl_read32() family to access these registers;
+ * Use _il_wr() and _il_rd() family to access these registers;
  * these provide simple PCI bus access, without waking up the MAC.
- * Do not use iwl_legacy_write_direct32() family for these registers;
+ * Do not use il_wr() family for these registers;
  * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
  * The MAC (uCode processor, etc.) does not need to be powered up for accessing
  * the CSR registers.
  */
 #define CSR_BASE    (0x000)
 
-#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
-#define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
-#define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack*/
-#define CSR_GPIO_IN             (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET               (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000)       /* hardware interface config */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004)       /* accum ints, 32-usec units */
+#define CSR_INT                 (CSR_BASE+0x008)       /* host interrupt status/ack */
+#define CSR_INT_MASK            (CSR_BASE+0x00c)       /* host interrupt enable */
+#define CSR_FH_INT_STATUS       (CSR_BASE+0x010)       /* busmaster int status/ack */
+#define CSR_GPIO_IN             (CSR_BASE+0x018)       /* read external chip pins */
+#define CSR_RESET               (CSR_BASE+0x020)       /* busmaster enable, NMI, etc */
 #define CSR_GP_CNTRL            (CSR_BASE+0x024)
 
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
 #define CSR_INT_PERIODIC_REG   (CSR_BASE+0x005)
 
 /*
 
 #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A  (0x00080000)
 #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM        (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY     (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE             (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY     (0x00400000)    /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE             (0x08000000)  /* WAKE_ME */
 
-#define CSR_INT_PERIODIC_DIS                   (0x00) /* disable periodic int*/
-#define CSR_INT_PERIODIC_ENA                   (0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_DIS                   (0x00)  /* disable periodic int */
+#define CSR_INT_PERIODIC_ENA                   (0xFF)  /* 255*32 usec ~ 8 msec */
 
 /* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
  * acknowledged (reset) by host writing "1" to flagged bits. */
-#define CSR_INT_BIT_FH_RX        (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
-#define CSR_INT_BIT_HW_ERR       (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_RX_PERIODIC         (1 << 28) /* Rx periodic */
-#define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
-#define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
-#define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
-#define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
-#define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
-#define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses, 3945 */
-#define CSR_INT_BIT_WAKEUP       (1 << 1)  /* NIC controller waking up (pwr mgmt) */
-#define CSR_INT_BIT_ALIVE        (1 << 0)  /* uCode interrupts once it initializes */
+#define CSR_INT_BIT_FH_RX        (1 << 31)     /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR       (1 << 29)     /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC         (1 << 28)      /* Rx periodic */
+#define CSR_INT_BIT_FH_TX        (1 << 27)     /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD          (1 << 26)     /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR       (1 << 25)     /* uCode error */
+#define CSR_INT_BIT_RF_KILL      (1 << 7)      /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL      (1 << 6)      /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX        (1 << 3)      /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP       (1 << 1)      /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE        (1 << 0)      /* uCode interrupts once it initializes */
 
 #define CSR_INI_SET_MASK       (CSR_INT_BIT_FH_RX   | \
                                 CSR_INT_BIT_HW_ERR  | \
                                 CSR_INT_BIT_ALIVE)
 
 /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
-#define CSR_FH_INT_BIT_ERR       (1 << 31) /* Error */
-#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30) /* High priority Rx, bypass coalescing */
-#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18) /* Rx channel 2 (3945 only) */
-#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17) /* Rx channel 1 */
-#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16) /* Rx channel 0 */
-#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)  /* Tx channel 6 (3945 only) */
-#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)  /* Tx channel 1 */
-#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)  /* Tx channel 0 */
+#define CSR_FH_INT_BIT_ERR       (1 << 31)     /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30)     /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18)   /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17)     /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16)     /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)    /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)      /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)      /* Tx channel 0 */
 
 #define CSR39_FH_INT_RX_MASK   (CSR_FH_INT_BIT_HI_PRIOR | \
                                 CSR39_FH_INT_BIT_RX_CHNL2 | \
                                 CSR_FH_INT_BIT_RX_CHNL1 | \
                                 CSR_FH_INT_BIT_RX_CHNL0)
 
-
 #define CSR39_FH_INT_TX_MASK   (CSR39_FH_INT_BIT_TX_CHNL6 | \
                                 CSR_FH_INT_BIT_TX_CHNL1 | \
                                 CSR_FH_INT_BIT_TX_CHNL0)
 #define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE         (0x04000000)
 #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
 
-
 /* EEPROM REG */
 #define CSR_EEPROM_REG_READ_VALID_MSK  (0x00000001)
 #define CSR_EEPROM_REG_BIT_CMD         (0x00000002)
 #define CSR_EEPROM_REG_MSK_DATA                (0xFFFF0000)
 
 /* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK                (0x00000007) /* signature */
+#define CSR_EEPROM_GP_VALID_MSK                (0x00000007)    /* signature */
 #define CSR_EEPROM_GP_IF_OWNER_MSK     (0x00000180)
 #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K                (0x00000002)
 #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K                (0x00000004)
 
 /* GP REG */
-#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000)       /* bit 24/25 */
 #define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
 #define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
 #define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
 #define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
 
-
 /* CSR GIO */
 #define CSR_GIO_REG_VAL_L0S_ENABLED    (0x00000002)
 
 /* HPET MEM debug */
 #define CSR_DBG_HPET_MEM_REG_VAL       (0xFFFF0000)
 
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
 #define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
 
  * to indirectly access device's internal memory or registers that
  * may be powered-down.
  *
- * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * Use il_wr()/il_rd() family
  * for these registers;
  * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
  * to make sure the MAC (uCode processor, etc.) is powered up for accessing
  * internal resources.
  *
- * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * Do not use _il_wr()/_il_rd() family to access these registers;
  * these provide only simple PCI bus access, without waking up the MAC.
  */
 #define HBUS_BASE      (0x400)
 #define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
 
 /*
- * Per-Tx-queue write pointer (index, really!)
- * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
  * Bit usage:
- *  0-7:  queue write index
+ *  0-7:  queue write idx
  * 11-8:  queue selector
  */
 #define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
 
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644 (file)
index 0000000..b1b8926
--- /dev/null
@@ -0,0 +1,1411 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
+       if (!debugfs_create_file(#name, mode, parent, il,               \
+                        &il_dbgfs_##name##_ops))               \
+               goto err;                                               \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {                       \
+       struct dentry *__tmp;                                           \
+       __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,           \
+                                   parent, ptr);                       \
+       if (IS_ERR(__tmp) || !__tmp)                                    \
+               goto err;                                               \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do {                                \
+       struct dentry *__tmp;                                           \
+       __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,            \
+                                  parent, ptr);                        \
+       if (IS_ERR(__tmp) || !__tmp)                                    \
+               goto err;                                               \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name)                                         \
+static ssize_t il_dbgfs_##name##_read(struct file *file,               \
+                                       char __user *user_buf,          \
+                                       size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)                                        \
+static ssize_t il_dbgfs_##name##_write(struct file *file,              \
+                                       const char __user *user_buf,    \
+                                       size_t count, loff_t *ppos);
+
+static int
+il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name)                            \
+       DEBUGFS_READ_FUNC(name);                                \
+static const struct file_operations il_dbgfs_##name##_ops = {  \
+       .read = il_dbgfs_##name##_read,                         \
+       .open = il_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                          \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)                           \
+       DEBUGFS_WRITE_FUNC(name);                               \
+static const struct file_operations il_dbgfs_##name##_ops = {  \
+       .write = il_dbgfs_##name##_write,                       \
+       .open = il_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                          \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)                      \
+       DEBUGFS_READ_FUNC(name);                                \
+       DEBUGFS_WRITE_FUNC(name);                               \
+static const struct file_operations il_dbgfs_##name##_ops = {  \
+       .write = il_dbgfs_##name##_write,                       \
+       .read = il_dbgfs_##name##_read,                         \
+       .open = il_dbgfs_open_file_generic,                     \
+       .llseek = generic_file_llseek,                          \
+};
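
To show how these macros combine, the hypothetical file below defines its read and write handlers, lets DEBUGFS_READ_WRITE_FILE_OPS() build the matching file_operations, and would later be created with DEBUGFS_ADD_FILE(). The name "example" and the dir_debug dentry are illustrative only:

	/* Sketch only: a hypothetical debugfs file wired up via the macros above. */
	static ssize_t il_dbgfs_example_read(struct file *file, char __user *user_buf,
					     size_t count, loff_t *ppos)
	{
		struct il_priv *il = file->private_data;
		char buf[32];
		int pos = scnprintf(buf, sizeof(buf), "stations: %d\n",
				    il->num_stations);

		return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	}

	static ssize_t il_dbgfs_example_write(struct file *file,
					      const char __user *user_buf,
					      size_t count, loff_t *ppos)
	{
		return count;	/* accept and ignore the input */
	}

	DEBUGFS_READ_WRITE_FILE_OPS(example);

	/* later, e.g. in il_dbgfs_register():
	 *	DEBUGFS_ADD_FILE(example, dir_debug, S_IWUSR | S_IRUSR);
	 */
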
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+                      loff_t *ppos)
+{
+
+       struct il_priv *il = file->private_data;
+       char *buf;
+       int pos = 0;
+
+       int cnt;
+       ssize_t ret;
+       const size_t bufsz =
+           100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+                             il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+                             il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+                     il->tx_stats.data_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+                     il->tx_stats.data_bytes);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+                                  const char __user *user_buf, size_t count,
+                                  loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       u32 clear_flag;
+       char buf[8];
+       int buf_size;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%x", &clear_flag) != 1)
+               return -EFAULT;
+       il_clear_traffic_stats(il);
+
+       return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+                      loff_t *ppos)
+{
+
+       struct il_priv *il = file->private_data;
+       char *buf;
+       int pos = 0;
+       int cnt;
+       ssize_t ret;
+       const size_t bufsz =
+           100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+                             il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+                             il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+                     il->rx_stats.data_cnt);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+                     il->rx_stats.data_bytes);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+                  loff_t *ppos)
+{
+       u32 val;
+       char *buf;
+       ssize_t ret;
+       int i;
+       int pos = 0;
+       struct il_priv *il = file->private_data;
+       size_t bufsz;
+
+       /* default is to dump the entire data segment */
+       if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+               il->dbgfs_sram_offset = 0x800000;
+               if (il->ucode_type == UCODE_INIT)
+                       il->dbgfs_sram_len = il->ucode_init_data.len;
+               else
+                       il->dbgfs_sram_len = il->ucode_data.len;
+       }
+       bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+                     il->dbgfs_sram_len);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+                     il->dbgfs_sram_offset);
+       for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+               val =
+                   il_read_targ_mem(il,
+                                    il->dbgfs_sram_offset +
+                                    il->dbgfs_sram_len - i);
+               if (i < 4) {
+                       switch (i) {
+                       case 1:
+                               val &= BYTE1_MASK;
+                               break;
+                       case 2:
+                               val &= BYTE2_MASK;
+                               break;
+                       case 3:
+                               val &= BYTE3_MASK;
+                               break;
+                       }
+               }
+               if (!(i % 16))
+                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+               pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+                   size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[64];
+       int buf_size;
+       u32 offset, len;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+               il->dbgfs_sram_offset = offset;
+               il->dbgfs_sram_len = len;
+       } else {
+               il->dbgfs_sram_offset = 0;
+               il->dbgfs_sram_len = 0;
+       }
+
+       return count;
+}
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+                      loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       struct il_station_entry *station;
+       int max_sta = il->hw_params.max_stations;
+       char *buf;
+       int i, j, pos = 0;
+       ssize_t ret;
+       /* Add 30 for initial string */
+       const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+                     il->num_stations);
+
+       for (i = 0; i < max_sta; i++) {
+               station = &il->stations[i];
+               if (!station->used)
+                       continue;
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "station %d - addr: %pM, flags: %#x\n", i,
+                             station->sta.sta.addr,
+                             station->sta.station_flags_msk);
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+               for (j = 0; j < MAX_TID_COUNT; j++) {
+                       pos +=
+                           scnprintf(buf + pos, bufsz - pos,
+                                     "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+                                     j, station->tid[j].seq_number,
+                                     station->tid[j].agg.txq_id,
+                                     station->tid[j].agg.frame_count,
+                                     station->tid[j].tfds_in_queue,
+                                     station->tid[j].agg.start_idx,
+                                     station->tid[j].agg.bitmap,
+                                     station->tid[j].agg.rate_n_flags);
+
+                       if (station->tid[j].agg.wait_for_ba)
+                               pos +=
+                                   scnprintf(buf + pos, bufsz - pos,
+                                             " - waitforba");
+                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+               }
+
+               pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+                 loff_t *ppos)
+{
+       ssize_t ret;
+       struct il_priv *il = file->private_data;
+       int pos = 0, ofs = 0, buf_size = 0;
+       const u8 *ptr;
+       char *buf;
+       u16 eeprom_ver;
+       size_t eeprom_len = il->cfg->base_params->eeprom_size;
+       buf_size = 4 * eeprom_len + 256;
+
+       if (eeprom_len % 16) {
+               IL_ERR("NVM size is not multiple of 16.\n");
+               return -ENODATA;
+       }
+
+       ptr = il->eeprom;
+       if (!ptr) {
+               IL_ERR("Invalid EEPROM memory\n");
+               return -ENOMEM;
+       }
+
+       /* 4 characters for byte 0xYY */
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+       eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+       pos +=
+           scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+                     eeprom_ver);
+       for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+               hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+                                  buf_size - pos, 0);
+               pos += strlen(buf + pos);
+               if (buf_size - pos > 0)
+                       buf[pos++] = '\n';
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+                      loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       struct ieee80211_channel *channels = NULL;
+       const struct ieee80211_supported_band *supp_band = NULL;
+       int pos = 0, i, bufsz = PAGE_SIZE;
+       char *buf;
+       ssize_t ret;
+
+       if (!test_bit(S_GEO_CONFIGURED, &il->status))
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+       if (supp_band) {
+               channels = supp_band->channels;
+
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+                             supp_band->n_channels);
+
+               for (i = 0; i < supp_band->n_channels; i++)
+                       pos +=
+                           scnprintf(buf + pos, bufsz - pos,
+                                     "%d: %ddBm: BSS%s%s, %s.\n",
+                                     channels[i].hw_value,
+                                     channels[i].max_power,
+                                     channels[i].
+                                     flags & IEEE80211_CHAN_RADAR ?
+                                     " (IEEE 802.11h required)" : "",
+                                     ((channels[i].
+                                       flags & IEEE80211_CHAN_NO_IBSS) ||
+                                      (channels[i].
+                                       flags & IEEE80211_CHAN_RADAR)) ? "" :
+                                     ", IBSS",
+                                     channels[i].
+                                     flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+                                     "passive only" : "active/passive");
+       }
+       supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+       if (supp_band) {
+               channels = supp_band->channels;
+
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "Displaying %d channels in 5.2GHz band (802.11a)\n",
+                             supp_band->n_channels);
+
+               for (i = 0; i < supp_band->n_channels; i++)
+                       pos +=
+                           scnprintf(buf + pos, bufsz - pos,
+                                     "%d: %ddBm: BSS%s%s, %s.\n",
+                                     channels[i].hw_value,
+                                     channels[i].max_power,
+                                     channels[i].
+                                     flags & IEEE80211_CHAN_RADAR ?
+                                     " (IEEE 802.11h required)" : "",
+                                     ((channels[i].
+                                       flags & IEEE80211_CHAN_NO_IBSS) ||
+                                      (channels[i].
+                                       flags & IEEE80211_CHAN_RADAR)) ? "" :
+                                     ", IBSS",
+                                     channels[i].
+                                     flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+                                     "passive only" : "active/passive");
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+                    loff_t *ppos)
+{
+
+       struct il_priv *il = file->private_data;
+       char buf[512];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+                     test_bit(S_HCMD_ACTIVE, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+                     test_bit(S_INT_ENABLED, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
+                     test_bit(S_RF_KILL_HW, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+                     test_bit(S_CT_KILL, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+                     test_bit(S_INIT, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+                     test_bit(S_ALIVE, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+                     test_bit(S_READY, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+                     test_bit(S_TEMPERATURE, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+                     test_bit(S_GEO_CONFIGURED, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+                     test_bit(S_EXIT_PENDING, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+                     test_bit(S_STATS, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+                     test_bit(S_SCANNING, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+                     test_bit(S_SCAN_ABORTING, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+                     test_bit(S_SCAN_HW, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+                     test_bit(S_POWER_PMI, &il->status));
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+                     test_bit(S_FW_ERROR, &il->status));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+                       loff_t *ppos)
+{
+
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = 24 * 64;    /* 24 items * 64 char per item */
+       ssize_t ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Can not allocate Buffer\n");
+               return -ENOMEM;
+       }
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+                     il->isr_stats.hw);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+                     il->isr_stats.sw);
+       if (il->isr_stats.sw || il->isr_stats.hw) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "\tLast Restarting Code:  0x%X\n",
+                             il->isr_stats.err_code);
+       }
+#ifdef CONFIG_IWLEGACY_DEBUG
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+                     il->isr_stats.sch);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+                     il->isr_stats.alive);
+#endif
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "HW RF KILL switch toggled:\t %u\n",
+                     il->isr_stats.rfkill);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+                     il->isr_stats.ctkill);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+                     il->isr_stats.wakeup);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+                     il->isr_stats.rx);
+       for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+               if (il->isr_stats.handlers[cnt] > 0)
+                       pos +=
+                           scnprintf(buf + pos, bufsz - pos,
+                                     "\tRx handler[%36s]:\t\t %u\n",
+                                     il_get_cmd_string(cnt),
+                                     il->isr_stats.handlers[cnt]);
+       }
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+                     il->isr_stats.tx);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+                     il->isr_stats.unhandled);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+                        size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[8];
+       int buf_size;
+       u32 reset_flag;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%x", &reset_flag) != 1)
+               return -EFAULT;
+       if (reset_flag == 0)
+               il_clear_isr_stats(il);
+
+       return count;
+}
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+                 loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       struct il_rxon_context *ctx = &il->ctx;
+       int pos = 0, i;
+       char buf[256];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
+       for (i = 0; i < AC_NUM; i++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "\tcw_min\tcw_max\taifsn\ttxop\n");
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+                             ctx->qos_data.def_qos_parm.ac[i].cw_min,
+                             ctx->qos_data.def_qos_parm.ac[i].cw_max,
+                             ctx->qos_data.def_qos_parm.ac[i].aifsn,
+                             ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+       }
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+                           size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[8];
+       int buf_size;
+       int ht40;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &ht40) != 1)
+               return -EFAULT;
+       if (!il_is_any_associated(il))
+               il->disable_ht40 = ht40 ? true : false;
+       else {
+               IL_ERR("Sta associated with AP - "
+                      "Change to 40MHz channel support is not allowed\n");
+               return -EINVAL;
+       }
+
+       return count;
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[100];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+                     il->disable_ht40 ? "Disabled" : "Enabled");
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
+                         size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0, ofs = 0;
+       int cnt = 0, entry;
+       struct il_tx_queue *txq;
+       struct il_queue *q;
+       struct il_rx_queue *rxq = &il->rxq;
+       char *buf;
+       int bufsz =
+           ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+           (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
+       const u8 *ptr;
+       ssize_t ret;
+
+       if (!il->txq) {
+               IL_ERR("txq not ready\n");
+               return -EAGAIN;
+       }
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Can not allocate buffer\n");
+               return -ENOMEM;
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+       for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+               txq = &il->txq[cnt];
+               q = &txq->q;
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
+                             q->read_ptr, q->write_ptr);
+       }
+       if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
+               ptr = il->tx_traffic;
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
+                             il->tx_traffic_idx);
+               for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+                       for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+                            entry++, ofs += 16) {
+                               pos +=
+                                   scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+                                             ofs);
+                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+                                                  buf + pos, bufsz - pos, 0);
+                               pos += strlen(buf + pos);
+                               if (bufsz - pos > 0)
+                                       buf[pos++] = '\n';
+                       }
+               }
+       }
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
+                     rxq->read, rxq->write);
+
+       if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
+               ptr = il->rx_traffic;
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
+                             il->rx_traffic_idx);
+               for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+                       for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+                            entry++, ofs += 16) {
+                               pos +=
+                                   scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+                                             ofs);
+                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+                                                  buf + pos, bufsz - pos, 0);
+                               pos += strlen(buf + pos);
+                               if (bufsz - pos > 0)
+                                       buf[pos++] = '\n';
+                       }
+               }
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[8];
+       int buf_size;
+       int traffic_log;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &traffic_log) != 1)
+               return -EFAULT;
+       if (traffic_log == 0)
+               il_reset_traffic_log(il);
+
+       return count;
+}
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+                      loff_t *ppos)
+{
+
+       struct il_priv *il = file->private_data;
+       struct il_tx_queue *txq;
+       struct il_queue *q;
+       char *buf;
+       int pos = 0;
+       int cnt;
+       int ret;
+       const size_t bufsz =
+           sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+
+       if (!il->txq) {
+               IL_ERR("txq not ready\n");
+               return -EAGAIN;
+       }
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+               txq = &il->txq[cnt];
+               q = &txq->q;
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "hwq %.2d: read=%u write=%u stop=%d"
+                             " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+                             q->read_ptr, q->write_ptr,
+                             !!test_bit(cnt, il->queue_stopped),
+                             txq->swq_id, txq->swq_id & 3,
+                             (txq->swq_id >> 2) & 0x1f);
+               if (cnt >= 4)
+                       continue;
+               /* for the ACs, display the stop count too */
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "        stop-count: %d\n",
+                             atomic_read(&il->queue_stop_count[cnt]));
+       }
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+                      loff_t *ppos)
+{
+
+       struct il_priv *il = file->private_data;
+       struct il_rx_queue *rxq = &il->rxq;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+       pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+                     rxq->free_count);
+       if (rxq->rb_stts) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+                             le16_to_cpu(rxq->rb_stts->
+                                         closed_rb_num) & 0x0FFF);
+       } else {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos,
+                             "closed_rb_num: Not Allocated\n");
+       }
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
+                                                           count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
+                                                           count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
+                                                                count, ppos);
+}
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+                         size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+       ssize_t ret;
+       struct il_sensitivity_data *data;
+
+       data = &il->sensitivity_data;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+                     data->auto_corr_ofdm);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+                     data->auto_corr_ofdm_mrc);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+                     data->auto_corr_ofdm_x1);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+                     data->auto_corr_ofdm_mrc_x1);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+                     data->auto_corr_cck);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+                     data->auto_corr_cck_mrc);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+                     data->last_bad_plcp_cnt_ofdm);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+                     data->last_fa_cnt_ofdm);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+                     data->last_bad_plcp_cnt_cck);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+                     data->last_fa_cnt_cck);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+                     data->nrg_curr_state);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+                     data->nrg_prev_state);
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+       for (cnt = 0; cnt < 10; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, " %u",
+                             data->nrg_value[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+       for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, " %u",
+                             data->nrg_silence_rssi[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+                     data->nrg_silence_ref);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+                     data->nrg_energy_idx);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+                     data->nrg_silence_idx);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+                     data->nrg_th_cck);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "nrg_auto_corr_silence_diff:\t %u\n",
+                     data->nrg_auto_corr_silence_diff);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+                     data->num_in_cck_no_fa);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+                     data->nrg_th_ofdm);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
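All of these read handlers build their output with the same pos += scnprintf(buf + pos, bufsz - pos, ...) accumulation. Unlike snprintf(), scnprintf() returns the number of characters actually stored (never more than the remaining space minus the NUL), so pos cannot run past bufsz and the final pos is exactly the byte count handed to simple_read_from_buffer(). A condensed sketch of the pattern, not a function from this patch:

/* Condensed sketch of the scnprintf() accumulation pattern above. */
static int fill_example(char *buf, size_t bufsz)
{
	int pos = 0;

	pos += scnprintf(buf + pos, bufsz - pos, "first:\t%u\n", 1);
	pos += scnprintf(buf + pos, bufsz - pos, "second:\t%u\n", 2);
	return pos;	/* bytes ready for simple_read_from_buffer() */
}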
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+                         size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       int cnt = 0;
+       char *buf;
+       int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+       ssize_t ret;
+       struct il_chain_noise_data *data;
+
+       data = &il->chain_noise_data;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf) {
+               IL_ERR("Cannot allocate buffer\n");
+               return -ENOMEM;
+       }
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+                     data->active_chains);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+                     data->chain_noise_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+                     data->chain_noise_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+                     data->chain_noise_c);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+                     data->chain_signal_a);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+                     data->chain_signal_b);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+                     data->chain_signal_c);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+                     data->beacon_count);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, " %u",
+                             data->disconn_array[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+               pos +=
+                   scnprintf(buf + pos, bufsz - pos, " %u",
+                             data->delta_gain_code[cnt]);
+       }
+       pos += scnprintf(buf + pos, bufsz - pos, "\n");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+                     data->radio_write);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+                     data->state);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[60];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       u32 pwrsave_status;
+
+       pwrsave_status =
+           _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "%s\n",
+                     (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+                     (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+                     (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+                     "error");
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+                                const char __user *user_buf, size_t count,
+                                loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[8];
+       int buf_size;
+       int clear;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &clear) != 1)
+               return -EINVAL;
+
+       /* ask uCode for a fresh stats notification; the request also tells
+        * uCode to clear its accumulated stats */
+       mutex_lock(&il->mutex);
+       il_send_stats_request(il, CMD_SYNC, true);
+       mutex_unlock(&il->mutex);
+
+       return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+                        size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int len = 0;
+       char buf[20];
+
+       len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int len = 0;
+       char buf[20];
+
+       len =
+           sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+                    loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char *buf;
+       int pos = 0;
+       ssize_t ret = -EFAULT;
+
+       if (il->cfg->ops->lib->dump_fh) {
+               ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+               if (buf) {
+                       ret =
+                           simple_read_from_buffer(user_buf, count, ppos, buf,
+                                                   pos);
+                       kfree(buf);
+               }
+       }
+
+       return ret;
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+                           size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char buf[12];
+       const size_t bufsz = sizeof(buf);
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "%d\n",
+                     il->missed_beacon_threshold);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[8];
+       int buf_size;
+       int missed;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &missed) != 1)
+               return -EINVAL;
+
+       if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+           missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+               il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+       else
+               il->missed_beacon_threshold = missed;
+
+       return count;
+}
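Each write handler in this file follows the same parse idiom: zero an 8-byte stack buffer, copy at most sizeof(buf) - 1 bytes from user space so the string stays NUL-terminated, then sscanf a single integer. A condensed sketch of that idiom; the helper name is illustrative and is not part of this patch.

/* Condensed sketch of the parse idiom shared by these write handlers. */
static int il_dbgfs_parse_int(const char __user *user_buf, size_t count,
			      int *val)
{
	char buf[8] = {};

	if (copy_from_user(buf, user_buf, min(count, sizeof(buf) - 1)))
		return -EFAULT;
	if (sscanf(buf, "%d", val) != 1)
		return -EINVAL;
	return 0;
}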
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+                         size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       int pos = 0;
+       char buf[300];
+       const size_t bufsz = sizeof(buf);
+       struct il_force_reset *force_reset;
+
+       force_reset = &il->force_reset;
+
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
+                     force_reset->reset_request_count);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "\tnumber of reset request success: %d\n",
+                     force_reset->reset_success_count);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos,
+                     "\tnumber of reset request reject: %d\n",
+                     force_reset->reset_reject_count);
+       pos +=
+           scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+                     force_reset->reset_duration);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       int ret;
+       struct il_priv *il = file->private_data;
+
+       ret = il_force_reset(il, true);
+
+       return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+                         size_t count, loff_t *ppos)
+{
+       struct il_priv *il = file->private_data;
+       char buf[8];
+       int buf_size;
+       int timeout;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%d", &timeout) != 1)
+               return -EINVAL;
+       if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+               timeout = IL_DEF_WD_TIMEOUT;
+
+       il->cfg->base_params->wd_timeout = timeout;
+       il_setup_watchdog(il);
+       return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+       struct dentry *phyd = il->hw->wiphy->debugfsdir;
+       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+       dir_drv = debugfs_create_dir(name, phyd);
+       if (!dir_drv)
+               return -ENOMEM;
+
+       il->debugfs_dir = dir_drv;
+
+       dir_data = debugfs_create_dir("data", dir_drv);
+       if (!dir_data)
+               goto err;
+       dir_rf = debugfs_create_dir("rf", dir_drv);
+       if (!dir_rf)
+               goto err;
+       dir_debug = debugfs_create_dir("debug", dir_drv);
+       if (!dir_debug)
+               goto err;
+
+       DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+       DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+       if (il->cfg->base_params->sensitivity_calib_by_driver)
+               DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+       if (il->cfg->base_params->chain_noise_calib_by_driver)
+               DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+       DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+       DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+       if (il->cfg->base_params->sensitivity_calib_by_driver)
+               DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+                                &il->disable_sens_cal);
+       if (il->cfg->base_params->chain_noise_calib_by_driver)
+               DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+                                &il->disable_chain_noise_cal);
+       DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+       return 0;
+
+err:
+       IL_ERR("Can't create the debugfs directory\n");
+       il_dbgfs_unregister(il);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+       if (!il->debugfs_dir)
+               return;
+
+       debugfs_remove_recursive(il->debugfs_dir);
+       il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
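The DEBUGFS_READ_FILE_OPS()/DEBUGFS_WRITE_FILE_OPS() and DEBUGFS_ADD_FILE()/DEBUGFS_ADD_BOOL() wrappers used above are not visible in this part of the diff. As a hedged sketch only, such wrappers conventionally expand to a struct file_operations plus a checked debugfs_create_file() call, roughly as below; the real macros in this patch may differ in detail.

/* Hedged sketch of the conventional shape of these debugfs wrappers;
 * not the macros from this patch. */
#define EXAMPLE_READ_FILE_OPS(name)					\
static const struct file_operations example_##name##_ops = {		\
	.read = il_dbgfs_##name##_read,					\
	.open = simple_open,	/* file->private_data = inode->i_private */ \
	.llseek = generic_file_llseek,					\
};

#define EXAMPLE_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, il,		\
				 &example_##name##_ops))		\
		goto err;						\
} while (0)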
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644 (file)
index cfabb38..0000000
+++ /dev/null
@@ -1,523 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
-       int p = 0;
-
-       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
-                      le32_to_cpu(priv->_3945.statistics.flag));
-       if (le32_to_cpu(priv->_3945.statistics.flag) &
-                       UCODE_STATISTICS_CLEAR_MSK)
-               p += scnprintf(buf + p, bufsz - p,
-                              "\tStatistics have been cleared\n");
-       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
-                      (le32_to_cpu(priv->_3945.statistics.flag) &
-                       UCODE_STATISTICS_FREQUENCY_MSK)
-                       ? "2.4 GHz" : "5.2 GHz");
-       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
-                      (le32_to_cpu(priv->_3945.statistics.flag) &
-                       UCODE_STATISTICS_NARROW_BAND_MSK)
-                       ? "enabled" : "disabled");
-       return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
-                                   char __user *user_buf,
-                                   size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
-                   sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
-       ssize_t ret;
-       struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
-                                       *max_ofdm;
-       struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
-       struct iwl39_statistics_rx_non_phy *general, *accum_general;
-       struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistic information display here is based on
-        * the last statistics notification from uCode
-        * might not reflect the current uCode activity
-        */
-       ofdm = &priv->_3945.statistics.rx.ofdm;
-       cck = &priv->_3945.statistics.rx.cck;
-       general = &priv->_3945.statistics.rx.general;
-       accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
-       accum_cck = &priv->_3945.accum_statistics.rx.cck;
-       accum_general = &priv->_3945.accum_statistics.rx.general;
-       delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
-       delta_cck = &priv->_3945.delta_statistics.rx.cck;
-       delta_general = &priv->_3945.delta_statistics.rx.general;
-       max_ofdm = &priv->_3945.max_delta.rx.ofdm;
-       max_cck = &priv->_3945.max_delta.rx.cck;
-       max_general = &priv->_3945.max_delta.rx.general;
-
-       pos += iwl3945_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Rx - OFDM:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
-                        accum_ofdm->ina_cnt,
-                        delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_cnt:",
-                        le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
-                        delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
-                        le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
-                        delta_ofdm->plcp_err, max_ofdm->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",  "crc32_err:",
-                        le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
-                        delta_ofdm->crc32_err, max_ofdm->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
-                        le32_to_cpu(ofdm->overrun_err),
-                        accum_ofdm->overrun_err, delta_ofdm->overrun_err,
-                        max_ofdm->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "early_overrun_err:",
-                        le32_to_cpu(ofdm->early_overrun_err),
-                        accum_ofdm->early_overrun_err,
-                        delta_ofdm->early_overrun_err,
-                        max_ofdm->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "crc32_good:", le32_to_cpu(ofdm->crc32_good),
-                        accum_ofdm->crc32_good, delta_ofdm->crc32_good,
-                        max_ofdm->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
-                        le32_to_cpu(ofdm->false_alarm_cnt),
-                        accum_ofdm->false_alarm_cnt,
-                        delta_ofdm->false_alarm_cnt,
-                        max_ofdm->false_alarm_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_sync_err_cnt:",
-                        le32_to_cpu(ofdm->fina_sync_err_cnt),
-                        accum_ofdm->fina_sync_err_cnt,
-                        delta_ofdm->fina_sync_err_cnt,
-                        max_ofdm->fina_sync_err_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sfd_timeout:",
-                        le32_to_cpu(ofdm->sfd_timeout),
-                        accum_ofdm->sfd_timeout,
-                        delta_ofdm->sfd_timeout,
-                        max_ofdm->sfd_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_timeout:",
-                        le32_to_cpu(ofdm->fina_timeout),
-                        accum_ofdm->fina_timeout,
-                        delta_ofdm->fina_timeout,
-                        max_ofdm->fina_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "unresponded_rts:",
-                        le32_to_cpu(ofdm->unresponded_rts),
-                        accum_ofdm->unresponded_rts,
-                        delta_ofdm->unresponded_rts,
-                        max_ofdm->unresponded_rts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "rxe_frame_lmt_ovrun:",
-                        le32_to_cpu(ofdm->rxe_frame_limit_overrun),
-                        accum_ofdm->rxe_frame_limit_overrun,
-                        delta_ofdm->rxe_frame_limit_overrun,
-                        max_ofdm->rxe_frame_limit_overrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_ack_cnt:",
-                        le32_to_cpu(ofdm->sent_ack_cnt),
-                        accum_ofdm->sent_ack_cnt,
-                        delta_ofdm->sent_ack_cnt,
-                        max_ofdm->sent_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_cts_cnt:",
-                        le32_to_cpu(ofdm->sent_cts_cnt),
-                        accum_ofdm->sent_cts_cnt,
-                        delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Rx - CCK:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "ina_cnt:",
-                        le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
-                        delta_cck->ina_cnt, max_cck->ina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_cnt:",
-                        le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
-                        delta_cck->fina_cnt, max_cck->fina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "plcp_err:",
-                        le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
-                        delta_cck->plcp_err, max_cck->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "crc32_err:",
-                        le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
-                        delta_cck->crc32_err, max_cck->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "overrun_err:",
-                        le32_to_cpu(cck->overrun_err),
-                        accum_cck->overrun_err,
-                        delta_cck->overrun_err, max_cck->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "early_overrun_err:",
-                        le32_to_cpu(cck->early_overrun_err),
-                        accum_cck->early_overrun_err,
-                        delta_cck->early_overrun_err,
-                        max_cck->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "crc32_good:",
-                        le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
-                        delta_cck->crc32_good,
-                        max_cck->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "false_alarm_cnt:",
-                        le32_to_cpu(cck->false_alarm_cnt),
-                        accum_cck->false_alarm_cnt,
-                        delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_sync_err_cnt:",
-                        le32_to_cpu(cck->fina_sync_err_cnt),
-                        accum_cck->fina_sync_err_cnt,
-                        delta_cck->fina_sync_err_cnt,
-                        max_cck->fina_sync_err_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sfd_timeout:",
-                        le32_to_cpu(cck->sfd_timeout),
-                        accum_cck->sfd_timeout,
-                        delta_cck->sfd_timeout, max_cck->sfd_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "fina_timeout:",
-                        le32_to_cpu(cck->fina_timeout),
-                        accum_cck->fina_timeout,
-                        delta_cck->fina_timeout, max_cck->fina_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "unresponded_rts:",
-                        le32_to_cpu(cck->unresponded_rts),
-                        accum_cck->unresponded_rts,
-                        delta_cck->unresponded_rts,
-                        max_cck->unresponded_rts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "rxe_frame_lmt_ovrun:",
-                        le32_to_cpu(cck->rxe_frame_limit_overrun),
-                        accum_cck->rxe_frame_limit_overrun,
-                        delta_cck->rxe_frame_limit_overrun,
-                        max_cck->rxe_frame_limit_overrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_ack_cnt:",
-                        le32_to_cpu(cck->sent_ack_cnt),
-                        accum_cck->sent_ack_cnt,
-                        delta_cck->sent_ack_cnt,
-                        max_cck->sent_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sent_cts_cnt:",
-                        le32_to_cpu(cck->sent_cts_cnt),
-                        accum_cck->sent_cts_cnt,
-                        delta_cck->sent_cts_cnt,
-                        max_cck->sent_cts_cnt);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Rx - GENERAL:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bogus_cts:",
-                        le32_to_cpu(general->bogus_cts),
-                        accum_general->bogus_cts,
-                        delta_general->bogus_cts, max_general->bogus_cts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bogus_ack:",
-                        le32_to_cpu(general->bogus_ack),
-                        accum_general->bogus_ack,
-                        delta_general->bogus_ack, max_general->bogus_ack);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "non_bssid_frames:",
-                        le32_to_cpu(general->non_bssid_frames),
-                        accum_general->non_bssid_frames,
-                        delta_general->non_bssid_frames,
-                        max_general->non_bssid_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "filtered_frames:",
-                        le32_to_cpu(general->filtered_frames),
-                        accum_general->filtered_frames,
-                        delta_general->filtered_frames,
-                        max_general->filtered_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "non_channel_beacons:",
-                        le32_to_cpu(general->non_channel_beacons),
-                        accum_general->non_channel_beacons,
-                        delta_general->non_channel_beacons,
-                        max_general->non_channel_beacons);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
-                                   char __user *user_buf,
-                                   size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
-       ssize_t ret;
-       struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistic information display here is based on
-        * the last statistics notification from uCode
-        * might not reflect the current uCode activity
-        */
-       tx = &priv->_3945.statistics.tx;
-       accum_tx = &priv->_3945.accum_statistics.tx;
-       delta_tx = &priv->_3945.delta_statistics.tx;
-       max_tx = &priv->_3945.max_delta.tx;
-       pos += iwl3945_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_Tx:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "preamble:",
-                        le32_to_cpu(tx->preamble_cnt),
-                        accum_tx->preamble_cnt,
-                        delta_tx->preamble_cnt, max_tx->preamble_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "rx_detected_cnt:",
-                        le32_to_cpu(tx->rx_detected_cnt),
-                        accum_tx->rx_detected_cnt,
-                        delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bt_prio_defer_cnt:",
-                        le32_to_cpu(tx->bt_prio_defer_cnt),
-                        accum_tx->bt_prio_defer_cnt,
-                        delta_tx->bt_prio_defer_cnt,
-                        max_tx->bt_prio_defer_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "bt_prio_kill_cnt:",
-                        le32_to_cpu(tx->bt_prio_kill_cnt),
-                        accum_tx->bt_prio_kill_cnt,
-                        delta_tx->bt_prio_kill_cnt,
-                        max_tx->bt_prio_kill_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "few_bytes_cnt:",
-                        le32_to_cpu(tx->few_bytes_cnt),
-                        accum_tx->few_bytes_cnt,
-                        delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "cts_timeout:",
-                        le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
-                        delta_tx->cts_timeout, max_tx->cts_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "ack_timeout:",
-                        le32_to_cpu(tx->ack_timeout),
-                        accum_tx->ack_timeout,
-                        delta_tx->ack_timeout, max_tx->ack_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "expected_ack_cnt:",
-                        le32_to_cpu(tx->expected_ack_cnt),
-                        accum_tx->expected_ack_cnt,
-                        delta_tx->expected_ack_cnt,
-                        max_tx->expected_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "actual_ack_cnt:",
-                        le32_to_cpu(tx->actual_ack_cnt),
-                        accum_tx->actual_ack_cnt,
-                        delta_tx->actual_ack_cnt,
-                        max_tx->actual_ack_cnt);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
-       ssize_t ret;
-       struct iwl39_statistics_general *general, *accum_general;
-       struct iwl39_statistics_general *delta_general, *max_general;
-       struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
-       struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistic information display here is based on
-        * the last statistics notification from uCode
-        * might not reflect the current uCode activity
-        */
-       general = &priv->_3945.statistics.general;
-       dbg = &priv->_3945.statistics.general.dbg;
-       div = &priv->_3945.statistics.general.div;
-       accum_general = &priv->_3945.accum_statistics.general;
-       delta_general = &priv->_3945.delta_statistics.general;
-       max_general = &priv->_3945.max_delta.general;
-       accum_dbg = &priv->_3945.accum_statistics.general.dbg;
-       delta_dbg = &priv->_3945.delta_statistics.general.dbg;
-       max_dbg = &priv->_3945.max_delta.general.dbg;
-       accum_div = &priv->_3945.accum_statistics.general.div;
-       delta_div = &priv->_3945.delta_statistics.general.div;
-       max_div = &priv->_3945.max_delta.general.div;
-       pos += iwl3945_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos, "%-32s     current"
-                        "acumulative       delta         max\n",
-                        "Statistics_General:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "burst_check:",
-                        le32_to_cpu(dbg->burst_check),
-                        accum_dbg->burst_check,
-                        delta_dbg->burst_check, max_dbg->burst_check);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "burst_count:",
-                        le32_to_cpu(dbg->burst_count),
-                        accum_dbg->burst_count,
-                        delta_dbg->burst_count, max_dbg->burst_count);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "sleep_time:",
-                        le32_to_cpu(general->sleep_time),
-                        accum_general->sleep_time,
-                        delta_general->sleep_time, max_general->sleep_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "slots_out:",
-                        le32_to_cpu(general->slots_out),
-                        accum_general->slots_out,
-                        delta_general->slots_out, max_general->slots_out);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "slots_idle:",
-                        le32_to_cpu(general->slots_idle),
-                        accum_general->slots_idle,
-                        delta_general->slots_idle, max_general->slots_idle);
-       pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
-                        le32_to_cpu(general->ttl_timestamp));
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "tx_on_a:",
-                        le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
-                        delta_div->tx_on_a, max_div->tx_on_a);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "tx_on_b:",
-                        le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
-                        delta_div->tx_on_b, max_div->tx_on_b);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "exec_time:",
-                        le32_to_cpu(div->exec_time), accum_div->exec_time,
-                        delta_div->exec_time, max_div->exec_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        "  %-30s %10u  %10u  %10u  %10u\n",
-                        "probe_time:",
-                        le32_to_cpu(div->probe_time), accum_div->probe_time,
-                        delta_div->probe_time, max_div->probe_time);
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644 (file)
index 8fef4b3..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-                                   size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-                                   size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-                                        char __user *user_buf, size_t count,
-                                        loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
-                                          char __user *user_buf, size_t count,
-                                          loff_t *ppos)
-{
-       return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
-                                          char __user *user_buf, size_t count,
-                                          loff_t *ppos)
-{
-       return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos)
-{
-       return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644 (file)
index 836c991..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND                   (0x0800)
-#define FH39_MEM_UPPER_BOUND                   (0x1000)
-
-#define FH39_CBCC_TABLE                (FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE                (FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE                (FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf)                    (FH39_TFDB_TABLE + \
-                                                ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)       (FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch)         (FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch)    (FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch)    (FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch)                 (FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch)          (FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch)                (FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch)            (FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch)       (FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR          (FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL                 (FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS               (FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch)                 (FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch)          (FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch)          (FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch)       (FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TABLE + 0x010)
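These per-channel helpers in the removed header resolve to fixed offsets inside the 0x0800-0x1000 flow-handler window; for example FH39_TCSR_CONFIG(2) works out to 0x0800 + 0x500 + 2 * 0x20 = 0xd40. A small compile-time check of that arithmetic, illustrative only and not taken from the original header:

/* Illustrative only: redeclares just enough of the removed macros to
 * verify one offset at compile time. */
#include <assert.h>

#define FH39_MEM_LOWER_BOUND	0x0800
#define FH39_TCSR_TABLE		(FH39_MEM_LOWER_BOUND + 0x500)
#define FH39_TCSR(_ch)		(FH39_TCSR_TABLE + (_ch) * 0x20)
#define FH39_TCSR_CONFIG(_ch)	(FH39_TCSR(_ch) + 0x00)

static_assert(FH39_TCSR_CONFIG(2) == 0xd40, "TCSR config offset, channel 2");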
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL                            (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128          (0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST          (0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH                        (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF               (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER            (0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL     (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL      (0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD            (0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT             (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE             (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE            (0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID           (0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR            (0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON       (0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON       (0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B     (0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON                (0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON                (0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH      (0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH            (0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)    (BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)   (BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
-       (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
-        FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE                    (0x01000000)
-
-struct iwl3945_tfd_tb {
-       __le32 addr;
-       __le32 len;
-} __packed;
-
-struct iwl3945_tfd {
-       __le32 control_flags;
-       struct iwl3945_tfd_tb tbs[4];
-       u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644 (file)
index 5c3a68d..0000000
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET      95
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- *   to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- *   (PA) output voltage detector.  This same detector is used during Tx of
- *   long packets in normal operation to provide feedback as to proper output
- *   level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
-       u8 gain_index;          /* index into power (gain) setup table ... */
-       s8 power;               /* ... for this pwr level for this chnl group */
-       u16 v_det;              /* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- *    to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
-       struct iwl3945_eeprom_txpower_sample samples[5];  /* 5 power levels */
-       s32 a, b, c, d, e;      /* coefficients for voltage->power
-                                * formula (signed) */
-       s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
-                                * frequency (signed) */
-       s8 saturation_power;    /* highest power possible by h/w in this
-                                * band */
-       u8 group_channel;       /* "representative" channel # in this band */
-       s16 temperature;        /* h/w temperature at factory calib this band
-                                * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are used to modify a/b/c/d/e coeffs based on
- *   difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
-       u32 Ta;
-       u32 Tb;
-       u32 Tc;
-       u32 Td;
-       u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
-       u8 reserved0[16];
-       u16 device_id;  /* abs.ofs: 16 */
-       u8 reserved1[2];
-       u16 pmc;                /* abs.ofs: 20 */
-       u8 reserved2[20];
-       u8 mac_address[6];      /* abs.ofs: 42 */
-       u8 reserved3[58];
-       u16 board_revision;     /* abs.ofs: 106 */
-       u8 reserved4[11];
-       u8 board_pba_number[9]; /* abs.ofs: 119 */
-       u8 reserved5[8];
-       u16 version;            /* abs.ofs: 136 */
-       u8 sku_cap;             /* abs.ofs: 138 */
-       u8 leds_mode;           /* abs.ofs: 139 */
-       u16 oem_mode;
-       u16 wowlan_mode;        /* abs.ofs: 142 */
-       u16 leds_time_interval; /* abs.ofs: 144 */
-       u8 leds_off_time;       /* abs.ofs: 146 */
-       u8 leds_on_time;        /* abs.ofs: 147 */
-       u8 almgor_m_version;    /* abs.ofs: 148 */
-       u8 antenna_switch_type; /* abs.ofs: 149 */
-       u8 reserved6[42];
-       u8 sku_id[4];           /* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-       u16 band_1_count;       /* abs.ofs: 196 */
-       struct iwl_eeprom_channel band_1_channels[14];  /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-       u16 band_2_count;       /* abs.ofs: 226 */
-       struct iwl_eeprom_channel band_2_channels[13];  /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-       u16 band_3_count;       /* abs.ofs: 254 */
-       struct iwl_eeprom_channel band_3_channels[12];  /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-       u16 band_4_count;       /* abs.ofs: 280 */
-       struct iwl_eeprom_channel band_4_channels[11];  /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-       u16 band_5_count;       /* abs.ofs: 304 */
-       struct iwl_eeprom_channel band_5_channels[6];  /* abs.ofs: 306 */
-
-       u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-       struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
-       struct iwl3945_eeprom_temperature_corr corrections;  /* abs.ofs: 832 */
-       u8 reserved16[172];     /* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)     /* bit 6    */
-#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)     /* bit 7    */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES        5
-#define IWL39_CMD_QUEUE_NUM    4
-
-#define IWL_DEFAULT_TX_RETRY  15
-
-/*********************************************/
-
-#define RFD_SIZE                              4
-#define NUM_TFD_CHUNKS                        4
-
-#define RX_QUEUE_SIZE                         256
-#define RX_QUEUE_MASK                         255
-#define RX_QUEUE_SIZE_LOG                     8
-
-#define U32_PAD(n)             ((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n)       (n << 24)
-#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n)         (n << 28)
-#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND             (0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND             (0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND             (0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND             (0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
-                               IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
-                               IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
-       return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
-              (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
-       __le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
-       return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
-       return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
-       return cpu_to_le16((u16)rate|flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644 (file)
index 7a7f0f3..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
-                               struct iwl_led_cmd *led_cmd)
-{
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_LEDS_CMD,
-               .len = sizeof(struct iwl_led_cmd),
-               .data = led_cmd,
-               .flags = CMD_ASYNC,
-               .callback = NULL,
-       };
-
-       return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
-       .cmd = iwl3945_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644 (file)
index 9671627..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644 (file)
index 8faeaf2..0000000
+++ /dev/null
@@ -1,996 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
-       7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
-       7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
-       0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
-       7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
-       s8 min_rssi;
-       u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
-       {-60, IWL_RATE_54M_INDEX},
-       {-64, IWL_RATE_48M_INDEX},
-       {-72, IWL_RATE_36M_INDEX},
-       {-80, IWL_RATE_24M_INDEX},
-       {-84, IWL_RATE_18M_INDEX},
-       {-85, IWL_RATE_12M_INDEX},
-       {-87, IWL_RATE_9M_INDEX},
-       {-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
-       {-60, IWL_RATE_54M_INDEX},
-       {-64, IWL_RATE_48M_INDEX},
-       {-68, IWL_RATE_36M_INDEX},
-       {-80, IWL_RATE_24M_INDEX},
-       {-84, IWL_RATE_18M_INDEX},
-       {-85, IWL_RATE_12M_INDEX},
-       {-86, IWL_RATE_11M_INDEX},
-       {-88, IWL_RATE_5M_INDEX},
-       {-90, IWL_RATE_2M_INDEX},
-       {-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW          62
-#define IWL_RATE_FLUSH         (3*HZ)
-#define IWL_RATE_WIN_FLUSH       (HZ/2)
-#define IWL39_RATE_HIGH_TH          11520
-#define IWL_SUCCESS_UP_TH         8960
-#define IWL_SUCCESS_DOWN_TH      10880
-#define IWL_RATE_MIN_FAILURE_TH       6
-#define IWL_RATE_MIN_SUCCESS_TH       8
-#define IWL_RATE_DECREASE_TH       1920
-#define IWL_RATE_RETRY_TH           15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
-       u32 index = 0;
-       u32 table_size = 0;
-       struct iwl3945_tpt_entry *tpt_table = NULL;
-
-       if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
-               rssi = IWL_MIN_RSSI_VAL;
-
-       switch (band) {
-       case IEEE80211_BAND_2GHZ:
-               tpt_table = iwl3945_tpt_table_g;
-               table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
-               break;
-
-       case IEEE80211_BAND_5GHZ:
-               tpt_table = iwl3945_tpt_table_a;
-               table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
-               break;
-
-       default:
-               BUG();
-               break;
-       }
-
-       while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
-               index++;
-
-       index = min(index, (table_size - 1));
-
-       return tpt_table[index].index;
-}
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
-       window->data = 0;
-       window->success_counter = 0;
-       window->success_ratio = -1;
-       window->counter = 0;
-       window->average_tpt = IWL_INVALID_VALUE;
-       window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed.  If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
-       int unflushed = 0;
-       int i;
-       unsigned long flags;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-       /*
-        * For each rate, if we have collected data on that rate
-        * and it has been more than IWL_RATE_WIN_FLUSH
-        * since we flushed, clear out the gathered statistics
-        */
-       for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
-               if (!rs_sta->win[i].counter)
-                       continue;
-
-               spin_lock_irqsave(&rs_sta->lock, flags);
-               if (time_after(jiffies, rs_sta->win[i].stamp +
-                              IWL_RATE_WIN_FLUSH)) {
-                       IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
-                                      "index %d\n",
-                                      rs_sta->win[i].counter, i);
-                       iwl3945_clear_window(&rs_sta->win[i]);
-               } else
-                       unflushed++;
-               spin_unlock_irqrestore(&rs_sta->lock, flags);
-       }
-
-       return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX              5000   /* msec */
-#define IWL_RATE_FLUSH_MIN              50     /* msec */
-#define IWL_AVERAGE_PACKETS             1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
-       struct iwl3945_rs_sta *rs_sta = (void *)data;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-       int unflushed = 0;
-       unsigned long flags;
-       u32 packet_count, duration, pps;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       /* Number of packets Rx'd since last time this timer ran */
-       /* Number of packets Tx'd since last time this timer ran */
-
-       rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
-       if (unflushed) {
-               duration =
-                   jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
-               IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
-                              packet_count, duration);
-
-               /* Determine packets per second */
-               if (duration)
-                       pps = (packet_count * 1000) / duration;
-               else
-                       pps = 0;
-
-               if (pps) {
-                       duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
-                       if (duration < IWL_RATE_FLUSH_MIN)
-                               duration = IWL_RATE_FLUSH_MIN;
-                       else if (duration > IWL_RATE_FLUSH_MAX)
-                               duration = IWL_RATE_FLUSH_MAX;
-               } else
-                       duration = IWL_RATE_FLUSH_MAX;
-
-               rs_sta->flush_time = msecs_to_jiffies(duration);
-
-               IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
-                              duration, packet_count);
-
-               mod_timer(&rs_sta->rate_scale_flush, jiffies +
-                         rs_sta->flush_time);
-
-               rs_sta->last_partial_flush = jiffies;
-       } else {
-               rs_sta->flush_time = IWL_RATE_FLUSH;
-               rs_sta->flush_pending = 0;
-       }
-       /* If there weren't any unflushed entries, we don't schedule the timer
-        * to run again */
-
-       rs_sta->last_flush = jiffies;
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate.  window->data contains the bitmask of successful
- * packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
-                               struct iwl3945_rate_scale_data *window,
-                               int success, int retries, int index)
-{
-       unsigned long flags;
-       s32 fail_count;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-       if (!retries) {
-               IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
-               return;
-       }
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       /*
-        * Keep track of only the latest 62 tx frame attempts in this rate's
-        * history window; anything older isn't really relevant any more.
-        * If we have filled up the sliding window, drop the oldest attempt;
-        * if the oldest attempt (highest bit in bitmap) shows "success",
-        * subtract "1" from the success counter (this is the main reason
-        * we keep these bitmaps!).
-        */
-       while (retries > 0) {
-               if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
-                       /* remove earliest */
-                       window->counter = IWL_RATE_MAX_WINDOW - 1;
-
-                       if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
-                               window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
-                               window->success_counter--;
-                       }
-               }
-
-               /* Increment frames-attempted counter */
-               window->counter++;
-
-               /* Shift bitmap by one frame (throw away oldest history),
-                * OR in "1", and increment "success" if this
-                * frame was successful. */
-               window->data <<= 1;
-               if (success > 0) {
-                       window->success_counter++;
-                       window->data |= 0x1;
-                       success--;
-               }
-
-               retries--;
-       }
-
-       /* Calculate current success ratio, avoid divide-by-0! */
-       if (window->counter > 0)
-               window->success_ratio = 128 * (100 * window->success_counter)
-                                       / window->counter;
-       else
-               window->success_ratio = IWL_INVALID_VALUE;
-
-       fail_count = window->counter - window->success_counter;
-
-       /* Calculate average throughput, if we have enough history. */
-       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
-           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
-               window->average_tpt = ((window->success_ratio *
-                               rs_sta->expected_tpt[index] + 64) / 128);
-       else
-               window->average_tpt = IWL_INVALID_VALUE;
-
-       /* Tag this window as having been updated */
-       window->stamp = jiffies;
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
-       struct ieee80211_hw *hw = priv->hw;
-       struct ieee80211_conf *conf = &priv->hw->conf;
-       struct iwl3945_sta_priv *psta;
-       struct iwl3945_rs_sta *rs_sta;
-       struct ieee80211_supported_band *sband;
-       int i;
-
-       IWL_DEBUG_INFO(priv, "enter\n");
-       if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
-               goto out;
-
-       psta = (struct iwl3945_sta_priv *) sta->drv_priv;
-       rs_sta = &psta->rs_sta;
-       sband = hw->wiphy->bands[conf->channel->band];
-
-       rs_sta->priv = priv;
-
-       rs_sta->start_rate = IWL_RATE_INVALID;
-
-       /* default to just 802.11b */
-       rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
-       rs_sta->last_partial_flush = jiffies;
-       rs_sta->last_flush = jiffies;
-       rs_sta->flush_time = IWL_RATE_FLUSH;
-       rs_sta->last_tx_packets = 0;
-
-       rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
-       rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
-       for (i = 0; i < IWL_RATE_COUNT_3945; i++)
-               iwl3945_clear_window(&rs_sta->win[i]);
-
-       /* TODO: what is a good starting rate for STA? About middle? Maybe not
-        * the lowest or the highest rate.. Could consider using RSSI from
-        * previous packets? Need to have IEEE 802.1X auth succeed immediately
-        * after assoc.. */
-
-       for (i = sband->n_bitrates - 1; i >= 0; i--) {
-               if (sta->supp_rates[sband->band] & (1 << i)) {
-                       rs_sta->last_txrate_idx = i;
-                       break;
-               }
-       }
-
-       priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
-       /* For the 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
-       if (sband->band == IEEE80211_BAND_5GHZ) {
-               rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
-               priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
-                                               IWL_FIRST_OFDM_RATE;
-       }
-
-out:
-       priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
-       IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
-       return hw->priv;
-}
-
-/* rate scale requires free function to be implemented */
-static void iwl3945_rs_free(void *priv)
-{
-       return;
-}
-
-static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
-       struct iwl3945_rs_sta *rs_sta;
-       struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
-       struct iwl_priv *priv __maybe_unused = iwl_priv;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       rs_sta = &psta->rs_sta;
-
-       spin_lock_init(&rs_sta->lock);
-       init_timer(&rs_sta->rate_scale_flush);
-
-       IWL_DEBUG_RATE(priv, "leave\n");
-
-       return rs_sta;
-}
-
-static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
-                       void *priv_sta)
-{
-       struct iwl3945_rs_sta *rs_sta = priv_sta;
-
-       /*
-        * Be careful not to use any members of iwl3945_rs_sta (like trying
-        * to use iwl_priv to print out debugging) since it may not be fully
-        * initialized at this point.
-        */
-       del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * iwl3945_rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
-                        struct ieee80211_sta *sta, void *priv_sta,
-                        struct sk_buff *skb)
-{
-       s8 retries = 0, current_count;
-       int scale_rate_index, first_index, last_index;
-       unsigned long flags;
-       struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
-       struct iwl3945_rs_sta *rs_sta = priv_sta;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       retries = info->status.rates[0].count;
-       /* Sanity Check for retries */
-       if (retries > IWL_RATE_RETRY_TH)
-               retries = IWL_RATE_RETRY_TH;
-
-       first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
-       if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
-               IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
-               return;
-       }
-
-       if (!priv_sta) {
-               IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
-               return;
-       }
-
-       /* Treat uninitialized rate scaling data same as non-existing. */
-       if (!rs_sta->priv) {
-               IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
-               return;
-       }
-
-
-       rs_sta->tx_packets++;
-
-       scale_rate_index = first_index;
-       last_index = first_index;
-
-       /*
-        * Update the window for each rate.  We determine which rates
-        * were Tx'd based on the total number of retries vs. the number
-        * of retries configured for each rate -- currently set to the
-        * priv value 'retry_rate' vs. rate specific
-        *
-        * On exit from this while loop last_index indicates the rate
-        * at which the frame was finally transmitted (or failed if no
-        * ACK)
-        */
-       while (retries > 1) {
-               if ((retries - 1) < priv->retry_rate) {
-                       current_count = (retries - 1);
-                       last_index = scale_rate_index;
-               } else {
-                       current_count = priv->retry_rate;
-                       last_index = iwl3945_rs_next_rate(priv,
-                                                        scale_rate_index);
-               }
-
-               /* Update this rate accounting for as many retries
-                * as was used for it (per current_count) */
-               iwl3945_collect_tx_data(rs_sta,
-                                   &rs_sta->win[scale_rate_index],
-                                   0, current_count, scale_rate_index);
-               IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
-                              scale_rate_index, current_count);
-
-               retries -= current_count;
-
-               scale_rate_index = last_index;
-       }
-
-
-       /* Update the last index window with success/failure based on ACK */
-       IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
-                      last_index,
-                      (info->flags & IEEE80211_TX_STAT_ACK) ?
-                      "success" : "failure");
-       iwl3945_collect_tx_data(rs_sta,
-                           &rs_sta->win[last_index],
-                           info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
-       /* We updated the rate scale window -- if it's been more than
-        * flush_time since the last run, schedule the flush
-        * again */
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       if (!rs_sta->flush_pending &&
-           time_after(jiffies, rs_sta->last_flush +
-                      rs_sta->flush_time)) {
-
-               rs_sta->last_partial_flush = jiffies;
-               rs_sta->flush_pending = 1;
-               mod_timer(&rs_sta->rate_scale_flush,
-                         jiffies + rs_sta->flush_time);
-       }
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
-                                u8 index, u16 rate_mask, enum ieee80211_band band)
-{
-       u8 high = IWL_RATE_INVALID;
-       u8 low = IWL_RATE_INVALID;
-       struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
-       /* 802.11A walks to the next literal adjacent rate in
-        * the rate table */
-       if (unlikely(band == IEEE80211_BAND_5GHZ)) {
-               int i;
-               u32 mask;
-
-               /* Find the previous rate that is in the rate mask */
-               i = index - 1;
-               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
-                       if (rate_mask & mask) {
-                               low = i;
-                               break;
-                       }
-               }
-
-               /* Find the next rate that is in the rate mask */
-               i = index + 1;
-               for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
-                    i++, mask <<= 1) {
-                       if (rate_mask & mask) {
-                               high = i;
-                               break;
-                       }
-               }
-
-               return (high << 8) | low;
-       }
-
-       low = index;
-       while (low != IWL_RATE_INVALID) {
-               if (rs_sta->tgg)
-                       low = iwl3945_rates[low].prev_rs_tgg;
-               else
-                       low = iwl3945_rates[low].prev_rs;
-               if (low == IWL_RATE_INVALID)
-                       break;
-               if (rate_mask & (1 << low))
-                       break;
-               IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
-       }
-
-       high = index;
-       while (high != IWL_RATE_INVALID) {
-               if (rs_sta->tgg)
-                       high = iwl3945_rates[high].next_rs_tgg;
-               else
-                       high = iwl3945_rates[high].next_rs;
-               if (high == IWL_RATE_INVALID)
-                       break;
-               if (rate_mask & (1 << high))
-                       break;
-               IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
-       }
-
-       return (high << 8) | low;
-}
-
-/**
- * iwl3945_rs_get_rate - find the rate for the requested packet
- *
- * Returns the ieee80211_rate structure allocated by the driver.
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
-                       void *priv_sta, struct ieee80211_tx_rate_control *txrc)
-{
-       struct ieee80211_supported_band *sband = txrc->sband;
-       struct sk_buff *skb = txrc->skb;
-       u8 low = IWL_RATE_INVALID;
-       u8 high = IWL_RATE_INVALID;
-       u16 high_low;
-       int index;
-       struct iwl3945_rs_sta *rs_sta = priv_sta;
-       struct iwl3945_rate_scale_data *window = NULL;
-       int current_tpt = IWL_INVALID_VALUE;
-       int low_tpt = IWL_INVALID_VALUE;
-       int high_tpt = IWL_INVALID_VALUE;
-       u32 fail_count;
-       s8 scale_action = 0;
-       unsigned long flags;
-       u16 rate_mask;
-       s8 max_rate_idx = -1;
-       struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       /* Treat uninitialized rate scaling data same as non-existing. */
-       if (rs_sta && !rs_sta->priv) {
-               IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
-               priv_sta = NULL;
-       }
-
-       if (rate_control_send_low(sta, priv_sta, txrc))
-               return;
-
-       rate_mask = sta->supp_rates[sband->band];
-
-       /* get user max rate if set */
-       max_rate_idx = txrc->max_rate_idx;
-       if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
-               max_rate_idx += IWL_FIRST_OFDM_RATE;
-       if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
-               max_rate_idx = -1;
-
-       index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
-       if (sband->band == IEEE80211_BAND_5GHZ)
-               rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       /* for a recent assoc, choose the best rate according
-        * to the rssi value
-        */
-       if (rs_sta->start_rate != IWL_RATE_INVALID) {
-               if (rs_sta->start_rate < index &&
-                  (rate_mask & (1 << rs_sta->start_rate)))
-                       index = rs_sta->start_rate;
-               rs_sta->start_rate = IWL_RATE_INVALID;
-       }
-
-       /* force user max rate if set by user */
-       if ((max_rate_idx != -1) && (max_rate_idx < index)) {
-               if (rate_mask & (1 << max_rate_idx))
-                       index = max_rate_idx;
-       }
-
-       window = &(rs_sta->win[index]);
-
-       fail_count = window->counter - window->success_counter;
-
-       if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
-            (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
-               spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-               IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
-                              "counter: %d, success_counter: %d, "
-                              "expected_tpt is %sNULL\n",
-                              index,
-                              window->counter,
-                              window->success_counter,
-                              rs_sta->expected_tpt ? "not " : "");
-
-          /* Can't calculate this yet; not enough history */
-               window->average_tpt = IWL_INVALID_VALUE;
-               goto out;
-
-       }
-
-       current_tpt = window->average_tpt;
-
-       high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
-                                            sband->band);
-       low = high_low & 0xff;
-       high = (high_low >> 8) & 0xff;
-
-       /* If user set max rate, don't allow higher than the user constraint */
-       if ((max_rate_idx != -1) && (max_rate_idx < high))
-               high = IWL_RATE_INVALID;
-
-       /* Collect Measured throughputs of adjacent rates */
-       if (low != IWL_RATE_INVALID)
-               low_tpt = rs_sta->win[low].average_tpt;
-
-       if (high != IWL_RATE_INVALID)
-               high_tpt = rs_sta->win[high].average_tpt;
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       scale_action = 0;
-
-       /* Low success ratio, need to drop the rate */
-       if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
-               IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
-               scale_action = -1;
-       /* No throughput measured yet for adjacent rates,
-        * try increasing */
-       } else if ((low_tpt == IWL_INVALID_VALUE) &&
-                  (high_tpt == IWL_INVALID_VALUE)) {
-
-               if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
-                       scale_action = 1;
-               else if (low != IWL_RATE_INVALID)
-                       scale_action = 0;
-
-       /* Both adjacent throughputs are measured, but neither one has
-        * better throughput; we're using the best rate, don't change
-        * it! */
-       } else if ((low_tpt != IWL_INVALID_VALUE) &&
-                (high_tpt != IWL_INVALID_VALUE) &&
-                (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
-               IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
-                              "current_tpt [%d]\n",
-                              low_tpt, high_tpt, current_tpt);
-               scale_action = 0;
-
-       /* At least one of the rates has better throughput */
-       } else {
-               if (high_tpt != IWL_INVALID_VALUE) {
-
-                       /* High rate has better throughput, increase
-                        * rate */
-                       if (high_tpt > current_tpt &&
-                               window->success_ratio >= IWL_RATE_INCREASE_TH)
-                               scale_action = 1;
-                       else {
-                               IWL_DEBUG_RATE(priv,
-                                   "decrease rate because of high tpt\n");
-                               scale_action = 0;
-                       }
-               } else if (low_tpt != IWL_INVALID_VALUE) {
-                       if (low_tpt > current_tpt) {
-                               IWL_DEBUG_RATE(priv,
-                                   "decrease rate because of low tpt\n");
-                               scale_action = -1;
-                       } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
-                               /* Lower rate is not better; success
-                                * ratio is good, so increase rate */
-                               scale_action = 1;
-                       }
-               }
-       }
-
-       /* Sanity check; asked for decrease, but success rate or throughput
-        * has been good at old rate.  Don't change it. */
-       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
-                   ((window->success_ratio > IWL_RATE_HIGH_TH) ||
-                    (current_tpt > (100 * rs_sta->expected_tpt[low]))))
-               scale_action = 0;
-
-       switch (scale_action) {
-       case -1:
-
-               /* Decrease rate */
-               if (low != IWL_RATE_INVALID)
-                       index = low;
-               break;
-
-       case 1:
-               /* Increase rate */
-               if (high != IWL_RATE_INVALID)
-                       index = high;
-
-               break;
-
-       case 0:
-       default:
-               /* No change */
-               break;
-       }
-
-       IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
-                      index, scale_action, low, high);
-
- out:
-
-       if (sband->band == IEEE80211_BAND_5GHZ) {
-               if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
-                       index = IWL_FIRST_OFDM_RATE;
-               rs_sta->last_txrate_idx = index;
-               info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
-       } else {
-               rs_sta->last_txrate_idx = index;
-               info->control.rates[0].idx = rs_sta->last_txrate_idx;
-       }
-
-       IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
-       file->private_data = inode->i_private;
-       return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
-                                                 char __user *user_buf,
-                                                 size_t count, loff_t *ppos)
-{
-       char *buff;
-       int desc = 0;
-       int j;
-       ssize_t ret;
-       struct iwl3945_rs_sta *lq_sta = file->private_data;
-
-       buff = kmalloc(1024, GFP_KERNEL);
-       if (!buff)
-               return -ENOMEM;
-
-       desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
-                       "rate=0x%X flush time %d\n",
-                       lq_sta->tx_packets,
-                       lq_sta->last_txrate_idx,
-                       lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
-       for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
-               desc += sprintf(buff+desc,
-                               "counter=%d success=%d %%=%d\n",
-                               lq_sta->win[j].counter,
-                               lq_sta->win[j].success_counter,
-                               lq_sta->win[j].success_ratio);
-       }
-       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-       kfree(buff);
-       return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
-       .read = iwl3945_sta_dbgfs_stats_table_read,
-       .open = iwl3945_open_file_generic,
-       .llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
-                               struct dentry *dir)
-{
-       struct iwl3945_rs_sta *lq_sta = priv_sta;
-
-       lq_sta->rs_sta_dbgfs_stats_table_file =
-               debugfs_create_file("rate_stats_table", 0600, dir,
-               lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
-       struct iwl3945_rs_sta *lq_sta = priv_sta;
-       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void iwl3945_rs_rate_init_stub(void *priv_r,
-                               struct ieee80211_supported_band *sband,
-                             struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
-       .module = NULL,
-       .name = RS_NAME,
-       .tx_status = iwl3945_rs_tx_status,
-       .get_rate = iwl3945_rs_get_rate,
-       .rate_init = iwl3945_rs_rate_init_stub,
-       .alloc = iwl3945_rs_alloc,
-       .free = iwl3945_rs_free,
-       .alloc_sta = iwl3945_rs_alloc_sta,
-       .free_sta = iwl3945_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
-       .add_sta_debugfs = iwl3945_add_debugfs,
-       .remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
-       struct iwl_priv *priv = hw->priv;
-       s32 rssi = 0;
-       unsigned long flags;
-       struct iwl3945_rs_sta *rs_sta;
-       struct ieee80211_sta *sta;
-       struct iwl3945_sta_priv *psta;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-
-       rcu_read_lock();
-
-       sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
-                                priv->stations[sta_id].sta.sta.addr);
-       if (!sta) {
-               IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
-               rcu_read_unlock();
-               return;
-       }
-
-       psta = (void *) sta->drv_priv;
-       rs_sta = &psta->rs_sta;
-
-       spin_lock_irqsave(&rs_sta->lock, flags);
-
-       rs_sta->tgg = 0;
-       switch (priv->band) {
-       case IEEE80211_BAND_2GHZ:
-               /* TODO: this always does G, not a regression */
-               if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
-                                               RXON_FLG_TGG_PROTECT_MSK) {
-                       rs_sta->tgg = 1;
-                       rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
-               } else
-                       rs_sta->expected_tpt = iwl3945_expected_tpt_g;
-               break;
-
-       case IEEE80211_BAND_5GHZ:
-               rs_sta->expected_tpt = iwl3945_expected_tpt_a;
-               break;
-       case IEEE80211_NUM_BANDS:
-               BUG();
-               break;
-       }
-
-       spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-       rssi = priv->_3945.last_rx_rssi;
-       if (rssi == 0)
-               rssi = IWL_MIN_RSSI_VAL;
-
-       IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
-       rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
-       IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
-                      "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
-                      iwl3945_rates[rs_sta->start_rate].plcp);
-       rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
-       return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
-       ieee80211_rate_control_unregister(&rs_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644 (file)
index f7c0a74..0000000
+++ /dev/null
@@ -1,2741 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
-       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
-                                   IWL_RATE_##r##M_IEEE,   \
-                                   IWL_RATE_##ip##M_INDEX, \
-                                   IWL_RATE_##in##M_INDEX, \
-                                   IWL_RATE_##rp##M_INDEX, \
-                                   IWL_RATE_##rn##M_INDEX, \
-                                   IWL_RATE_##pp##M_INDEX, \
-                                   IWL_RATE_##np##M_INDEX, \
-                                   IWL_RATE_##r##M_INDEX_TABLE, \
-                                   IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- *   rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
-       IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
-       IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
-       IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
-       IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
-       IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
-       IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
-       IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-       IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-       IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-       IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-       IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-       IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
-       u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
-       if (rate == IWL_RATE_INVALID)
-               rate = rate_index;
-       return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
- *   Default values of 0 enable uCode events to be logged.
- * Use only for special debugging.  This function is just a placeholder as-is,
- *   you'll need to provide the special bits! ...
- *   ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
-       int i;
-       u32 base;               /* SRAM address of event log header */
-       u32 disable_ptr;        /* SRAM address of event-disable bitmap array */
-       u32 array_size;         /* # of u32 entries in array */
-       static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
-               0x00000000,     /*   31 -    0  Event id numbers */
-               0x00000000,     /*   63 -   32 */
-               0x00000000,     /*   95 -   64 */
-               0x00000000,     /*  127 -   96 */
-               0x00000000,     /*  159 -  128 */
-               0x00000000,     /*  191 -  160 */
-               0x00000000,     /*  223 -  192 */
-               0x00000000,     /*  255 -  224 */
-               0x00000000,     /*  287 -  256 */
-               0x00000000,     /*  319 -  288 */
-               0x00000000,     /*  351 -  320 */
-               0x00000000,     /*  383 -  352 */
-               0x00000000,     /*  415 -  384 */
-               0x00000000,     /*  447 -  416 */
-               0x00000000,     /*  479 -  448 */
-               0x00000000,     /*  511 -  480 */
-               0x00000000,     /*  543 -  512 */
-               0x00000000,     /*  575 -  544 */
-               0x00000000,     /*  607 -  576 */
-               0x00000000,     /*  639 -  608 */
-               0x00000000,     /*  671 -  640 */
-               0x00000000,     /*  703 -  672 */
-               0x00000000,     /*  735 -  704 */
-               0x00000000,     /*  767 -  736 */
-               0x00000000,     /*  799 -  768 */
-               0x00000000,     /*  831 -  800 */
-               0x00000000,     /*  863 -  832 */
-               0x00000000,     /*  895 -  864 */
-               0x00000000,     /*  927 -  896 */
-               0x00000000,     /*  959 -  928 */
-               0x00000000,     /*  991 -  960 */
-               0x00000000,     /* 1023 -  992 */
-               0x00000000,     /* 1055 - 1024 */
-               0x00000000,     /* 1087 - 1056 */
-               0x00000000,     /* 1119 - 1088 */
-               0x00000000,     /* 1151 - 1120 */
-               0x00000000,     /* 1183 - 1152 */
-               0x00000000,     /* 1215 - 1184 */
-               0x00000000,     /* 1247 - 1216 */
-               0x00000000,     /* 1279 - 1248 */
-               0x00000000,     /* 1311 - 1280 */
-               0x00000000,     /* 1343 - 1312 */
-               0x00000000,     /* 1375 - 1344 */
-               0x00000000,     /* 1407 - 1376 */
-               0x00000000,     /* 1439 - 1408 */
-               0x00000000,     /* 1471 - 1440 */
-               0x00000000,     /* 1503 - 1472 */
-       };
-
-       base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
-               return;
-       }
-
-       disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
-       array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
-       if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
-               IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
-                              disable_ptr);
-               for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
-                       iwl_legacy_write_targ_mem(priv,
-                                          disable_ptr + (i * sizeof(u32)),
-                                          evt_disable[i]);
-
-       } else {
-               IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
-               IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
-               IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
-                              disable_ptr, array_size);
-       }
-
-}
-
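-/**
- * iwl3945_hwrate_to_plcp_idx - map a hardware PLCP rate value to a rate index
- *
- * Returns the index into iwl3945_rates[], or -1 if the PLCP value does not
- * match any known 3945 rate.
- */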
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
-       int idx;
-
-       for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
-               if (iwl3945_rates[idx].plcp == plcp)
-                       return idx;
-       return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
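-/* Each TX_STATUS_ENTRY(x) expands to a case label that returns the failure
- * name as a string, so the switch below maps a Tx status code to text. */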
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
-       switch (status & TX_STATUS_MSK) {
-       case TX_3945_STATUS_SUCCESS:
-               return "SUCCESS";
-               TX_STATUS_ENTRY(SHORT_LIMIT);
-               TX_STATUS_ENTRY(LONG_LIMIT);
-               TX_STATUS_ENTRY(FIFO_UNDERRUN);
-               TX_STATUS_ENTRY(MGMNT_ABORT);
-               TX_STATUS_ENTRY(NEXT_FRAG);
-               TX_STATUS_ENTRY(LIFE_EXPIRE);
-               TX_STATUS_ENTRY(DEST_PS);
-               TX_STATUS_ENTRY(ABORTED);
-               TX_STATUS_ENTRY(BT_RETRY);
-               TX_STATUS_ENTRY(STA_INVALID);
-               TX_STATUS_ENTRY(FRAG_DROPPED);
-               TX_STATUS_ENTRY(TID_DISABLE);
-               TX_STATUS_ENTRY(FRAME_FLUSHED);
-               TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
-               TX_STATUS_ENTRY(TX_LOCKED);
-               TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
-       }
-
-       return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
-       return "";
-}
-#endif
-
-/*
- * Get the previous IEEE rate from the rate scale table.
- * For A and B mode we need to override the previous
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
-       int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
-       switch (priv->band) {
-       case IEEE80211_BAND_5GHZ:
-               if (rate == IWL_RATE_12M_INDEX)
-                       next_rate = IWL_RATE_9M_INDEX;
-               else if (rate == IWL_RATE_6M_INDEX)
-                       next_rate = IWL_RATE_6M_INDEX;
-               break;
-       case IEEE80211_BAND_2GHZ:
-               if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-                   iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-                       if (rate == IWL_RATE_11M_INDEX)
-                               next_rate = IWL_RATE_5M_INDEX;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When the FW advances the 'R' index, all entries between the old and new
- * 'R' index need to be reclaimed. As a result, some free space becomes
- * available. If there is enough free space (> low mark), wake the stack
- * that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
-                                    int txq_id, int index)
-{
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
-       struct iwl_tx_info *tx_info;
-
-       BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
-       for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
-               q->read_ptr != index;
-               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-               tx_info = &txq->txb[txq->q.read_ptr];
-               ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-               tx_info->skb = NULL;
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-       }
-
-       if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
-                       (txq_id != IWL39_CMD_QUEUE_NUM) &&
-                       priv->mac80211_registered)
-               iwl_legacy_wake_queue(priv, txq);
-}
-
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-       int txq_id = SEQ_TO_QUEUE(sequence);
-       int index = SEQ_TO_INDEX(sequence);
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct ieee80211_tx_info *info;
-       struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
-       u32  status = le32_to_cpu(tx_resp->status);
-       int rate_idx;
-       int fail;
-
-       if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
-               IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-                         "is out of range [0-%d] %d %d\n", txq_id,
-                         index, txq->q.n_bd, txq->q.write_ptr,
-                         txq->q.read_ptr);
-               return;
-       }
-
-       txq->time_stamp = jiffies;
-       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
-       ieee80211_tx_info_clear_status(info);
-
-       /* Fill the MRR chain with some info about on-chip retransmissions */
-       rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
-       if (info->band == IEEE80211_BAND_5GHZ)
-               rate_idx -= IWL_FIRST_OFDM_RATE;
-
-       fail = tx_resp->failure_frame;
-
-       info->status.rates[0].idx = rate_idx;
-       info->status.rates[0].count = fail + 1; /* add final attempt */
-
-       /* tx_status->rts_retry_count = tx_resp->failure_rts; */
-       info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
-                               IEEE80211_TX_STAT_ACK : 0;
-
-       IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
-                       txq_id, iwl3945_get_tx_fail_reason(status), status,
-                       tx_resp->rate, tx_resp->failure_frame);
-
-       IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
-       iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
-       if (status & TX_ABORT_REQUIRED_MSK)
-               IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- *  RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
-                                           __le32 *stats)
-{
-       int i;
-       __le32 *prev_stats;
-       u32 *accum_stats;
-       u32 *delta, *max_delta;
-
-       prev_stats = (__le32 *)&priv->_3945.statistics;
-       accum_stats = (u32 *)&priv->_3945.accum_statistics;
-       delta = (u32 *)&priv->_3945.delta_statistics;
-       max_delta = (u32 *)&priv->_3945.max_delta;
-
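-       /* Walk the statistics notification word by word (skipping the leading
-        * flags word), accumulate only increases, and remember the largest
-        * delta seen for each counter. */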
-       for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
-            i += sizeof(__le32), stats++, prev_stats++, delta++,
-            max_delta++, accum_stats++) {
-               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-                       *delta = (le32_to_cpu(*stats) -
-                               le32_to_cpu(*prev_stats));
-                       *accum_stats += *delta;
-                       if (*delta > *max_delta)
-                               *max_delta = *delta;
-               }
-       }
-
-       /* reset accumulative statistics for "no-counter" type statistics */
-       priv->_3945.accum_statistics.general.temperature =
-               priv->_3945.statistics.general.temperature;
-       priv->_3945.accum_statistics.general.ttl_timestamp =
-               priv->_3945.statistics.general.ttl_timestamp;
-}
-#endif
-
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
-               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
-                    (int)sizeof(struct iwl3945_notif_statistics),
-                    le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-       iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-
-       memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       __le32 *flag = (__le32 *)&pkt->u.raw;
-
-       if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-               memset(&priv->_3945.accum_statistics, 0,
-                       sizeof(struct iwl3945_notif_statistics));
-               memset(&priv->_3945.delta_statistics, 0,
-                       sizeof(struct iwl3945_notif_statistics));
-               memset(&priv->_3945.max_delta, 0,
-                       sizeof(struct iwl3945_notif_statistics));
-#endif
-               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-       }
-       iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
-               struct ieee80211_hdr *header)
-{
-       /* Filter incoming packets to determine if they are targeted toward
-        * this network, discarding packets coming from ourselves */
-       switch (priv->iw_mode) {
-       case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
-               /* packets to our IBSS update information */
-               return !compare_ether_addr(header->addr3, priv->bssid);
-       case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
-               /* packets from our AP update information */
-               return !compare_ether_addr(header->addr2, priv->bssid);
-       default:
-               return 1;
-       }
-}
-
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
-                                  struct iwl_rx_mem_buffer *rxb,
-                                  struct ieee80211_rx_status *stats)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-       struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-       struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-       u16 len = le16_to_cpu(rx_hdr->len);
-       struct sk_buff *skb;
-       __le16 fc = hdr->frame_control;
-
-       /* Sanity check: the reported frame must fit within the Rx buffer page(s) */
-       if (unlikely(len + IWL39_RX_FRAME_SIZE >
-                    PAGE_SIZE << priv->hw_params.rx_page_order)) {
-               IWL_DEBUG_DROP(priv, "Corruption detected!\n");
-               return;
-       }
-
-       /* We only process data packets if the interface is open */
-       if (unlikely(!priv->is_open)) {
-               IWL_DEBUG_DROP_LIMIT(priv,
-                       "Dropping packet while interface is not open.\n");
-               return;
-       }
-
-       skb = dev_alloc_skb(128);
-       if (!skb) {
-               IWL_ERR(priv, "dev_alloc_skb failed\n");
-               return;
-       }
-
-       if (!iwl3945_mod_params.sw_crypto)
-               iwl_legacy_set_decrypted_flag(priv,
-                                      (struct ieee80211_hdr *)rxb_addr(rxb),
-                                      le32_to_cpu(rx_end->status), stats);
-
-       skb_add_rx_frag(skb, 0, rxb->page,
-                       (void *)rx_hdr->payload - (void *)pkt, len);
-
-       iwl_legacy_update_stats(priv, false, fc, len);
-       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
-       ieee80211_rx(priv->hw, skb);
-       priv->alloc_rxb_page--;
-       rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
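-/**
- * iwl3945_rx_reply_rx - Handle an Rx frame notification
- *
- * Builds an ieee80211_rx_status from the 3945 frame header/end, drops
- * frames with bad PHY statistics or CRC/overflow errors, and hands the
- * frame to mac80211.
- */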
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct ieee80211_hdr *header;
-       struct ieee80211_rx_status rx_status;
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
-       struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
-       struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
-       u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
-       u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
-       u8 network_packet;
-
-       rx_status.flag = 0;
-       rx_status.mactime = le64_to_cpu(rx_end->timestamp);
-       rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-       rx_status.freq =
-               ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
-                                              rx_status.band);
-
-       rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
-       if (rx_status.band == IEEE80211_BAND_5GHZ)
-               rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
-       rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
-                                       RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
-       /* set the preamble flag if appropriate */
-       if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-               rx_status.flag |= RX_FLAG_SHORTPRE;
-
-       if ((unlikely(rx_stats->phy_count > 20))) {
-               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
-                               rx_stats->phy_count);
-               return;
-       }
-
-       if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
-           || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
-               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
-               return;
-       }
-
-
-
-       /* Convert 3945's rssi indicator to dBm */
-       rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
-       IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
-                       rx_status.signal, rx_stats_sig_avg,
-                       rx_stats_noise_diff);
-
-       header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
-       network_packet = iwl3945_is_network_packet(priv, header);
-
-       IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
-                             network_packet ? '*' : ' ',
-                             le16_to_cpu(rx_hdr->channel),
-                             rx_status.signal, rx_status.signal,
-                             rx_status.rate_idx);
-
-       iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
-                                               header);
-
-       if (network_packet) {
-               priv->_3945.last_beacon_time =
-                       le32_to_cpu(rx_end->beacon_timestamp);
-               priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
-               priv->_3945.last_rx_rssi = rx_status.signal;
-       }
-
-       iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
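-/**
- * iwl3945_hw_txq_attach_buf_to_tfd - Attach one buffer to a TFD
- *
- * Appends the given addr/len pair to the TFD at the queue's write pointer;
- * 'reset' clears the TFD first, and 'pad' is recorded in the control flags.
- * At most NUM_TFD_CHUNKS buffers fit in a single TFD.
- */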
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                    struct iwl_tx_queue *txq,
-                                    dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
-       int count;
-       struct iwl_queue *q;
-       struct iwl3945_tfd *tfd, *tfd_tmp;
-
-       q = &txq->q;
-       tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
-       tfd = &tfd_tmp[q->write_ptr];
-
-       if (reset)
-               memset(tfd, 0, sizeof(*tfd));
-
-       count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
-       if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
-               IWL_ERR(priv, "Error: cannot send more than %d chunks\n",
-                         NUM_TFD_CHUNKS);
-               return -EINVAL;
-       }
-
-       tfd->tbs[count].addr = cpu_to_le32(addr);
-       tfd->tbs[count].len = cpu_to_le32(len);
-
-       count++;
-
-       tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
-                                        TFD_CTL_PAD_SET(pad));
-
-       return 0;
-}
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
-       int index = txq->q.read_ptr;
-       struct iwl3945_tfd *tfd = &tfd_tmp[index];
-       struct pci_dev *dev = priv->pci_dev;
-       int i;
-       int counter;
-
-       /* sanity check */
-       counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-       if (counter > NUM_TFD_CHUNKS) {
-               IWL_ERR(priv, "Too many chunks: %i\n", counter);
-               /* @todo issue fatal error, this is quite a serious situation */
-               return;
-       }
-
-       /* Unmap tx_cmd */
-       if (counter)
-               pci_unmap_single(dev,
-                               dma_unmap_addr(&txq->meta[index], mapping),
-                               dma_unmap_len(&txq->meta[index], len),
-                               PCI_DMA_TODEVICE);
-
-       /* unmap chunks if any */
-
-       for (i = 1; i < counter; i++)
-               pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
-                        le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
-       /* free SKB */
-       if (txq->txb) {
-               struct sk_buff *skb;
-
-               skb = txq->txb[txq->q.read_ptr].skb;
-
-               /* can be called from irqs-disabled context */
-               if (skb) {
-                       dev_kfree_skb_any(skb);
-                       txq->txb[txq->q.read_ptr].skb = NULL;
-               }
-       }
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
-                                 struct iwl_device_cmd *cmd,
-                                 struct ieee80211_tx_info *info,
-                                 struct ieee80211_hdr *hdr,
-                                 int sta_id, int tx_id)
-{
-       u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
-       u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
-       u16 rate_mask;
-       int rate;
-       u8 rts_retry_limit;
-       u8 data_retry_limit;
-       __le32 tx_flags;
-       __le16 fc = hdr->frame_control;
-       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
-       rate = iwl3945_rates[rate_index].plcp;
-       tx_flags = tx_cmd->tx_flags;
-
-       /* We need to figure out how to get the sta->supp_rates while
-        * in this running context */
-       rate_mask = IWL_RATES_MASK_3945;
-
-       /* Set retry limit on DATA packets and Probe Responses*/
-       if (ieee80211_is_probe_resp(fc))
-               data_retry_limit = 3;
-       else
-               data_retry_limit = IWL_DEFAULT_TX_RETRY;
-       tx_cmd->data_retry_limit = data_retry_limit;
-
-       if (tx_id >= IWL39_CMD_QUEUE_NUM)
-               rts_retry_limit = 3;
-       else
-               rts_retry_limit = 7;
-
-       if (data_retry_limit < rts_retry_limit)
-               rts_retry_limit = data_retry_limit;
-       tx_cmd->rts_retry_limit = rts_retry_limit;
-
-       tx_cmd->rate = rate;
-       tx_cmd->tx_flags = tx_flags;
-
-       /* OFDM */
-       tx_cmd->supp_rates[0] =
-          ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
-       /* CCK */
-       tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
-       IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
-                      "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
-                      tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
-                      tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
-
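-/**
- * iwl3945_sync_sta - Update only the Tx rate of a station
- *
- * Queues an asynchronous station update (iwl_legacy_send_add_sta) with
- * STA_MODIFY_TX_RATE_MSK set.  Returns the station id, or
- * IWL_INVALID_STATION.
- */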
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
-       unsigned long flags_spin;
-       struct iwl_station_entry *station;
-
-       if (sta_id == IWL_INVALID_STATION)
-               return IWL_INVALID_STATION;
-
-       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-       station = &priv->stations[sta_id];
-
-       station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
-       station->sta.rate_n_flags = cpu_to_le16(tx_rate);
-       station->sta.mode = STA_CONTROL_MODIFY_MSK;
-       iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
-       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
-       IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
-                       sta_id, tx_rate);
-       return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
-               if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
-                       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
-                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
-
-                       iwl_poll_bit(priv, CSR_GPIO_IN,
-                                    CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
-                                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);
-               }
- */
-
-       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
-                       ~APMG_PS_CTRL_MSK_PWR_SRC);
-
-       iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
-                    CSR_GPIO_IN_BIT_AUX_POWER, 5000);  /* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
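-       /* Program Rx DMA: RBD ring base, status write-back address, initial
-        * write pointer, then the Rx config (frag size, RBD ring size,
-        * interrupt destination and message mode). */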
-       iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
-       iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
-                                       rxq->rb_stts_dma);
-       iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
-       iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
-               FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
-               FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
-               FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
-               FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
-               (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
-               FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
-               (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
-               FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
-       /* fake read to flush all prev I/O */
-       iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
-
-       return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
-       /* bypass mode */
-       iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
-       /* RA 0 is active */
-       iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
-       /* all 6 FIFOs are active */
-       iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
-       iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
-       iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
-       iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
-       iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
-       iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
-                            priv->_3945.shared_phys);
-
-       iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
-               FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-
-       return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initializes them again
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
-       int rc;
-       int txq_id, slots_num;
-
-       iwl3945_hw_txq_ctx_free(priv);
-
-       /* allocate tx queue structure */
-       rc = iwl_legacy_alloc_txq_mem(priv);
-       if (rc)
-               return rc;
-
-       /* Tx CMD queue */
-       rc = iwl3945_tx_reset(priv);
-       if (rc)
-               goto error;
-
-       /* Tx queue(s) */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
-                               TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
-                                               slots_num, txq_id);
-               if (rc) {
-                       IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
-                       goto error;
-               }
-       }
-
-       return rc;
-
- error:
-       iwl3945_hw_txq_ctx_free(priv);
-       return rc;
-}
-
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE:  This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
-       int ret = iwl_legacy_apm_init(priv);
-
-       /* Clear APMG (NIC's internal power management) interrupts */
-       iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
-       iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
-       /* Reset radio chip */
-       iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
-                               APMG_PS_CTRL_VAL_RESET_REQ);
-       udelay(5);
-       iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
-                               APMG_PS_CTRL_VAL_RESET_REQ);
-
-       return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       unsigned long flags;
-       u8 rev_id = priv->pci_dev->revision;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Determine HW type */
-       IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
-       if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
-               IWL_DEBUG_INFO(priv, "RTP type\n");
-       else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
-               IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
-       } else {
-               IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
-       }
-
-       if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
-               IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
-       } else
-               IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
-       if ((eeprom->board_revision & 0xF0) == 0xD0) {
-               IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
-                              eeprom->board_revision);
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
-       } else {
-               IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
-                              eeprom->board_revision);
-               iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
-                             CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
-       }
-
-       if (eeprom->almgor_m_version <= 1) {
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
-               IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
-                              eeprom->almgor_m_version);
-       } else {
-               IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
-                              eeprom->almgor_m_version);
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
-       }
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
-               IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
-       if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
-               IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
-       int rc;
-       unsigned long flags;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       priv->cfg->ops->lib->apm_ops.init(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl3945_set_pwr_vmain(priv);
-
-       priv->cfg->ops->lib->apm_ops.config(priv);
-
-       /* Allocate the RX queue, or reset if it is already allocated */
-       if (!rxq->bd) {
-               rc = iwl_legacy_rx_queue_alloc(priv);
-               if (rc) {
-                       IWL_ERR(priv, "Unable to initialize Rx queue\n");
-                       return -ENOMEM;
-               }
-       } else
-               iwl3945_rx_queue_reset(priv, rxq);
-
-       iwl3945_rx_replenish(priv);
-
-       iwl3945_rx_init(priv, rxq);
-
-
-       /* Look at using this instead:
-       rxq->need_update = 1;
-       iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-       */
-
-       iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
-       rc = iwl3945_txq_ctx_reset(priv);
-       if (rc)
-               return rc;
-
-       set_bit(STATUS_INIT, &priv->status);
-
-       return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
-       int txq_id;
-
-       /* Tx queues */
-       if (priv->txq)
-               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-                    txq_id++)
-                       if (txq_id == IWL39_CMD_QUEUE_NUM)
-                               iwl_legacy_cmd_queue_free(priv);
-                       else
-                               iwl_legacy_tx_queue_free(priv, txq_id);
-
-       /* free tx queue structure */
-       iwl_legacy_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
-       int txq_id;
-
-       /* stop SCD */
-       iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
-       iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
-       /* reset TFD queues */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
-               iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
-                               FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
-                               1000);
-       }
-
-       iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp
- * return index delta into power gain settings table
- */
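-/* Slope is -11/100: roughly one gain-table step (1/2 dB) for every ~9 degrees
- * of difference between the current and factory calibration temperatures. */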
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
-       return (new_reading - old_reading) * (-11) / 100;
-}
-
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
-       return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
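-/* Read the raw temperature value from the CSR_UCODE_DRV_GP2 register */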
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
-       return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature
- * get the current temperature by reading from NIC
- */
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       int temperature;
-
-       temperature = iwl3945_hw_get_temperature(priv);
-
-       /* driver's okay range is -260 to +25.
-        *   human readable okay range is 0 to +285 */
-       IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
-       /* handle insane temp reading */
-       if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
-               IWL_ERR(priv, "Error bad temperature value  %d\n", temperature);
-
-               /* if the last known temperature was very hot,
-                *   substitute the 3rd band/group's temp measured at factory */
-               if (priv->last_temperature > 100)
-                       temperature = eeprom->groups[2].temperature;
-               else /* else use most recent "sane" value from driver */
-                       temperature = priv->last_temperature;
-       }
-
-       return temperature;     /* raw, not "human readable" */
-}
-
-/* Adjust Txpower only if the temperature variance is greater than this
- * threshold, which is lower than the 9 degrees used by older versions. */
-#define IWL_TEMPERATURE_LIMIT_TIMER   6
-
-/**
- * iwl3945_is_temp_calib_needed - determines if new calibration is needed
- *
- * records new temperature in priv->temperature.
- * replaces priv->last_temperature *only* if calib needed
- *    (assumes caller will actually do the calibration!). */
-static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
-{
-       int temp_diff;
-
-       priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
-       temp_diff = priv->temperature - priv->last_temperature;
-
-       /* get absolute value */
-       if (temp_diff < 0) {
-               IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
-               temp_diff = -temp_diff;
-       } else if (temp_diff == 0)
-               IWL_DEBUG_POWER(priv, "Same temp,\n");
-       else
-               IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
-       /* if we don't need calibration, *don't* update last_temperature */
-       if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
-               IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
-               return 0;
-       }
-
-       IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
-       /* assume that caller will actually do calib ...
-        *   update the "last temperature" value */
-       priv->last_temperature = priv->temperature;
-       return 1;
-}
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF  -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
-       {
-        {251, 127},            /* 2.4 GHz, highest power */
-        {251, 127},
-        {251, 127},
-        {251, 127},
-        {251, 125},
-        {251, 110},
-        {251, 105},
-        {251, 98},
-        {187, 125},
-        {187, 115},
-        {187, 108},
-        {187, 99},
-        {243, 119},
-        {243, 111},
-        {243, 105},
-        {243, 97},
-        {243, 92},
-        {211, 106},
-        {211, 100},
-        {179, 120},
-        {179, 113},
-        {179, 107},
-        {147, 125},
-        {147, 119},
-        {147, 112},
-        {147, 106},
-        {147, 101},
-        {147, 97},
-        {147, 91},
-        {115, 107},
-        {235, 121},
-        {235, 115},
-        {235, 109},
-        {203, 127},
-        {203, 121},
-        {203, 115},
-        {203, 108},
-        {203, 102},
-        {203, 96},
-        {203, 92},
-        {171, 110},
-        {171, 104},
-        {171, 98},
-        {139, 116},
-        {227, 125},
-        {227, 119},
-        {227, 113},
-        {227, 107},
-        {227, 101},
-        {227, 96},
-        {195, 113},
-        {195, 106},
-        {195, 102},
-        {195, 95},
-        {163, 113},
-        {163, 106},
-        {163, 102},
-        {163, 95},
-        {131, 113},
-        {131, 106},
-        {131, 102},
-        {131, 95},
-        {99, 113},
-        {99, 106},
-        {99, 102},
-        {99, 95},
-        {67, 113},
-        {67, 106},
-        {67, 102},
-        {67, 95},
-        {35, 113},
-        {35, 106},
-        {35, 102},
-        {35, 95},
-        {3, 113},
-        {3, 106},
-        {3, 102},
-        {3, 95} },             /* 2.4 GHz, lowest power */
-       {
-        {251, 127},            /* 5.x GHz, highest power */
-        {251, 120},
-        {251, 114},
-        {219, 119},
-        {219, 101},
-        {187, 113},
-        {187, 102},
-        {155, 114},
-        {155, 103},
-        {123, 117},
-        {123, 107},
-        {123, 99},
-        {123, 92},
-        {91, 108},
-        {59, 125},
-        {59, 118},
-        {59, 109},
-        {59, 102},
-        {59, 96},
-        {59, 90},
-        {27, 104},
-        {27, 98},
-        {27, 92},
-        {115, 118},
-        {115, 111},
-        {115, 104},
-        {83, 126},
-        {83, 121},
-        {83, 113},
-        {83, 105},
-        {83, 99},
-        {51, 118},
-        {51, 111},
-        {51, 104},
-        {51, 98},
-        {19, 116},
-        {19, 109},
-        {19, 102},
-        {19, 98},
-        {19, 93},
-        {171, 113},
-        {171, 107},
-        {171, 99},
-        {139, 120},
-        {139, 113},
-        {139, 107},
-        {139, 99},
-        {107, 120},
-        {107, 113},
-        {107, 107},
-        {107, 99},
-        {75, 120},
-        {75, 113},
-        {75, 107},
-        {75, 99},
-        {43, 120},
-        {43, 113},
-        {43, 107},
-        {43, 99},
-        {11, 120},
-        {11, 113},
-        {11, 107},
-        {11, 99},
-        {131, 107},
-        {131, 99},
-        {99, 120},
-        {99, 113},
-        {99, 107},
-        {99, 99},
-        {67, 120},
-        {67, 113},
-        {67, 107},
-        {67, 99},
-        {35, 120},
-        {35, 113},
-        {35, 107},
-        {35, 99},
-        {3, 120} }             /* 5.x GHz, lowest power */
-};
-
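-/* Clamp a gain-table index to the valid range [0, IWL_MAX_GAIN_ENTRIES - 1] */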
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
-       if (index < 0)
-               return 0;
-       if (index >= IWL_MAX_GAIN_ENTRIES)
-               return IWL_MAX_GAIN_ENTRIES - 1;
-       return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
-                              s32 rate_index, const s8 *clip_pwrs,
-                              struct iwl_channel_info *ch_info,
-                              int band_index)
-{
-       struct iwl3945_scan_power_info *scan_power_info;
-       s8 power;
-       u8 power_index;
-
-       scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
-       /* use this channel group's 6Mbit clipping/saturation pwr,
-        *   but cap at regulatory scan power restriction (set during init
-        *   based on eeprom channel data) for this channel.  */
-       power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
-       power = min(power, priv->tx_power_user_lmt);
-       scan_power_info->requested_power = power;
-
-       /* find difference between new scan *power* and current "normal"
-        *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
-        *   current "normal" temperature-compensated Tx power *index* for
-        *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
-        *   *index*. */
-       power_index = ch_info->power_info[rate_index].power_table_index
-           - (power - ch_info->power_info
-              [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
-       /* store reference index that we use when adjusting *all* scan
-        *   powers.  So we can accommodate user (all channel) or spectrum
-        *   management (single channel) power changes "between" temperature
-        *   feedback compensation procedures.
-        * don't force fit this reference index into gain table; it may be a
-        *   negative number.  This will help avoid errors when we're at
-        *   the lower bounds (highest gains, for warmest temperatures)
-        *   of the table. */
-
-       /* don't exceed table bounds for "real" setting */
-       power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
-       scan_power_info->power_table_index = power_index;
-       scan_power_info->tpc.tx_gain =
-           power_gain_table[band_index][power_index].tx_gain;
-       scan_power_info->tpc.dsp_atten =
-           power_gain_table[band_index][power_index].dsp_atten;
-}
-
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from the channel info struct, and sends them to the NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
-       int rate_idx, i;
-       const struct iwl_channel_info *ch_info = NULL;
-       struct iwl3945_txpowertable_cmd txpower = {
-               .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
-       };
-       u16 chan;
-
-       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
-                     "TX Power requested while scanning!\n"))
-               return -EAGAIN;
-
-       chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
-       txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
-       ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
-       if (!ch_info) {
-               IWL_ERR(priv,
-                       "Failed to get channel info for channel %d [%d]\n",
-                       chan, priv->band);
-               return -EINVAL;
-       }
-
-       if (!iwl_legacy_is_channel_valid(ch_info)) {
-               IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
-                               "non-Tx channel.\n");
-               return 0;
-       }
-
-       /* fill cmd with power settings for all rates for current channel */
-       /* Fill OFDM rate */
-       for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
-            rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
-               txpower.power[i].tpc = ch_info->power_info[i].tpc;
-               txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
-               IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
-                               le16_to_cpu(txpower.channel),
-                               txpower.band,
-                               txpower.power[i].tpc.tx_gain,
-                               txpower.power[i].tpc.dsp_atten,
-                               txpower.power[i].rate);
-       }
-       /* Fill CCK rates */
-       for (rate_idx = IWL_FIRST_CCK_RATE;
-            rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
-               txpower.power[i].tpc = ch_info->power_info[i].tpc;
-               txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
-               IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
-                               le16_to_cpu(txpower.channel),
-                               txpower.band,
-                               txpower.power[i].tpc.tx_gain,
-                               txpower.power[i].tpc.dsp_atten,
-                               txpower.power[i].rate);
-       }
-
-       return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
-                               sizeof(struct iwl3945_txpowertable_cmd),
-                               &txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update.  Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
- *      properly fill out the scan powers and actual h/w gain settings,
- *      and send the changes to the NIC
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
-                            struct iwl_channel_info *ch_info)
-{
-       struct iwl3945_channel_power_info *power_info;
-       int power_changed = 0;
-       int i;
-       const s8 *clip_pwrs;
-       int power;
-
-       /* Get this chnlgrp's rate-to-max/clip-powers table */
-       clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-       /* Get this channel's rate-to-current-power settings table */
-       power_info = ch_info->power_info;
-
-       /* update OFDM Txpower settings */
-       for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
-            i++, ++power_info) {
-               int delta_idx;
-
-               /* limit new power to be no more than h/w capability */
-               power = min(ch_info->curr_txpow, clip_pwrs[i]);
-               if (power == power_info->requested_power)
-                       continue;
-
-               /* find difference between old and new requested powers,
-                *    update base (non-temp-compensated) power index */
-               delta_idx = (power - power_info->requested_power) * 2;
-               power_info->base_power_index -= delta_idx;
-
-               /* save new requested power value */
-               power_info->requested_power = power;
-
-               power_changed = 1;
-       }
-
-       /* update CCK Txpower settings, based on OFDM 12M setting ...
-        *    ... all CCK power settings for a given channel are the *same*. */
-       if (power_changed) {
-               power =
-                   ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
-                   requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
-               /* do all CCK rates' iwl3945_channel_power_info structures */
-               for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
-                       power_info->requested_power = power;
-                       power_info->base_power_index =
-                           ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
-                           base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
-                       ++power_info;
-               }
-       }
-
-       return 0;
-}
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- *      based strictly on regulatory (eeprom and spectrum mgt) limitations
- *      (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
-       s8 max_power;
-
-#if 0
-       /* if we're using TGd limits, use lower of TGd or EEPROM */
-       if (ch_info->tgd_data.max_power != 0)
-               max_power = min(ch_info->tgd_data.max_power,
-                               ch_info->eeprom.max_power_avg);
-
-       /* else just use EEPROM limits */
-       else
-#endif
-               max_power = ch_info->eeprom.max_power_avg;
-
-       return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- *   and the factory calibration temperatures, and bases the new settings
- *   on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
-       struct iwl_channel_info *ch_info = NULL;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       int delta_index;
-       const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
-       u8 a_band;
-       u8 rate_index;
-       u8 scan_tbl_index;
-       u8 i;
-       int ref_temp;
-       int temperature = priv->temperature;
-
-       if (priv->disable_tx_power_cal ||
-           test_bit(STATUS_SCANNING, &priv->status)) {
-               /* do not perform tx power calibration */
-               return 0;
-       }
-       /* set up new Tx power info for each and every channel, 2.4 and 5.x */
-       for (i = 0; i < priv->channel_count; i++) {
-               ch_info = &priv->channel_info[i];
-               a_band = iwl_legacy_is_channel_a_band(ch_info);
-
-               /* Get this chnlgrp's factory calibration temperature */
-               ref_temp = (s16)eeprom->groups[ch_info->group_index].
-                   temperature;
-
-               /* get power index adjustment based on current and factory
-                * temps */
-               delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
-                                                             ref_temp);
-
-               /* set tx power value for all rates, OFDM and CCK */
-               for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
-                    rate_index++) {
-                       int power_idx =
-                           ch_info->power_info[rate_index].base_power_index;
-
-                       /* temperature compensate */
-                       power_idx += delta_index;
-
-                       /* stay within table range */
-                       power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-                       ch_info->power_info[rate_index].
-                           power_table_index = (u8) power_idx;
-                       ch_info->power_info[rate_index].tpc =
-                           power_gain_table[a_band][power_idx];
-               }
-
-               /* Get this chnlgrp's rate-to-max/clip-powers table */
-               clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
-               for (scan_tbl_index = 0;
-                    scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
-                       s32 actual_index = (scan_tbl_index == 0) ?
-                           IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
-                       iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
-                                          actual_index, clip_pwrs,
-                                          ch_info, a_band);
-               }
-       }
-
-       /* send Txpower command for current channel to ucode */
-       return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
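-/**
- * iwl3945_hw_reg_set_txpower - Apply a new user Tx power limit
- *
- * Clamps every channel's current Tx power to the lower of the regulatory
- * limit and the new user limit, then re-runs temperature compensation so
- * the updated gain settings reach the uCode.
- */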
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
-       struct iwl_channel_info *ch_info;
-       s8 max_power;
-       u8 a_band;
-       u8 i;
-
-       if (priv->tx_power_user_lmt == power) {
-               IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
-                               "limit: %ddBm.\n", power);
-               return 0;
-       }
-
-       IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
-       priv->tx_power_user_lmt = power;
-
-       /* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
-       for (i = 0; i < priv->channel_count; i++) {
-               ch_info = &priv->channel_info[i];
-               a_band = iwl_legacy_is_channel_a_band(ch_info);
-
-               /* find minimum power of all user and regulatory constraints
-                *    (does not consider h/w clipping limitations) */
-               max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
-               max_power = min(power, max_power);
-               if (max_power != ch_info->curr_txpow) {
-                       ch_info->curr_txpow = max_power;
-
-                       /* this considers the h/w clipping limitations */
-                       iwl3945_hw_reg_set_new_power(priv, ch_info);
-               }
-       }
-
-       /* update txpower settings for all channels,
-        *   send to NIC if associated. */
-       iwl3945_is_temp_calib_needed(priv);
-       iwl3945_hw_reg_comp_txpower_temp(priv);
-
-       return 0;
-}
-
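-/* Send REPLY_RXON_ASSOC built from the staging RXON, skipping the command
- * entirely if none of the relevant fields differ from the active RXON. */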
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
-                                  struct iwl_rxon_context *ctx)
-{
-       int rc = 0;
-       struct iwl_rx_packet *pkt;
-       struct iwl3945_rxon_assoc_cmd rxon_assoc;
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_RXON_ASSOC,
-               .len = sizeof(rxon_assoc),
-               .flags = CMD_WANT_SKB,
-               .data = &rxon_assoc,
-       };
-       const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
-       const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
-       if ((rxon1->flags == rxon2->flags) &&
-           (rxon1->filter_flags == rxon2->filter_flags) &&
-           (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
-           (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
-               IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
-               return 0;
-       }
-
-       rxon_assoc.flags = ctx->staging.flags;
-       rxon_assoc.filter_flags = ctx->staging.filter_flags;
-       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
-       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
-       rxon_assoc.reserved = 0;
-
-       rc = iwl_legacy_send_cmd_sync(priv, &cmd);
-       if (rc)
-               return rc;
-
-       pkt = (struct iwl_rx_packet *)cmd.reply_page;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
-               rc = -EIO;
-       }
-
-       iwl_legacy_free_pages(priv, cmd.reply_page);
-
-       return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data.  This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       /* cast away the const for active_rxon in this function */
-       struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
-       struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
-       int rc = 0;
-       bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return -EINVAL;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -1;
-
-       /* always get timestamp with Rx frame */
-       staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
-       /* select antenna */
-       staging_rxon->flags &=
-           ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
-       staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
-       rc = iwl_legacy_check_rxon_cmd(priv, ctx);
-       if (rc) {
-               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-               return -EINVAL;
-       }
-
-       /* If we don't need to send a full RXON, we can use
-        * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
-        * and other flags for the current radio configuration. */
-       if (!iwl_legacy_full_rxon_required(priv,
-                       &priv->contexts[IWL_RXON_CTX_BSS])) {
-               rc = iwl_legacy_send_rxon_assoc(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-               if (rc) {
-                       IWL_ERR(priv, "Error setting RXON_ASSOC "
-                                 "configuration (%d).\n", rc);
-                       return rc;
-               }
-
-               memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-               /*
-                * We do not commit tx power settings while channel changing,
-                * do it now if tx power changed.
-                */
-               iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-               return 0;
-       }
-
-       /* If we are currently associated and the new config requires
-        * an RXON_ASSOC and the new config wants the associated mask enabled,
-        * we must clear the associated bit from the active configuration
-        * before we apply the new config */
-       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
-               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-               /*
-                * reserved4 and 5 could have been filled by the iwlcore code.
-                * Let's clear them before pushing to the 3945.
-                */
-               active_rxon->reserved4 = 0;
-               active_rxon->reserved5 = 0;
-               rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
-                                     sizeof(struct iwl3945_rxon_cmd),
-                                     &priv->contexts[IWL_RXON_CTX_BSS].active);
-
-               /* If the mask clearing failed then we set
-                * active_rxon back to what it was previously */
-               if (rc) {
-                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-                       IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
-                                 "configuration (%d).\n", rc);
-                       return rc;
-               }
-               iwl_legacy_clear_ucode_stations(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-               iwl_legacy_restore_stations(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-       }
-
-       IWL_DEBUG_INFO(priv, "Sending RXON\n"
-                      "* with%s RXON_FILTER_ASSOC_MSK\n"
-                      "* channel = %d\n"
-                      "* bssid = %pM\n",
-                      (new_assoc ? "" : "out"),
-                      le16_to_cpu(staging_rxon->channel),
-                      staging_rxon->bssid_addr);
-
-       /*
-        * reserved4 and 5 could have been filled by the iwlcore code.
-        * Let's clear them before pushing to the 3945.
-        */
-       staging_rxon->reserved4 = 0;
-       staging_rxon->reserved5 = 0;
-
-       iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
-       /* Apply the new configuration */
-       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
-                             sizeof(struct iwl3945_rxon_cmd),
-                             staging_rxon);
-       if (rc) {
-               IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
-               return rc;
-       }
-
-       memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
-       if (!new_assoc) {
-               iwl_legacy_clear_ucode_stations(priv,
-                                        &priv->contexts[IWL_RXON_CTX_BSS]);
-               iwl_legacy_restore_stations(priv,
-                                       &priv->contexts[IWL_RXON_CTX_BSS]);
-       }
-
-       /* If we issue a new RXON command which required a tune then we must
-        * send a new TXPOWER command or we won't be able to Tx any frames */
-       rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
-       if (rc) {
-               IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
-               return rc;
-       }
-
-       /* Init the hardware's rate fallback order based on the band */
-       rc = iwl3945_init_hw_rate_table(priv);
-       if (rc) {
-               IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- *     -- correct coeffs for temp (can reset temp timer)
- *     -- save this temp as "last",
- *     -- send new set of gain settings to NIC
- * NOTE:  This should continue working, even when we're not associated,
- *   so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
-       /* This will kick in the "brute force"
-        * iwl3945_hw_reg_comp_txpower_temp() below */
-       if (!iwl3945_is_temp_calib_needed(priv))
-               goto reschedule;
-
-       /* Set up a new set of temp-adjusted TxPowers, send to NIC.
-        * This is based *only* on current temperature,
-        * ignoring any previous power measurements */
-       iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
-       queue_delayed_work(priv->workqueue,
-                          &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                                            _3945.thermal_periodic.work);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-       iwl3945_reg_txpower_periodic(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- *                                for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- *      These channel groups are based on factory-tested channels;
- *      on A-band, EEPROM's "group frequency" entries represent the top
- *      channel in each group 1-4.  All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
-                                      const struct iwl_channel_info *ch_info)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
-       u8 group;
-       u16 group_index = 0;    /* based on factory calib frequencies */
-       u8 grp_channel;
-
-       /* Find the group index for the channel ... don't use index 1(?) */
-       if (iwl_legacy_is_channel_a_band(ch_info)) {
-               for (group = 1; group < 5; group++) {
-                       grp_channel = ch_grp[group].group_channel;
-                       if (ch_info->channel <= grp_channel) {
-                               group_index = group;
-                               break;
-                       }
-               }
-               /* group 4 has a few channels *above* its factory cal freq */
-               if (group == 5)
-                       group_index = 4;
-       } else
-               group_index = 0;        /* 2.4 GHz, group 0 */
-
-       IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
-                       group_index);
-       return group_index;
-}
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- *   into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
-                                      s8 requested_power,
-                                      s32 setting_index, s32 *new_index)
-{
-       const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       s32 index0, index1;
-       s32 power = 2 * requested_power;
-       s32 i;
-       const struct iwl3945_eeprom_txpower_sample *samples;
-       s32 gains0, gains1;
-       s32 res;
-       s32 denominator;
-
-       chnl_grp = &eeprom->groups[setting_index];
-       samples = chnl_grp->samples;
-       for (i = 0; i < 5; i++) {
-               if (power == samples[i].power) {
-                       *new_index = samples[i].gain_index;
-                       return 0;
-               }
-       }
-
-       if (power > samples[1].power) {
-               index0 = 0;
-               index1 = 1;
-       } else if (power > samples[2].power) {
-               index0 = 1;
-               index1 = 2;
-       } else if (power > samples[3].power) {
-               index0 = 2;
-               index1 = 3;
-       } else {
-               index0 = 3;
-               index1 = 4;
-       }
-
-       denominator = (s32) samples[index1].power - (s32) samples[index0].power;
-       if (denominator == 0)
-               return -EINVAL;
-       gains0 = (s32) samples[index0].gain_index * (1 << 19);
-       gains1 = (s32) samples[index1].gain_index * (1 << 19);
-       res = gains0 + (gains1 - gains0) *
-           ((s32) power - (s32) samples[index0].power) / denominator +
-           (1 << 18);
-       *new_index = res >> 19;
-       return 0;
-}
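
The interpolation above works in Q19 fixed point: gain indices are scaled by 2^19, the linear interpolation runs in integer arithmetic, 2^18 is added for rounding, and the result is shifted back down. A self-contained sketch of the same arithmetic over two hypothetical calibration samples:

#include <stdio.h>

struct sample {
        int power;             /* factory-calibrated power point */
        int gain_index;        /* radio/DSP gain table index at that power */
};

/* Interpolate a gain index for "power" between two calibration samples using
 * the Q19 fixed-point scheme above: scale by 2^19, add 2^18 for rounding,
 * shift back down.  Returns -1 for a degenerate (equal-power) pair. */
static int interp_gain_index(struct sample lo, struct sample hi, int power)
{
        long g0, g1, res;
        int denom = hi.power - lo.power;

        if (denom == 0)
                return -1;

        g0 = (long)lo.gain_index << 19;
        g1 = (long)hi.gain_index << 19;
        res = g0 + (g1 - g0) * (power - lo.power) / denom + (1L << 18);
        return (int)(res >> 19);
}

int main(void)
{
        struct sample lo = { .power = 10, .gain_index = 40 };
        struct sample hi = { .power = 20, .gain_index = 30 };

        /* Halfway between the samples: interpolates to gain index 35. */
        printf("%d\n", interp_gain_index(lo, hi, 15));
        return 0;
}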
-
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
-       u32 i;
-       s32 rate_index;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       const struct iwl3945_eeprom_txpower_group *group;
-
-       IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
-       for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
-               s8 *clip_pwrs;  /* table of power levels for each rate */
-               s8 satur_pwr;   /* saturation power for each chnl group */
-               group = &eeprom->groups[i];
-
-               /* sanity check on factory saturation power value */
-               if (group->saturation_power < 40) {
-                       IWL_WARN(priv, "Error: saturation power is %d, "
-                                   "less than minimum expected 40\n",
-                                   group->saturation_power);
-                       return;
-               }
-
-               /*
-                * Derive requested power levels for each rate, based on
-                *   hardware capabilities (saturation power for band).
-                * Basic value is 3dB down from saturation, with further
-                *   power reductions for highest 3 data rates.  These
-                *   backoffs provide headroom for high rate modulation
-                *   power peaks, without too much distortion (clipping).
-                */
-               /* we'll fill in this array with h/w max power levels */
-               clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
-               /* divide factory saturation power by 2 to find -3dB level */
-               satur_pwr = (s8) (group->saturation_power >> 1);
-
-               /* fill in channel group's nominal powers for each rate */
-               for (rate_index = 0;
-                    rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
-                       switch (rate_index) {
-                       case IWL_RATE_36M_INDEX_TABLE:
-                               if (i == 0)     /* B/G */
-                                       *clip_pwrs = satur_pwr;
-                               else    /* A */
-                                       *clip_pwrs = satur_pwr - 5;
-                               break;
-                       case IWL_RATE_48M_INDEX_TABLE:
-                               if (i == 0)
-                                       *clip_pwrs = satur_pwr - 7;
-                               else
-                                       *clip_pwrs = satur_pwr - 10;
-                               break;
-                       case IWL_RATE_54M_INDEX_TABLE:
-                               if (i == 0)
-                                       *clip_pwrs = satur_pwr - 9;
-                               else
-                                       *clip_pwrs = satur_pwr - 12;
-                               break;
-                       default:
-                               *clip_pwrs = satur_pwr;
-                               break;
-                       }
-               }
-       }
-}
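
The switch above applies per-rate backoffs to the halved saturation power: 36/48/54 Mbit lose 0/7/9 units on B/G and 5/10/12 on the A band, while every other rate keeps the full value. The same table restated as a standalone helper; the enum is illustrative and not the driver's rate indexing:

/* Illustrative rate classes; the driver indexes rates by table position. */
enum rate_class { RATE_OTHER, RATE_36M, RATE_48M, RATE_54M };

/* Apply the per-rate headroom backoff to the (already halved) saturation
 * power, reproducing the switch in the loop above. */
static int clip_power(int satur_pwr, enum rate_class rate, int is_a_band)
{
        switch (rate) {
        case RATE_36M:
                return is_a_band ? satur_pwr - 5 : satur_pwr;
        case RATE_48M:
                return is_a_band ? satur_pwr - 10 : satur_pwr - 7;
        case RATE_54M:
                return is_a_band ? satur_pwr - 12 : satur_pwr - 9;
        default:
                return satur_pwr;
        }
}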
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to scan actively (i.e. using Tx).
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
-       struct iwl_channel_info *ch_info = NULL;
-       struct iwl3945_channel_power_info *pwr_info;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       int delta_index;
-       u8 rate_index;
-       u8 scan_tbl_index;
-       const s8 *clip_pwrs;    /* array of power levels for each rate */
-       u8 gain, dsp_atten;
-       s8 power;
-       u8 pwr_index, base_pwr_index, a_band;
-       u8 i;
-       int temperature;
-
-       /* save temperature reference,
-        *   so we can determine next time to calibrate */
-       temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
-       priv->last_temperature = temperature;
-
-       iwl3945_hw_reg_init_channel_groups(priv);
-
-       /* initialize Tx power info for each and every channel, 2.4 and 5.x */
-       for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
-            i++, ch_info++) {
-               a_band = iwl_legacy_is_channel_a_band(ch_info);
-               if (!iwl_legacy_is_channel_valid(ch_info))
-                       continue;
-
-               /* find this channel's channel group (*not* "band") index */
-               ch_info->group_index =
-                       iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
-               /* Get this chnlgrp's rate->max/clip-powers table */
-               clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
-               /* calculate power index *adjustment* value according to
-                *  diff between current temperature and factory temperature */
-               delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
-                               eeprom->groups[ch_info->group_index].
-                               temperature);
-
-               IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
-                               ch_info->channel, delta_index, temperature +
-                               IWL_TEMP_CONVERT);
-
-               /* set tx power value for all OFDM rates */
-               for (rate_index = 0; rate_index < IWL_OFDM_RATES;
-                    rate_index++) {
-                       s32 uninitialized_var(power_idx);
-                       int rc;
-
-                       /* use channel group's clip-power table,
-                        *   but don't exceed channel's max power */
-                       s8 pwr = min(ch_info->max_power_avg,
-                                    clip_pwrs[rate_index]);
-
-                       pwr_info = &ch_info->power_info[rate_index];
-
-                       /* get base (i.e. at factory-measured temperature)
-                        *    power table index for this rate's power */
-                       rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
-                                                        ch_info->group_index,
-                                                        &power_idx);
-                       if (rc) {
-                               IWL_ERR(priv, "Invalid power index\n");
-                               return rc;
-                       }
-                       pwr_info->base_power_index = (u8) power_idx;
-
-                       /* temperature compensate */
-                       power_idx += delta_index;
-
-                       /* stay within range of gain table */
-                       power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
-                       /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
-                       pwr_info->requested_power = pwr;
-                       pwr_info->power_table_index = (u8) power_idx;
-                       pwr_info->tpc.tx_gain =
-                           power_gain_table[a_band][power_idx].tx_gain;
-                       pwr_info->tpc.dsp_atten =
-                           power_gain_table[a_band][power_idx].dsp_atten;
-               }
-
-               /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
-               pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
-               power = pwr_info->requested_power +
-                       IWL_CCK_FROM_OFDM_POWER_DIFF;
-               pwr_index = pwr_info->power_table_index +
-                       IWL_CCK_FROM_OFDM_INDEX_DIFF;
-               base_pwr_index = pwr_info->base_power_index +
-                       IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
-               /* stay within table range */
-               pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
-               gain = power_gain_table[a_band][pwr_index].tx_gain;
-               dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
-               /* fill each CCK rate's iwl3945_channel_power_info structure
-                * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
-                * NOTE:  CCK rates start at end of OFDM rates! */
-               for (rate_index = 0;
-                    rate_index < IWL_CCK_RATES; rate_index++) {
-                       pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
-                       pwr_info->requested_power = power;
-                       pwr_info->power_table_index = pwr_index;
-                       pwr_info->base_power_index = base_pwr_index;
-                       pwr_info->tpc.tx_gain = gain;
-                       pwr_info->tpc.dsp_atten = dsp_atten;
-               }
-
-               /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
-               for (scan_tbl_index = 0;
-                    scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
-                       s32 actual_index = (scan_tbl_index == 0) ?
-                               IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
-                       iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
-                               actual_index, clip_pwrs, ch_info, a_band);
-               }
-       }
-
-       return 0;
-}
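
CCK rates above are not calibrated independently: the shared CCK entry is derived from the OFDM 12 Mbit entry by adding the fixed IWL_CCK_FROM_OFDM_POWER_DIFF / _INDEX_DIFF offsets, with the index then clamped back into table range and the result copied to every CCK rate. A sketch of that derivation with the offsets passed in as parameters, since their actual values live elsewhere in the driver; the struct is a simplified stand-in and the range clamping is omitted:

/* Simplified per-rate power entry (stand-in for iwl3945_channel_power_info). */
struct power_entry {
        int requested_power;
        int power_index;
        int base_index;
};

/* Derive the shared CCK entry from the OFDM 12 Mbit entry by applying the
 * fixed power/index offsets, as the code above does before copying the
 * result to every CCK rate of the channel (table-range clamping omitted). */
static struct power_entry cck_from_ofdm_12m(struct power_entry ofdm_12m,
                                            int power_diff, int index_diff)
{
        struct power_entry cck = {
                .requested_power = ofdm_12m.requested_power + power_diff,
                .power_index     = ofdm_12m.power_index + index_diff,
                .base_index      = ofdm_12m.base_index + index_diff,
        };

        return cck;
}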
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
-       int rc;
-
-       iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
-       rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
-                       FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-       if (rc < 0)
-               IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
-       return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       int txq_id = txq->q.id;
-
-       struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
-       shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
-       iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
-       iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
-       iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
-               FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
-               FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
-               FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
-               FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
-               FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
-       /* fake read to flush all prev. writes */
-       iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
-       return 0;
-}
-
-/*
- * HCMD utils
- */
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
-       switch (cmd_id) {
-       case REPLY_RXON:
-               return sizeof(struct iwl3945_rxon_cmd);
-       case POWER_TABLE_CMD:
-               return sizeof(struct iwl3945_powertable_cmd);
-       default:
-               return len;
-       }
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
-                                                               u8 *data)
-{
-       struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
-       addsta->mode = cmd->mode;
-       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
-       memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
-       addsta->station_flags = cmd->station_flags;
-       addsta->station_flags_msk = cmd->station_flags_msk;
-       addsta->tid_disable_tx = cpu_to_le16(0);
-       addsta->rate_n_flags = cmd->rate_n_flags;
-       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
-       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
-       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
-       return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
-                                    const u8 *addr, u8 *sta_id_r)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       int ret;
-       u8 sta_id;
-       unsigned long flags;
-
-       if (sta_id_r)
-               *sta_id_r = IWL_INVALID_STATION;
-
-       ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM\n", addr);
-               return ret;
-       }
-
-       if (sta_id_r)
-               *sta_id_r = sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].used |= IWL_STA_LOCAL;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
-                                      struct ieee80211_vif *vif, bool add)
-{
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       int ret;
-
-       if (add) {
-               ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
-                                               &vif_priv->ibss_bssid_sta_id);
-               if (ret)
-                       return ret;
-
-               iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
-                                (priv->band == IEEE80211_BAND_5GHZ) ?
-                                IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
-               iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
-               return 0;
-       }
-
-       return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
-                                 vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
-       int rc, i, index, prev_index;
-       struct iwl3945_rate_scaling_cmd rate_cmd = {
-               .reserved = {0, 0, 0},
-       };
-       struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
-       for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
-               index = iwl3945_rates[i].table_rs_index;
-
-               table[index].rate_n_flags =
-                       iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
-               table[index].try_cnt = priv->retry_rate;
-               prev_index = iwl3945_get_prev_ieee_rate(i);
-               table[index].next_rate_index =
-                               iwl3945_rates[prev_index].table_rs_index;
-       }
-
-       switch (priv->band) {
-       case IEEE80211_BAND_5GHZ:
-               IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
-               /* If one of the following CCK rates is used,
-                * have it fall back to the 6M OFDM rate */
-               for (i = IWL_RATE_1M_INDEX_TABLE;
-                       i <= IWL_RATE_11M_INDEX_TABLE; i++)
-                       table[i].next_rate_index =
-                         iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
-               /* Don't fall back to CCK rates */
-               table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
-                                               IWL_RATE_9M_INDEX_TABLE;
-
-               /* Don't drop out of OFDM rates */
-               table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
-                   iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-               break;
-
-       case IEEE80211_BAND_2GHZ:
-               IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
-               /* If an OFDM rate is used, have it fall back to the
-                * 1M CCK rates */
-
-               if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
-                   iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
-                       index = IWL_FIRST_CCK_RATE;
-                       for (i = IWL_RATE_6M_INDEX_TABLE;
-                            i <= IWL_RATE_54M_INDEX_TABLE; i++)
-                               table[i].next_rate_index =
-                                       iwl3945_rates[index].table_rs_index;
-
-                       index = IWL_RATE_11M_INDEX_TABLE;
-                       /* CCK shouldn't fall back to OFDM... */
-                       table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
-               }
-               break;
-
-       default:
-               WARN_ON(1);
-               break;
-       }
-
-       /* Update the rate scaling for control frame Tx */
-       rate_cmd.table_id = 0;
-       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
-                             &rate_cmd);
-       if (rc)
-               return rc;
-
-       /* Update the rate scaling for data frame Tx */
-       rate_cmd.table_id = 1;
-       return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
-                               &rate_cmd);
-}
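
iwl3945_init_hw_rate_table() above wires each entry's next_rate_index to the previous IEEE rate and then patches the chain per band, so that on 5 GHz nothing ever falls back into CCK and on 2.4 GHz OFDM may drop to the 1 Mbit CCK rate when the peer advertises no OFDM rates. A minimal sketch of the generic part of that wiring, with illustrative sequential rate indices rather than the driver's table layout:

#include <stdio.h>

#define NUM_RATES 12

/* Build a simple fallback chain in which every rate falls back to the next
 * slower one and the slowest rate falls back to itself, mirroring the
 * generic next_rate_index wiring above (band-specific fixups omitted). */
static void build_fallback_chain(int next_rate[NUM_RATES])
{
        int i;

        for (i = 0; i < NUM_RATES; i++)
                next_rate[i] = i ? i - 1 : 0;
}

int main(void)
{
        int next_rate[NUM_RATES];
        int i;

        build_fallback_chain(next_rate);
        for (i = 0; i < NUM_RATES; i++)
                printf("rate %2d -> fallback %2d\n", i, next_rate[i]);
        return 0;
}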
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
-       memset((void *)&priv->hw_params, 0,
-              sizeof(struct iwl_hw_params));
-
-       priv->_3945.shared_virt =
-               dma_alloc_coherent(&priv->pci_dev->dev,
-                                  sizeof(struct iwl3945_shared),
-                                  &priv->_3945.shared_phys, GFP_KERNEL);
-       if (!priv->_3945.shared_virt) {
-               IWL_ERR(priv, "failed to allocate pci memory\n");
-               return -ENOMEM;
-       }
-
-       /* Assign number of Usable TX queues */
-       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
-       priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
-       priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
-       priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-       priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-       priv->hw_params.max_stations = IWL3945_STATION_COUNT;
-       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
-       priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
-       priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
-       priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
-       priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
-       return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
-                         struct iwl3945_frame *frame, u8 rate)
-{
-       struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
-       unsigned int frame_size;
-
-       tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
-       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
-       tx_beacon_cmd->tx.sta_id =
-               priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-       frame_size = iwl3945_fill_beacon_frame(priv,
-                               tx_beacon_cmd->frame,
-                               sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
-       BUG_ON(frame_size > MAX_MPDU_SIZE);
-       tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
-       tx_beacon_cmd->tx.rate = rate;
-       tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
-                                     TX_CMD_FLG_TSF_MSK);
-
-       /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
-       tx_beacon_cmd->tx.supp_rates[0] =
-               (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
-       tx_beacon_cmd->tx.supp_rates[1] =
-               (IWL_CCK_BASIC_RATES_MASK & 0xF);
-
-       return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
-       priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
-       priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
-       INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
-                         iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
-       cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       u32 reg;
-       u32 val;
-
-       IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
-       /* verify BSM SRAM contents */
-       val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
-       for (reg = BSM_SRAM_LOWER_BOUND;
-            reg < BSM_SRAM_LOWER_BOUND + len;
-            reg += sizeof(u32), image++) {
-               val = iwl_legacy_read_prph(priv, reg);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "BSM uCode verification failed at "
-                                 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
-                                 BSM_SRAM_LOWER_BOUND,
-                                 reg - BSM_SRAM_LOWER_BOUND, len,
-                                 val, le32_to_cpu(*image));
-                       return -EIO;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
-       return 0;
-}
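
iwl3945_verify_bsm() reads the bootstrap SRAM back one 32-bit word at a time and compares it against the image that was just written. The same idea as a hardware-free helper, comparing two plain arrays instead of reading back through iwl_legacy_read_prph():

#include <stdint.h>
#include <stddef.h>

/* Compare a written-back image against the expected words one dword at a
 * time; return the byte offset of the first mismatch, or -1 on success. */
static long verify_words(const uint32_t *expected, const uint32_t *readback,
                         size_t nwords)
{
        size_t i;

        for (i = 0; i < nwords; i++)
                if (expected[i] != readback[i])
                        return (long)(i * sizeof(uint32_t));

        return -1;
}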
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism.  Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
-       _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
-       return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
-       return;
-}
-
- /**
-  * iwl3945_load_bsm - Load bootstrap instructions
-  *
-  * BSM operation:
-  *
-  * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
-  * in special SRAM that does not power down during RFKILL.  When powering back
-  * up after power-saving sleeps (or during initial uCode load), the BSM loads
-  * the bootstrap program into the on-board processor, and starts it.
-  *
-  * The bootstrap program loads (via DMA) instructions and data for a new
-  * program from host DRAM locations indicated by the host driver in the
-  * BSM_DRAM_* registers.  Once the new program is loaded, it starts
-  * automatically.
-  *
-  * When initializing the NIC, the host driver points the BSM to the
-  * "initialize" uCode image.  This uCode sets up some internal data, then
-  * notifies host via "initialize alive" that it is complete.
-  *
-  * The host then replaces the BSM_DRAM_* pointer values to point to the
-  * normal runtime uCode instructions and a backup uCode data cache buffer
-  * (filled initially with starting data values for the on-board processor),
-  * then triggers the "initialize" uCode to load and launch the runtime uCode,
-  * which begins normal operation.
-  *
-  * When doing a power-save shutdown, runtime uCode saves data SRAM into
-  * the backup data cache in DRAM before SRAM is powered down.
-  *
-  * When powering back up, the BSM loads the bootstrap program.  This reloads
-  * the runtime uCode instructions and the backup data cache into SRAM,
-  * and re-launches the runtime uCode from where it left off.
-  */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-       u32 inst_len;
-       u32 data_len;
-       int rc;
-       int i;
-       u32 done;
-       u32 reg_offset;
-
-       IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
-       /* make sure bootstrap program is no larger than BSM's SRAM size */
-       if (len > IWL39_MAX_BSM_SIZE)
-               return -EINVAL;
-
-       /* Tell bootstrap uCode where to find the "Initialize" uCode
-       *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
-       * NOTE:  iwl3945_initialize_alive_start() will replace these values,
-       *        after the "initialize" uCode has run, to point to
-       *        runtime/protocol instructions and backup data cache. */
-       pinst = priv->ucode_init.p_addr;
-       pdata = priv->ucode_init_data.p_addr;
-       inst_len = priv->ucode_init.len;
-       data_len = priv->ucode_init_data.len;
-
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
-       /* Fill BSM memory with bootstrap instructions */
-       for (reg_offset = BSM_SRAM_LOWER_BOUND;
-            reg_offset < BSM_SRAM_LOWER_BOUND + len;
-            reg_offset += sizeof(u32), image++)
-               _iwl_legacy_write_prph(priv, reg_offset,
-                                         le32_to_cpu(*image));
-
-       rc = iwl3945_verify_bsm(priv);
-       if (rc)
-               return rc;
-
-       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-       iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
-                                IWL39_RTC_INST_LOWER_BOUND);
-       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
-       /* Load bootstrap code into instruction SRAM now,
-        *   to prepare to load "initialize" uCode */
-       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
-               BSM_WR_CTRL_REG_BIT_START);
-
-       /* Wait for load of bootstrap uCode to finish */
-       for (i = 0; i < 100; i++) {
-               done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
-               if (!(done & BSM_WR_CTRL_REG_BIT_START))
-                       break;
-               udelay(10);
-       }
-       if (i < 100)
-               IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
-       else {
-               IWL_ERR(priv, "BSM write did not complete!\n");
-               return -EIO;
-       }
-
-       /* Enable future boot loads whenever power management unit triggers it
-        *   (e.g. when powering back up after power-save shutdown) */
-       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
-               BSM_WR_CTRL_REG_BIT_START_EN);
-
-       return 0;
-}
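
The load routine above ends with a bounded poll: re-read BSM_WR_CTRL_REG up to 100 times with a 10 microsecond delay between reads, until the START bit clears. A generic sketch of that idiom with the register read and the delay supplied as callbacks; both are placeholders, not kernel APIs:

#include <stdbool.h>
#include <stdint.h>

/* Poll until (reg & bit) clears or max_tries reads have been made.
 * read_reg() and short_delay() stand in for the driver's register read
 * and udelay(); they are hypothetical callbacks, not real kernel APIs. */
static bool wait_for_bit_clear(uint32_t (*read_reg)(void),
                               void (*short_delay)(void),
                               uint32_t bit, int max_tries)
{
        int i;

        for (i = 0; i < max_tries; i++) {
                if (!(read_reg() & bit))
                        return true;    /* bit cleared: operation finished */
                short_delay();
        }

        return false;                   /* timed out */
}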
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
-       .rxon_assoc = iwl3945_send_rxon_assoc,
-       .commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
-       .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl3945_hw_txq_free_tfd,
-       .txq_init = iwl3945_hw_tx_queue_init,
-       .load_ucode = iwl3945_load_bsm,
-       .dump_nic_error_log = iwl3945_dump_nic_error_log,
-       .apm_ops = {
-               .init = iwl3945_apm_init,
-               .config = iwl3945_nic_config,
-       },
-       .eeprom_ops = {
-               .regulatory_bands = {
-                       EEPROM_REGULATORY_BAND_1_CHANNELS,
-                       EEPROM_REGULATORY_BAND_2_CHANNELS,
-                       EEPROM_REGULATORY_BAND_3_CHANNELS,
-                       EEPROM_REGULATORY_BAND_4_CHANNELS,
-                       EEPROM_REGULATORY_BAND_5_CHANNELS,
-                       EEPROM_REGULATORY_BAND_NO_HT40,
-                       EEPROM_REGULATORY_BAND_NO_HT40,
-               },
-               .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
-               .release_semaphore = iwl3945_eeprom_release_semaphore,
-       },
-       .send_tx_power  = iwl3945_send_tx_power,
-       .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-
-       .debugfs_ops = {
-               .rx_stats_read = iwl3945_ucode_rx_stats_read,
-               .tx_stats_read = iwl3945_ucode_tx_stats_read,
-               .general_stats_read = iwl3945_ucode_general_stats_read,
-       },
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
-       .post_associate = iwl3945_post_associate,
-       .config_ap = iwl3945_config_ap,
-       .manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
-       .get_hcmd_size = iwl3945_get_hcmd_size,
-       .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-       .request_scan = iwl3945_request_scan,
-       .post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
-       .lib = &iwl3945_lib,
-       .hcmd = &iwl3945_hcmd,
-       .utils = &iwl3945_hcmd_utils,
-       .led = &iwl3945_led_ops,
-       .legacy = &iwl3945_legacy_ops,
-       .ieee80211_ops = &iwl3945_hw_ops,
-};
-
-static struct iwl_base_params iwl3945_base_params = {
-       .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
-       .num_of_queues = IWL39_NUM_QUEUES,
-       .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
-       .set_l0s = false,
-       .use_bsm = true,
-       .led_compensation = 64,
-       .wd_timeout = IWL_DEF_WD_TIMEOUT,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
-       .name = "3945BG",
-       .fw_name_pre = IWL3945_FW_PRE,
-       .ucode_api_max = IWL3945_UCODE_API_MAX,
-       .ucode_api_min = IWL3945_UCODE_API_MIN,
-       .sku = IWL_SKU_G,
-       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
-       .ops = &iwl3945_ops,
-       .mod_params = &iwl3945_mod_params,
-       .base_params = &iwl3945_base_params,
-       .led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
-       .name = "3945ABG",
-       .fw_name_pre = IWL3945_FW_PRE,
-       .ucode_api_max = IWL3945_UCODE_API_MAX,
-       .ucode_api_min = IWL3945_UCODE_API_MIN,
-       .sku = IWL_SKU_A|IWL_SKU_G,
-       .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
-       .ops = &iwl3945_ops,
-       .mod_params = &iwl3945_mod_params,
-       .base_params = &iwl3945_base_params,
-       .led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
-       {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
-       {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
-       {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
-       {0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644 (file)
index b118b59..0000000
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE "iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
-
-/* Default noise level to report when noise measurement is not available.
- *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
- *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- *   Also, -127 works better than 0 when averaging frames with/without
- *   noise info (e.g. averaging might be done in app); measured dBm values are
- *   always negative ... using a negative value as the default keeps all
- *   averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
-       u64 data;
-       s32 success_counter;
-       s32 success_ratio;
-       s32 counter;
-       s32 average_tpt;
-       unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
-       spinlock_t lock;
-       struct iwl_priv *priv;
-       s32 *expected_tpt;
-       unsigned long last_partial_flush;
-       unsigned long last_flush;
-       u32 flush_time;
-       u32 last_tx_packets;
-       u32 tx_packets;
-       u8 tgg;
-       u8 flush_pending;
-       u8 start_rate;
-       struct timer_list rate_scale_flush;
-       struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
-       struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
-       /* used to be in sta_info */
-       int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl3945_sta_priv {
-       struct iwl_station_priv_common common;
-       struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
-       IWL_ANTENNA_DIVERSITY,
-       IWL_ANTENNA_MAIN,
-       IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- *   a value of 0 means RTS on all data/management packets
- *   a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- *   than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD     2347U
-#define MIN_RTS_THRESHOLD         0U
-#define MAX_RTS_THRESHOLD         2347U
-#define MAX_MSDU_SIZE            2304U
-#define MAX_MPDU_SIZE            2346U
-#define DEFAULT_BEACON_INTERVAL   100U
-#define        DEFAULT_SHORT_RETRY_LIMIT 7U
-#define        DEFAULT_LONG_RETRY_LIMIT  4U
-
-#define IWL_TX_FIFO_AC0        0
-#define IWL_TX_FIFO_AC1        1
-#define IWL_TX_FIFO_AC2        2
-#define IWL_TX_FIFO_AC3        3
-#define IWL_TX_FIFO_HCCA_1     5
-#define IWL_TX_FIFO_HCCA_2     6
-#define IWL_TX_FIFO_NONE       7
-
-#define IEEE80211_DATA_LEN              2304
-#define IEEE80211_4ADDR_LEN             30
-#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
-       union {
-               struct ieee80211_hdr frame;
-               struct iwl3945_tx_beacon_cmd beacon;
-               u8 raw[IEEE80211_FRAME_LEN];
-               u8 cmd[360];
-       } u;
-       struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
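
The SEQ_TO_SN/SN_TO_SEQ macros above pack and unpack the 12-bit sequence number held in bits 4..15 of the 802.11 sequence-control field (bits 0..3 carry the fragment number). A tiny usage example with the mask value written out locally so it compiles on its own:

#include <stdio.h>
#include <stdint.h>

#define SCTL_SEQ        0xFFF0  /* sequence-number bits of seq_ctrl (local copy) */

#define SEQ_TO_SN(seq)  (((seq) & SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn)  (((ssn) << 4) & SCTL_SEQ)

int main(void)
{
        uint16_t seq_ctrl = SN_TO_SEQ(100);     /* SN 100, fragment 0 */

        printf("seq_ctrl=0x%04x sn=%d\n", (unsigned)seq_ctrl,
               SEQ_TO_SN(seq_ctrl));
        return 0;
}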
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
-
-#define IWL_SUPPORTED_RATES_IE_LEN         8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT        9
-
-#define IWL_INVALID_RATE     0xFF
-#define IWL_INVALID_VALUE    -1
-
-#define STA_PS_STATUS_WAKE             0
-#define STA_PS_STATUS_SLEEP            1
-
-struct iwl3945_ibss_seq {
-       u8 mac[ETH_ALEN];
-       u16 seq_num;
-       u16 frag_num;
-       unsigned long packet_time;
-       struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
-                      x->u.rx_frame.stats.payload + \
-                      x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
-                      IWL_RX_HDR(x)->payload + \
-                      le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
-
-/******************************************************************************
- *
- * Functions implemented in iwl3945-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-                                       struct ieee80211_hdr *hdr, int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
-                                      char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl3945-base.c
- *
- * NOTE:  The implementations of these functions are hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_         <-- It's part of iwlwifi (should be changed to iwl3945_)
- * iwl3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_      <-- Called from work queue context
- * iwl3945_mac_     <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                           struct iwl_tx_queue *txq,
-                                           dma_addr_t addr, u16 len,
-                                           u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
-                                   struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
-                               struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
-                                struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
-                                 struct iwl_device_cmd *cmd,
-                                 struct ieee80211_tx_info *info,
-                                 struct ieee80211_hdr *hdr,
-                                 int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
-                                struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of station ID to find
- *
- * NOTE:  This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl3945-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
-       const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
deleted file mode 100644 (file)
index f46c80e..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-#ifndef __iwl_4965_calib_h__
-#define __iwl_4965_calib_h__
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-commands.h"
-
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
-void iwl4965_init_sensitivity(struct iwl_priv *priv);
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
-void iwl4965_calib_free_results(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644 (file)
index 1c93665..0000000
+++ /dev/null
@@ -1,774 +0,0 @@
-/******************************************************************************
-*
-* GPL LICENSE SUMMARY
-*
-* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
-* USA
-*
-* The full GNU General Public License is included in this distribution
-* in the file called LICENSE.GPL.
-*
-* Contact Information:
-*  Intel Linux Wireless <ilw@linux.intel.com>
-* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*****************************************************************************/
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static const char *fmt_value = "  %-30s %10u\n";
-static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
-static const char *fmt_header =
-       "%-32s    current  cumulative       delta         max\n";
-
-static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
-       int p = 0;
-       u32 flag;
-
-       flag = le32_to_cpu(priv->_4965.statistics.flag);
-
-       p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
-       if (flag & UCODE_STATISTICS_CLEAR_MSK)
-               p += scnprintf(buf + p, bufsz - p,
-               "\tStatistics have been cleared\n");
-       p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
-               (flag & UCODE_STATISTICS_FREQUENCY_MSK)
-               ? "2.4 GHz" : "5.2 GHz");
-       p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
-               (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
-                ? "enabled" : "disabled");
-
-       return p;
-}
-
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-                               size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = sizeof(struct statistics_rx_phy) * 40 +
-                   sizeof(struct statistics_rx_non_phy) * 40 +
-                   sizeof(struct statistics_rx_ht_phy) * 40 + 400;
-       ssize_t ret;
-       struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
-       struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
-       struct statistics_rx_non_phy *general, *accum_general;
-       struct statistics_rx_non_phy *delta_general, *max_general;
-       struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Cannot allocate buffer\n");
-               return -ENOMEM;
-       }
-
-       /*
-        * The statistics information displayed here is based on
-        * the last statistics notification from uCode and
-        * might not reflect the current uCode activity.
-        */
-       ofdm = &priv->_4965.statistics.rx.ofdm;
-       cck = &priv->_4965.statistics.rx.cck;
-       general = &priv->_4965.statistics.rx.general;
-       ht = &priv->_4965.statistics.rx.ofdm_ht;
-       accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
-       accum_cck = &priv->_4965.accum_statistics.rx.cck;
-       accum_general = &priv->_4965.accum_statistics.rx.general;
-       accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
-       delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
-       delta_cck = &priv->_4965.delta_statistics.rx.cck;
-       delta_general = &priv->_4965.delta_statistics.rx.general;
-       delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
-       max_ofdm = &priv->_4965.max_delta.rx.ofdm;
-       max_cck = &priv->_4965.max_delta.rx.cck;
-       max_general = &priv->_4965.max_delta.rx.general;
-       max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
-
-       pos += iwl4965_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_header, "Statistics_Rx - OFDM:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "ina_cnt:",
-                        le32_to_cpu(ofdm->ina_cnt),
-                        accum_ofdm->ina_cnt,
-                        delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "fina_cnt:",
-                        le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
-                        delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "plcp_err:",
-                        le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
-                        delta_ofdm->plcp_err, max_ofdm->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "crc32_err:",
-                        le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
-                        delta_ofdm->crc32_err, max_ofdm->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "overrun_err:",
-                        le32_to_cpu(ofdm->overrun_err),
-                        accum_ofdm->overrun_err, delta_ofdm->overrun_err,
-                        max_ofdm->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "early_overrun_err:",
-                        le32_to_cpu(ofdm->early_overrun_err),
-                        accum_ofdm->early_overrun_err,
-                        delta_ofdm->early_overrun_err,
-                        max_ofdm->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "crc32_good:",
-                        le32_to_cpu(ofdm->crc32_good),
-                        accum_ofdm->crc32_good, delta_ofdm->crc32_good,
-                        max_ofdm->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "false_alarm_cnt:",
-                        le32_to_cpu(ofdm->false_alarm_cnt),
-                        accum_ofdm->false_alarm_cnt,
-                        delta_ofdm->false_alarm_cnt,
-                        max_ofdm->false_alarm_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "fina_sync_err_cnt:",
-                        le32_to_cpu(ofdm->fina_sync_err_cnt),
-                        accum_ofdm->fina_sync_err_cnt,
-                        delta_ofdm->fina_sync_err_cnt,
-                        max_ofdm->fina_sync_err_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sfd_timeout:",
-                        le32_to_cpu(ofdm->sfd_timeout),
-                        accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
-                        max_ofdm->sfd_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "fina_timeout:",
-                        le32_to_cpu(ofdm->fina_timeout),
-                        accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
-                        max_ofdm->fina_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "unresponded_rts:",
-                        le32_to_cpu(ofdm->unresponded_rts),
-                        accum_ofdm->unresponded_rts,
-                        delta_ofdm->unresponded_rts,
-                        max_ofdm->unresponded_rts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "rxe_frame_lmt_ovrun:",
-                        le32_to_cpu(ofdm->rxe_frame_limit_overrun),
-                        accum_ofdm->rxe_frame_limit_overrun,
-                        delta_ofdm->rxe_frame_limit_overrun,
-                        max_ofdm->rxe_frame_limit_overrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sent_ack_cnt:",
-                        le32_to_cpu(ofdm->sent_ack_cnt),
-                        accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
-                        max_ofdm->sent_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sent_cts_cnt:",
-                        le32_to_cpu(ofdm->sent_cts_cnt),
-                        accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
-                        max_ofdm->sent_cts_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sent_ba_rsp_cnt:",
-                        le32_to_cpu(ofdm->sent_ba_rsp_cnt),
-                        accum_ofdm->sent_ba_rsp_cnt,
-                        delta_ofdm->sent_ba_rsp_cnt,
-                        max_ofdm->sent_ba_rsp_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "dsp_self_kill:",
-                        le32_to_cpu(ofdm->dsp_self_kill),
-                        accum_ofdm->dsp_self_kill,
-                        delta_ofdm->dsp_self_kill,
-                        max_ofdm->dsp_self_kill);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "mh_format_err:",
-                        le32_to_cpu(ofdm->mh_format_err),
-                        accum_ofdm->mh_format_err,
-                        delta_ofdm->mh_format_err,
-                        max_ofdm->mh_format_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "re_acq_main_rssi_sum:",
-                        le32_to_cpu(ofdm->re_acq_main_rssi_sum),
-                        accum_ofdm->re_acq_main_rssi_sum,
-                        delta_ofdm->re_acq_main_rssi_sum,
-                        max_ofdm->re_acq_main_rssi_sum);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_header, "Statistics_Rx - CCK:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "ina_cnt:",
-                        le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
-                        delta_cck->ina_cnt, max_cck->ina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "fina_cnt:",
-                        le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
-                        delta_cck->fina_cnt, max_cck->fina_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "plcp_err:",
-                        le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
-                        delta_cck->plcp_err, max_cck->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "crc32_err:",
-                        le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
-                        delta_cck->crc32_err, max_cck->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "overrun_err:",
-                        le32_to_cpu(cck->overrun_err),
-                        accum_cck->overrun_err, delta_cck->overrun_err,
-                        max_cck->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "early_overrun_err:",
-                        le32_to_cpu(cck->early_overrun_err),
-                        accum_cck->early_overrun_err,
-                        delta_cck->early_overrun_err,
-                        max_cck->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "crc32_good:",
-                        le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
-                        delta_cck->crc32_good, max_cck->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "false_alarm_cnt:",
-                        le32_to_cpu(cck->false_alarm_cnt),
-                        accum_cck->false_alarm_cnt,
-                        delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "fina_sync_err_cnt:",
-                        le32_to_cpu(cck->fina_sync_err_cnt),
-                        accum_cck->fina_sync_err_cnt,
-                        delta_cck->fina_sync_err_cnt,
-                        max_cck->fina_sync_err_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sfd_timeout:",
-                        le32_to_cpu(cck->sfd_timeout),
-                        accum_cck->sfd_timeout, delta_cck->sfd_timeout,
-                        max_cck->sfd_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "fina_timeout:",
-                        le32_to_cpu(cck->fina_timeout),
-                        accum_cck->fina_timeout, delta_cck->fina_timeout,
-                        max_cck->fina_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "unresponded_rts:",
-                        le32_to_cpu(cck->unresponded_rts),
-                        accum_cck->unresponded_rts, delta_cck->unresponded_rts,
-                        max_cck->unresponded_rts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "rxe_frame_lmt_ovrun:",
-                        le32_to_cpu(cck->rxe_frame_limit_overrun),
-                        accum_cck->rxe_frame_limit_overrun,
-                        delta_cck->rxe_frame_limit_overrun,
-                        max_cck->rxe_frame_limit_overrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sent_ack_cnt:",
-                        le32_to_cpu(cck->sent_ack_cnt),
-                        accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
-                        max_cck->sent_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sent_cts_cnt:",
-                        le32_to_cpu(cck->sent_cts_cnt),
-                        accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
-                        max_cck->sent_cts_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sent_ba_rsp_cnt:",
-                        le32_to_cpu(cck->sent_ba_rsp_cnt),
-                        accum_cck->sent_ba_rsp_cnt,
-                        delta_cck->sent_ba_rsp_cnt,
-                        max_cck->sent_ba_rsp_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "dsp_self_kill:",
-                        le32_to_cpu(cck->dsp_self_kill),
-                        accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
-                        max_cck->dsp_self_kill);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "mh_format_err:",
-                        le32_to_cpu(cck->mh_format_err),
-                        accum_cck->mh_format_err, delta_cck->mh_format_err,
-                        max_cck->mh_format_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "re_acq_main_rssi_sum:",
-                        le32_to_cpu(cck->re_acq_main_rssi_sum),
-                        accum_cck->re_acq_main_rssi_sum,
-                        delta_cck->re_acq_main_rssi_sum,
-                        max_cck->re_acq_main_rssi_sum);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_header, "Statistics_Rx - GENERAL:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "bogus_cts:",
-                        le32_to_cpu(general->bogus_cts),
-                        accum_general->bogus_cts, delta_general->bogus_cts,
-                        max_general->bogus_cts);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "bogus_ack:",
-                        le32_to_cpu(general->bogus_ack),
-                        accum_general->bogus_ack, delta_general->bogus_ack,
-                        max_general->bogus_ack);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "non_bssid_frames:",
-                        le32_to_cpu(general->non_bssid_frames),
-                        accum_general->non_bssid_frames,
-                        delta_general->non_bssid_frames,
-                        max_general->non_bssid_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "filtered_frames:",
-                        le32_to_cpu(general->filtered_frames),
-                        accum_general->filtered_frames,
-                        delta_general->filtered_frames,
-                        max_general->filtered_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "non_channel_beacons:",
-                        le32_to_cpu(general->non_channel_beacons),
-                        accum_general->non_channel_beacons,
-                        delta_general->non_channel_beacons,
-                        max_general->non_channel_beacons);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "channel_beacons:",
-                        le32_to_cpu(general->channel_beacons),
-                        accum_general->channel_beacons,
-                        delta_general->channel_beacons,
-                        max_general->channel_beacons);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "num_missed_bcon:",
-                        le32_to_cpu(general->num_missed_bcon),
-                        accum_general->num_missed_bcon,
-                        delta_general->num_missed_bcon,
-                        max_general->num_missed_bcon);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "adc_rx_saturation_time:",
-                        le32_to_cpu(general->adc_rx_saturation_time),
-                        accum_general->adc_rx_saturation_time,
-                        delta_general->adc_rx_saturation_time,
-                        max_general->adc_rx_saturation_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "ina_detect_search_tm:",
-                        le32_to_cpu(general->ina_detection_search_time),
-                        accum_general->ina_detection_search_time,
-                        delta_general->ina_detection_search_time,
-                        max_general->ina_detection_search_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_silence_rssi_a:",
-                        le32_to_cpu(general->beacon_silence_rssi_a),
-                        accum_general->beacon_silence_rssi_a,
-                        delta_general->beacon_silence_rssi_a,
-                        max_general->beacon_silence_rssi_a);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_silence_rssi_b:",
-                        le32_to_cpu(general->beacon_silence_rssi_b),
-                        accum_general->beacon_silence_rssi_b,
-                        delta_general->beacon_silence_rssi_b,
-                        max_general->beacon_silence_rssi_b);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_silence_rssi_c:",
-                        le32_to_cpu(general->beacon_silence_rssi_c),
-                        accum_general->beacon_silence_rssi_c,
-                        delta_general->beacon_silence_rssi_c,
-                        max_general->beacon_silence_rssi_c);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "interference_data_flag:",
-                        le32_to_cpu(general->interference_data_flag),
-                        accum_general->interference_data_flag,
-                        delta_general->interference_data_flag,
-                        max_general->interference_data_flag);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "channel_load:",
-                        le32_to_cpu(general->channel_load),
-                        accum_general->channel_load,
-                        delta_general->channel_load,
-                        max_general->channel_load);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "dsp_false_alarms:",
-                        le32_to_cpu(general->dsp_false_alarms),
-                        accum_general->dsp_false_alarms,
-                        delta_general->dsp_false_alarms,
-                        max_general->dsp_false_alarms);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_rssi_a:",
-                        le32_to_cpu(general->beacon_rssi_a),
-                        accum_general->beacon_rssi_a,
-                        delta_general->beacon_rssi_a,
-                        max_general->beacon_rssi_a);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_rssi_b:",
-                        le32_to_cpu(general->beacon_rssi_b),
-                        accum_general->beacon_rssi_b,
-                        delta_general->beacon_rssi_b,
-                        max_general->beacon_rssi_b);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_rssi_c:",
-                        le32_to_cpu(general->beacon_rssi_c),
-                        accum_general->beacon_rssi_c,
-                        delta_general->beacon_rssi_c,
-                        max_general->beacon_rssi_c);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_energy_a:",
-                        le32_to_cpu(general->beacon_energy_a),
-                        accum_general->beacon_energy_a,
-                        delta_general->beacon_energy_a,
-                        max_general->beacon_energy_a);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_energy_b:",
-                        le32_to_cpu(general->beacon_energy_b),
-                        accum_general->beacon_energy_b,
-                        delta_general->beacon_energy_b,
-                        max_general->beacon_energy_b);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "beacon_energy_c:",
-                        le32_to_cpu(general->beacon_energy_c),
-                        accum_general->beacon_energy_c,
-                        delta_general->beacon_energy_c,
-                        max_general->beacon_energy_c);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_header, "Statistics_Rx - OFDM_HT:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "plcp_err:",
-                        le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
-                        delta_ht->plcp_err, max_ht->plcp_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "overrun_err:",
-                        le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
-                        delta_ht->overrun_err, max_ht->overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "early_overrun_err:",
-                        le32_to_cpu(ht->early_overrun_err),
-                        accum_ht->early_overrun_err,
-                        delta_ht->early_overrun_err,
-                        max_ht->early_overrun_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "crc32_good:",
-                        le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
-                        delta_ht->crc32_good, max_ht->crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "crc32_err:",
-                        le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
-                        delta_ht->crc32_err, max_ht->crc32_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "mh_format_err:",
-                        le32_to_cpu(ht->mh_format_err),
-                        accum_ht->mh_format_err,
-                        delta_ht->mh_format_err, max_ht->mh_format_err);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg_crc32_good:",
-                        le32_to_cpu(ht->agg_crc32_good),
-                        accum_ht->agg_crc32_good,
-                        delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg_mpdu_cnt:",
-                        le32_to_cpu(ht->agg_mpdu_cnt),
-                        accum_ht->agg_mpdu_cnt,
-                        delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg_cnt:",
-                        le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
-                        delta_ht->agg_cnt, max_ht->agg_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "unsupport_mcs:",
-                        le32_to_cpu(ht->unsupport_mcs),
-                        accum_ht->unsupport_mcs,
-                        delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
-                               char __user *user_buf,
-                               size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
-       ssize_t ret;
-       struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Cannot allocate buffer\n");
-               return -ENOMEM;
-       }
-
-       /* The statistics information displayed here is based on
-        * the last statistics notification from uCode and
-        * might not reflect the current uCode activity.
-        */
-       tx = &priv->_4965.statistics.tx;
-       accum_tx = &priv->_4965.accum_statistics.tx;
-       delta_tx = &priv->_4965.delta_statistics.tx;
-       max_tx = &priv->_4965.max_delta.tx;
-
-       pos += iwl4965_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_header, "Statistics_Tx:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "preamble:",
-                        le32_to_cpu(tx->preamble_cnt),
-                        accum_tx->preamble_cnt,
-                        delta_tx->preamble_cnt, max_tx->preamble_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "rx_detected_cnt:",
-                        le32_to_cpu(tx->rx_detected_cnt),
-                        accum_tx->rx_detected_cnt,
-                        delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "bt_prio_defer_cnt:",
-                        le32_to_cpu(tx->bt_prio_defer_cnt),
-                        accum_tx->bt_prio_defer_cnt,
-                        delta_tx->bt_prio_defer_cnt,
-                        max_tx->bt_prio_defer_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "bt_prio_kill_cnt:",
-                        le32_to_cpu(tx->bt_prio_kill_cnt),
-                        accum_tx->bt_prio_kill_cnt,
-                        delta_tx->bt_prio_kill_cnt,
-                        max_tx->bt_prio_kill_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "few_bytes_cnt:",
-                        le32_to_cpu(tx->few_bytes_cnt),
-                        accum_tx->few_bytes_cnt,
-                        delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "cts_timeout:",
-                        le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
-                        delta_tx->cts_timeout, max_tx->cts_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "ack_timeout:",
-                        le32_to_cpu(tx->ack_timeout),
-                        accum_tx->ack_timeout,
-                        delta_tx->ack_timeout, max_tx->ack_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "expected_ack_cnt:",
-                        le32_to_cpu(tx->expected_ack_cnt),
-                        accum_tx->expected_ack_cnt,
-                        delta_tx->expected_ack_cnt,
-                        max_tx->expected_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "actual_ack_cnt:",
-                        le32_to_cpu(tx->actual_ack_cnt),
-                        accum_tx->actual_ack_cnt,
-                        delta_tx->actual_ack_cnt,
-                        max_tx->actual_ack_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "dump_msdu_cnt:",
-                        le32_to_cpu(tx->dump_msdu_cnt),
-                        accum_tx->dump_msdu_cnt,
-                        delta_tx->dump_msdu_cnt,
-                        max_tx->dump_msdu_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "abort_nxt_frame_mismatch:",
-                        le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
-                        accum_tx->burst_abort_next_frame_mismatch_cnt,
-                        delta_tx->burst_abort_next_frame_mismatch_cnt,
-                        max_tx->burst_abort_next_frame_mismatch_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "abort_missing_nxt_frame:",
-                        le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
-                        accum_tx->burst_abort_missing_next_frame_cnt,
-                        delta_tx->burst_abort_missing_next_frame_cnt,
-                        max_tx->burst_abort_missing_next_frame_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "cts_timeout_collision:",
-                        le32_to_cpu(tx->cts_timeout_collision),
-                        accum_tx->cts_timeout_collision,
-                        delta_tx->cts_timeout_collision,
-                        max_tx->cts_timeout_collision);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "ack_ba_timeout_collision:",
-                        le32_to_cpu(tx->ack_or_ba_timeout_collision),
-                        accum_tx->ack_or_ba_timeout_collision,
-                        delta_tx->ack_or_ba_timeout_collision,
-                        max_tx->ack_or_ba_timeout_collision);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg ba_timeout:",
-                        le32_to_cpu(tx->agg.ba_timeout),
-                        accum_tx->agg.ba_timeout,
-                        delta_tx->agg.ba_timeout,
-                        max_tx->agg.ba_timeout);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg ba_resched_frames:",
-                        le32_to_cpu(tx->agg.ba_reschedule_frames),
-                        accum_tx->agg.ba_reschedule_frames,
-                        delta_tx->agg.ba_reschedule_frames,
-                        max_tx->agg.ba_reschedule_frames);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg scd_query_agg_frame:",
-                        le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
-                        accum_tx->agg.scd_query_agg_frame_cnt,
-                        delta_tx->agg.scd_query_agg_frame_cnt,
-                        max_tx->agg.scd_query_agg_frame_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg scd_query_no_agg:",
-                        le32_to_cpu(tx->agg.scd_query_no_agg),
-                        accum_tx->agg.scd_query_no_agg,
-                        delta_tx->agg.scd_query_no_agg,
-                        max_tx->agg.scd_query_no_agg);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg scd_query_agg:",
-                        le32_to_cpu(tx->agg.scd_query_agg),
-                        accum_tx->agg.scd_query_agg,
-                        delta_tx->agg.scd_query_agg,
-                        max_tx->agg.scd_query_agg);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg scd_query_mismatch:",
-                        le32_to_cpu(tx->agg.scd_query_mismatch),
-                        accum_tx->agg.scd_query_mismatch,
-                        delta_tx->agg.scd_query_mismatch,
-                        max_tx->agg.scd_query_mismatch);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg frame_not_ready:",
-                        le32_to_cpu(tx->agg.frame_not_ready),
-                        accum_tx->agg.frame_not_ready,
-                        delta_tx->agg.frame_not_ready,
-                        max_tx->agg.frame_not_ready);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg underrun:",
-                        le32_to_cpu(tx->agg.underrun),
-                        accum_tx->agg.underrun,
-                        delta_tx->agg.underrun, max_tx->agg.underrun);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg bt_prio_kill:",
-                        le32_to_cpu(tx->agg.bt_prio_kill),
-                        accum_tx->agg.bt_prio_kill,
-                        delta_tx->agg.bt_prio_kill,
-                        max_tx->agg.bt_prio_kill);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "agg rx_ba_rsp_cnt:",
-                        le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
-                        accum_tx->agg.rx_ba_rsp_cnt,
-                        delta_tx->agg.rx_ba_rsp_cnt,
-                        max_tx->agg.rx_ba_rsp_cnt);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
-                                    size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char *buf;
-       int bufsz = sizeof(struct statistics_general) * 10 + 300;
-       ssize_t ret;
-       struct statistics_general_common *general, *accum_general;
-       struct statistics_general_common *delta_general, *max_general;
-       struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
-       struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Cannot allocate buffer\n");
-               return -ENOMEM;
-       }
-
-       /* The statistics information displayed here is based on
-        * the last statistics notification from uCode and
-        * might not reflect the current uCode activity.
-        */
-       general = &priv->_4965.statistics.general.common;
-       dbg = &priv->_4965.statistics.general.common.dbg;
-       div = &priv->_4965.statistics.general.common.div;
-       accum_general = &priv->_4965.accum_statistics.general.common;
-       accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
-       accum_div = &priv->_4965.accum_statistics.general.common.div;
-       delta_general = &priv->_4965.delta_statistics.general.common;
-       max_general = &priv->_4965.max_delta.general.common;
-       delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
-       max_dbg = &priv->_4965.max_delta.general.common.dbg;
-       delta_div = &priv->_4965.delta_statistics.general.common.div;
-       max_div = &priv->_4965.max_delta.general.common.div;
-
-       pos += iwl4965_statistics_flag(priv, buf, bufsz);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_header, "Statistics_General:");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_value, "temperature:",
-                        le32_to_cpu(general->temperature));
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_value, "ttl_timestamp:",
-                        le32_to_cpu(general->ttl_timestamp));
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "burst_check:",
-                        le32_to_cpu(dbg->burst_check),
-                        accum_dbg->burst_check,
-                        delta_dbg->burst_check, max_dbg->burst_check);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "burst_count:",
-                        le32_to_cpu(dbg->burst_count),
-                        accum_dbg->burst_count,
-                        delta_dbg->burst_count, max_dbg->burst_count);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "wait_for_silence_timeout_count:",
-                        le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
-                        accum_dbg->wait_for_silence_timeout_cnt,
-                        delta_dbg->wait_for_silence_timeout_cnt,
-                        max_dbg->wait_for_silence_timeout_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "sleep_time:",
-                        le32_to_cpu(general->sleep_time),
-                        accum_general->sleep_time,
-                        delta_general->sleep_time, max_general->sleep_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "slots_out:",
-                        le32_to_cpu(general->slots_out),
-                        accum_general->slots_out,
-                        delta_general->slots_out, max_general->slots_out);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "slots_idle:",
-                        le32_to_cpu(general->slots_idle),
-                        accum_general->slots_idle,
-                        delta_general->slots_idle, max_general->slots_idle);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "tx_on_a:",
-                        le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
-                        delta_div->tx_on_a, max_div->tx_on_a);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "tx_on_b:",
-                        le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
-                        delta_div->tx_on_b, max_div->tx_on_b);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "exec_time:",
-                        le32_to_cpu(div->exec_time), accum_div->exec_time,
-                        delta_div->exec_time, max_div->exec_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "probe_time:",
-                        le32_to_cpu(div->probe_time), accum_div->probe_time,
-                        delta_div->probe_time, max_div->probe_time);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "rx_enable_counter:",
-                        le32_to_cpu(general->rx_enable_counter),
-                        accum_general->rx_enable_counter,
-                        delta_general->rx_enable_counter,
-                        max_general->rx_enable_counter);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                        fmt_table, "num_of_sos_states:",
-                        le32_to_cpu(general->num_of_sos_states),
-                        accum_general->num_of_sos_states,
-                        delta_general->num_of_sos_states,
-                        max_general->num_of_sos_states);
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
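
The three read handlers in the deleted file above all follow the same pattern: allocate a buffer, format the last-received uCode statistics into it with scnprintf(), and hand it back through simple_read_from_buffer(). Below is a minimal sketch of how such a handler would be exposed through debugfs; the file name "ucode_rx_stats", the open helper, and the registration function are illustrative assumptions, not code recovered from this diff.

#include <linux/debugfs.h>
#include <linux/fs.h>

/* Hand the iwl_priv pointer passed to debugfs_create_file() back to the
 * read handler via file->private_data, as the handlers above expect. */
static int iwl4965_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations iwl4965_rx_stats_ops = {
	.open   = iwl4965_stats_open,
	.read   = iwl4965_ucode_rx_stats_read,
	.llseek = default_llseek,
};

/* Illustrative registration helper: 'dir' would be the driver's debugfs
 * directory; 'priv' ends up in inode->i_private for the open hook above. */
static void iwl4965_dbgfs_register_rx_stats(struct iwl_priv *priv,
					    struct dentry *dir)
{
	debugfs_create_file("ucode_rx_stats", S_IRUSR, dir, priv,
			    &iwl4965_rx_stats_ops);
}
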
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644 (file)
index 6c8e353..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-                               size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-                               size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_general_stats_read(struct file *file,
-                       char __user *user_buf, size_t count, loff_t *ppos);
-#else
-static ssize_t
-iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
-{
-       return 0;
-}
-static ssize_t
-iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
-{
-       return 0;
-}
-static ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
-                                           size_t count, loff_t *ppos)
-{
-       return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644 (file)
index cb9baab..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-4965.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
-       u16 count;
-       int ret;
-
-       for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
-               /* Request semaphore */
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-               /* See if we got it */
-               ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
-                               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
-                               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
-                               EEPROM_SEM_TIMEOUT);
-               if (ret >= 0) {
-                       IWL_DEBUG_IO(priv,
-                               "Acquired semaphore after %d tries.\n",
-                               count+1);
-                       return ret;
-               }
-       }
-
-       return ret;
-}
-
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
-{
-       iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
-               CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
-
-int iwl4965_eeprom_check_version(struct iwl_priv *priv)
-{
-       u16 eeprom_ver;
-       u16 calib_ver;
-
-       eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
-       calib_ver = iwl_legacy_eeprom_query16(priv,
-                       EEPROM_4965_CALIB_VERSION_OFFSET);
-
-       if (eeprom_ver < priv->cfg->eeprom_ver ||
-           calib_ver < priv->cfg->eeprom_calib_ver)
-               goto err;
-
-       IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
-                eeprom_ver, calib_ver);
-
-       return 0;
-err:
-       IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
-                 "CALIB=0x%x < 0x%x\n",
-                 eeprom_ver, priv->cfg->eeprom_ver,
-                 calib_ver,  priv->cfg->eeprom_calib_ver);
-       return -EINVAL;
-
-}
-
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
-       const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
-                                       EEPROM_MAC_ADDRESS);
-       memcpy(mac, addr, ETH_ALEN);
-}
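
As the comment above the semaphore helpers explains, every EEPROM access has to be bracketed by acquire/release so the driver and uCode do not drive the chip at the same time. A minimal sketch of that bracketing is shown below, reusing the two helpers deleted above; iwl4965_read_eeprom_word() is an assumed placeholder for the actual word-read routine and is not part of this diff.

/* Hypothetical illustration of the acquire/read/release pattern. */
static int iwl4965_eeprom_read_image(struct iwl_priv *priv, u16 *image,
				     u16 words)
{
	u16 i;
	int ret;

	ret = iwl4965_eeprom_acquire_semaphore(priv);
	if (ret < 0)
		return ret;	/* uCode kept ownership of the EEPROM */

	ret = 0;
	for (i = 0; i < words && !ret; i++)
		ret = iwl4965_read_eeprom_word(priv, i, &image[i]);	/* assumed helper */

	iwl4965_eeprom_release_semaphore(priv);
	return ret;
}
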
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644 (file)
index fc6fa28..0000000
+++ /dev/null
@@ -1,811 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE                        1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE        7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND             (0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND             (0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND             (0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND             (0x80A000)
-
-#define IWL49_RTC_INST_SIZE  (IWL49_RTC_INST_UPPER_BOUND - \
-                               IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE  (IWL49_RTC_DATA_UPPER_BOUND - \
-                               IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
-       return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
-              (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent).  The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp).  After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
- *        must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin).  The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
- * temperature with factory-measured temperatures when calculating txpower
- * settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN  (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX  (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
-       (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
-        ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
-
-/********************* END TEMPERATURE ***************************************/
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- *     1) EEPROM
- *     2) "initialize" alive notification
- *     3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1)  Regulatory information (max txpower and channel usage flags) is provided
- *     separately for each channel that can possibly be supported by the 4965.
- *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- *     (legacy) channels.
- *
- *     See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- *     for locations in EEPROM.
- *
- * 2)  Factory txpower calibration information is provided separately for
- *     sub-bands of contiguous channels.  2.4GHz has just one sub-band,
- *     but 5 GHz has several sub-bands.
- *
- *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- *     See struct iwl4965_eeprom_calib_info (and the tree of structures
- *     contained within it) for format, and struct iwl4965_eeprom for
- *     locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1)  Temperature calculation parameters.
- *
- * 2)  Power supply voltage measurement.
- *
- * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1)  Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- *     Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- *     If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- *     2 transmitters will be used simultaneously; driver must reduce the
- *     regulatory limit by 3 dB (half-power) for each transmitter, so the
- *     combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
- *     reduce target txpower if necessary.
- *
- *     Backoff values below are in 1/2 dB units (equivalent to steps in
- *     txpower gain tables):
- *
- *     OFDM 6 - 36 MBit:  10 steps (5 dB)
- *     OFDM 48 MBit:      15 steps (7.5 dB)
- *     OFDM 54 MBit:      17 steps (8.5 dB)
- *     OFDM 60 MBit:      20 steps (10 dB)
- *     CCK all rates:     10 steps (5 dB)
- *
- *     Backoff values apply to saturation txpower on a per-transmitter basis;
- *     when using MIMO (2 transmitters), each transmitter uses the same
- *     saturation level provided in EEPROM, and the same backoff values;
- *     no reduction (such as with regulatory txpower limits) is required.
- *
- *     Saturation and Backoff values apply equally to 20 MHz (legacy) channel
- *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
- *     factory measurement for HT40 channels.
- *
- *     The result of this step is the final target txpower.  The rest of
- *     the steps figure out the proper settings for the device to achieve
- *     that target txpower.
- *
- *
- * 3)  Determine (EEPROM) calibration sub band for the target channel, by
- *     comparing against first and last channels in each sub band
- *     (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
- *     referencing the 2 factory-measured (sample) channels within the sub band.
- *
- *     Interpolation is based on difference between target channel's frequency
- *     and the sample channels' frequencies.  Since channel numbers are based
- *     on frequency (5 MHz between each channel number), this is equivalent
- *     to interpolating based on channel number differences.
- *
- *     Note that the sample channels may or may not be the channels at the
- *     edges of the sub band.  The target channel may be "outside" of the
- *     span of the sampled channels.
- *
- *     Driver may choose the pair (for 2 Tx chains) of measurements (see
- *     struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- *     txpower comes closest to the desired txpower.  Usually, though,
- *     the middle set of measurements is closest to the regulatory limits,
- *     and is therefore a good choice for all txpower calculations (this
- *     assumes that high accuracy is needed for maximizing legal txpower,
- *     while lower txpower configurations do not need as much accuracy).
- *
- *     Driver should interpolate both members of the chosen measurement pair,
- *     i.e. for both Tx chains (radio transmitters), unless the driver knows
- *     that only one of the chains will be used (e.g. only one tx antenna
- *     connected, but this should be unusual).  The rate scaling algorithm
- *     switches antennas to find best performance, so both Tx chains will
- *     be used (although only one at a time) even for non-MIMO transmissions.
- *
- *     Driver should interpolate factory values for temperature, gain table
- *     index, and actual power.  The power amplifier detector values are
- *     not used by the driver.
- *
- *     Sanity check:  If the target channel happens to be one of the sample
- *     channels, the results should agree with the sample channel's
- *     measurements!
- *
- *
- * 5)  Find difference between desired txpower and (interpolated)
- *     factory-measured txpower.  Using (interpolated) factory gain table index
- *     (shown elsewhere) as a starting point, adjust this index lower to
- *     increase txpower, or higher to decrease txpower, until the target
- *     txpower is reached.  Each step in the gain table is 1/2 dB.
- *
- *     For example, if factory measured txpower is 16 dBm, and target txpower
- *     is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- *     by 3 dB.
- *
- *
- * 6)  Find difference between current device temperature and (interpolated)
- *     factory-measured temperature for sub-band.  Factory values are in
- *     degrees Celsius.  To calculate current temperature, see comments for
- *     "4965 temperature calculation".
- *
- *     If current temperature is higher than factory temperature, driver must
- *     increase gain (lower gain table index), and vice versa.
- *
- *     Temperature affects gain differently for different channels:
- *
- *     2.4 GHz all channels:  3.5 degrees per half-dB step
- *     5 GHz channels 34-43:  4.5 degrees per half-dB step
- *     5 GHz channels >= 44:  4.0 degrees per half-dB step
- *
- *     NOTE:  Temperature can increase rapidly when transmitting, especially
- *            with heavy traffic at high txpowers.  Driver should update
- *            temperature calculations often under these conditions to
- *            maintain strong txpower in the face of rising temperature.
- *
- *
- * 7)  Find difference between current power supply voltage indicator
- *     (from "initialize alive") and factory-measured power supply voltage
- *     indicator (EEPROM).
- *
- *     If the current voltage is higher (indicator is lower) than factory
- *     voltage, gain should be reduced (gain table index increased) by:
- *
- *     (eeprom - current) / 7
- *
- *     If the current voltage is lower (indicator is higher) than factory
- *     voltage, gain should be increased (gain table index decreased) by:
- *
- *     2 * (current - eeprom) / 7
- *
- *     If number of index steps in either direction turns out to be > 2,
- *     something is wrong ... just use 0.
- *
- *     NOTE:  Voltage compensation is independent of band/channel.
- *
- *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
- *            to be constant after this initial measurement.  Voltage
- *            compensation for txpower (number of steps in gain table)
- *            may be calculated once and used until the next uCode bootload.
- *
- *
- * 8)  If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- *     adjust txpower for each transmitter chain, so txpower is balanced
- *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
- *     values in "initialize alive", one pair for each of 5 channel ranges:
- *
- *     Group 0:  5 GHz channel 34-43
- *     Group 1:  5 GHz channel 44-70
- *     Group 2:  5 GHz channel 71-124
- *     Group 3:  5 GHz channel 125-200
- *     Group 4:  2.4 GHz all channels
- *
- *     Add the tx_atten[group][chain] value to the index for the target chain.
- *     The values are signed, but are in pairs of 0 and a non-negative number,
- *     so as to reduce gain (if necessary) of the "hotter" channel.  This
- *     avoids any need to double-check for regulatory compliance after
- *     this step.
- *
- *
- * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
- *     value to the index:
- *
- *     Hardware rev B:  9 steps (4.5 dB)
- *     Hardware rev C:  5 steps (2.5 dB)
- *
- *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- *     bits [3:2], 1 = B, 2 = C.
- *
- *     NOTE:  This compensation is in addition to any saturation backoff that
- *            might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- *     Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- *     location(s) in command (struct iwl4965_txpowertable_cmd).
- */
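As a worked illustration of steps 4-6 above, the following standalone sketch (hypothetical helper names and plain int arithmetic, not the driver's actual routines) shows the linear interpolation between two factory sample channels and how txpower and temperature differences translate into half-dB gain-table index steps:

/* Step 4: linear interpolation between two factory sample channels, based on
 * channel-number difference (5 MHz per channel number).  Illustrative only. */
static int interpolate_calib_value(int target_ch, int ch1, int val1,
				   int ch2, int val2)
{
	if (ch2 == ch1)
		return val1;
	return val1 + (target_ch - ch1) * (val2 - val1) / (ch2 - ch1);
}

/* Step 5: each gain-table step is 1/2 dB; a positive result means "move to a
 * higher index" (less gain, less txpower).  Inputs are in half-dBm, so the
 * example above (factory 16 dBm, target 13 dBm) gives 32 - 26 = +6 steps. */
static int txpower_index_steps(int factory_half_dbm, int target_half_dbm)
{
	return factory_half_dbm - target_half_dbm;
}

/* Step 6: temperature compensation, expressed in tenths of a degree per
 * half-dB step (e.g. 35 for 2.4 GHz).  A hotter device needs more gain,
 * i.e. a lower index, hence the negated result. */
static int temperature_index_steps(int current_temp_c, int factory_temp_c,
				   int tenths_deg_per_step)
{
	return -((current_temp_c - factory_temp_c) * 10) / tenths_deg_per_step;
}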
-
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to deliver a max of 3 dB below the regulatory limit
- * for the device.  That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
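A small sketch of how one of these two constants would be selected (the register read and the helper name are editorial assumptions, not the driver's actual code path); the revision comes from bits [3:2] of CSR_HW_REV_WA_REG, 1 = B, 2 = C:

/* Illustrative only: pick the CCK backoff (in half-dB gain-table steps)
 * from the hardware revision field. */
static int cck_compensation_steps(u32 hw_rev_wa_reg)
{
	return (((hw_rev_wa_reg >> 2) & 0x3) == 1) ?
		IWL_TX_POWER_CCK_COMPENSATION_B_STEP :
		IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
}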
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V   (7)
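A minimal sketch of step 7 from the txpower walkthrough, expressed with the constant above (the helper name and the standalone form are assumptions; it only mirrors the comment's arithmetic):

/* Returns gain-table index steps to add: positive lowers gain (current supply
 * voltage higher than factory), negative raises gain.  More than 2 steps in
 * either direction is treated as bogus and ignored, per the note above. */
static int voltage_compensation_steps(int eeprom_voltage, int current_voltage)
{
	int comp;

	if (current_voltage <= eeprom_voltage)	/* lower indicator = higher voltage */
		comp = (eeprom_voltage - current_voltage) /
		       TX_POWER_IWL_VOLTAGE_CODES_PER_03V;
	else
		comp = -2 * (current_voltage - eeprom_voltage) /
		       TX_POWER_IWL_VOLTAGE_CODES_PER_03V;

	if (comp < -2 || comp > 2)
		comp = 0;
	return comp;
}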
-
-/**
- * Gain tables.
- *
- * The following tables contain pair of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
- * are *relative* steps, not indications of absolute output power.  Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
- *     linear value that multiplies the output of the digital signal processor,
- *     before being sent to the analog radio.
- * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
- *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower.  This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration).  A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX              (0)  /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT    (-9) /* highest gain, lowest idx, 5 */
-
-/**
- * 2.4 GHz gain table
- *
- * Index    Dsp gain   Radio gain
- *   0        110         0x3f      (highest gain)
- *   1        104         0x3f
- *   2         98         0x3f
- *   3        110         0x3e
- *   4        104         0x3e
- *   5         98         0x3e
- *   6        110         0x3d
- *   7        104         0x3d
- *   8         98         0x3d
- *   9        110         0x3c
- *  10        104         0x3c
- *  11         98         0x3c
- *  12        110         0x3b
- *  13        104         0x3b
- *  14         98         0x3b
- *  15        110         0x3a
- *  16        104         0x3a
- *  17         98         0x3a
- *  18        110         0x39
- *  19        104         0x39
- *  20         98         0x39
- *  21        110         0x38
- *  22        104         0x38
- *  23         98         0x38
- *  24        110         0x37
- *  25        104         0x37
- *  26         98         0x37
- *  27        110         0x36
- *  28        104         0x36
- *  29         98         0x36
- *  30        110         0x35
- *  31        104         0x35
- *  32         98         0x35
- *  33        110         0x34
- *  34        104         0x34
- *  35         98         0x34
- *  36        110         0x33
- *  37        104         0x33
- *  38         98         0x33
- *  39        110         0x32
- *  40        104         0x32
- *  41         98         0x32
- *  42        110         0x31
- *  43        104         0x31
- *  44         98         0x31
- *  45        110         0x30
- *  46        104         0x30
- *  47         98         0x30
- *  48        110          0x6
- *  49        104          0x6
- *  50         98          0x6
- *  51        110          0x5
- *  52        104          0x5
- *  53         98          0x5
- *  54        110          0x4
- *  55        104          0x4
- *  56         98          0x4
- *  57        110          0x3
- *  58        104          0x3
- *  59         98          0x3
- *  60        110          0x2
- *  61        104          0x2
- *  62         98          0x2
- *  63        110          0x1
- *  64        104          0x1
- *  65         98          0x1
- *  66        110          0x0
- *  67        104          0x0
- *  68         98          0x0
- *  69         97            0
- *  70         96            0
- *  71         95            0
- *  72         94            0
- *  73         93            0
- *  74         92            0
- *  75         91            0
- *  76         90            0
- *  77         89            0
- *  78         88            0
- *  79         87            0
- *  80         86            0
- *  81         85            0
- *  82         84            0
- *  83         83            0
- *  84         82            0
- *  85         81            0
- *  86         80            0
- *  87         79            0
- *  88         78            0
- *  89         77            0
- *  90         76            0
- *  91         75            0
- *  92         74            0
- *  93         73            0
- *  94         72            0
- *  95         71            0
- *  96         70            0
- *  97         69            0
- *  98         68            0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index    Dsp gain   Radio gain
- *  -9        123         0x3F      (highest gain)
- *  -8        117         0x3F
- *  -7        110         0x3F
- *  -6        104         0x3F
- *  -5         98         0x3F
- *  -4        110         0x3E
- *  -3        104         0x3E
- *  -2         98         0x3E
- *  -1        110         0x3D
- *   0        104         0x3D
- *   1         98         0x3D
- *   2        110         0x3C
- *   3        104         0x3C
- *   4         98         0x3C
- *   5        110         0x3B
- *   6        104         0x3B
- *   7         98         0x3B
- *   8        110         0x3A
- *   9        104         0x3A
- *  10         98         0x3A
- *  11        110         0x39
- *  12        104         0x39
- *  13         98         0x39
- *  14        110         0x38
- *  15        104         0x38
- *  16         98         0x38
- *  17        110         0x37
- *  18        104         0x37
- *  19         98         0x37
- *  20        110         0x36
- *  21        104         0x36
- *  22         98         0x36
- *  23        110         0x35
- *  24        104         0x35
- *  25         98         0x35
- *  26        110         0x34
- *  27        104         0x34
- *  28         98         0x34
- *  29        110         0x33
- *  30        104         0x33
- *  31         98         0x33
- *  32        110         0x32
- *  33        104         0x32
- *  34         98         0x32
- *  35        110         0x31
- *  36        104         0x31
- *  37         98         0x31
- *  38        110         0x30
- *  39        104         0x30
- *  40         98         0x30
- *  41        110         0x25
- *  42        104         0x25
- *  43         98         0x25
- *  44        110         0x24
- *  45        104         0x24
- *  46         98         0x24
- *  47        110         0x23
- *  48        104         0x23
- *  49         98         0x23
- *  50        110         0x22
- *  51        104         0x18
- *  52         98         0x18
- *  53        110         0x17
- *  54        104         0x17
- *  55         98         0x17
- *  56        110         0x16
- *  57        104         0x16
- *  58         98         0x16
- *  59        110         0x15
- *  60        104         0x15
- *  61         98         0x15
- *  62        110         0x14
- *  63        104         0x14
- *  64         98         0x14
- *  65        110         0x13
- *  66        104         0x13
- *  67         98         0x13
- *  68        110         0x12
- *  69        104         0x08
- *  70         98         0x08
- *  71        110         0x07
- *  72        104         0x07
- *  73         98         0x07
- *  74        110         0x06
- *  75        104         0x06
- *  76         98         0x06
- *  77        110         0x05
- *  78        104         0x05
- *  79         98         0x05
- *  80        110         0x04
- *  81        104         0x04
- *  82         98         0x04
- *  83        110         0x03
- *  84        104         0x03
- *  85         98         0x03
- *  86        110         0x02
- *  87        104         0x02
- *  88         98         0x02
- *  89        110         0x01
- *  90        104         0x01
- *  91         98         0x01
- *  92        110         0x00
- *  93        104         0x00
- *  94         98         0x00
- *  95         93         0x00
- *  96         88         0x00
- *  97         83         0x00
- *  98         78         0x00
- */
-
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated.  These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24   (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52   (34)
-#define IWL_TX_POWER_REGULATORY_MIN          (0)
-#define IWL_TX_POWER_REGULATORY_MAX          (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion.  This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit,
- * nor the regulatory limit, when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24   (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52   (38)
-#define IWL_TX_POWER_SATURATION_MIN          (20)
-#define IWL_TX_POWER_SATURATION_MAX          (50)
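For illustration, a minimal sketch of the clamping described in the two blocks above (helper names are hypothetical; the 2.4 GHz defaults are used for brevity, a per-band caller would pick the _24 or _52 value):

/* Values are in half-dBm.  Out-of-range EEPROM values fall back to defaults. */
static int sanitize_regulatory_limit(int eeprom_half_dbm)
{
	if (eeprom_half_dbm < IWL_TX_POWER_REGULATORY_MIN ||
	    eeprom_half_dbm > IWL_TX_POWER_REGULATORY_MAX)
		return IWL_TX_POWER_DEFAULT_REGULATORY_24;	/* 34 = 17 dBm */
	return eeprom_half_dbm;
}

static int sanitize_saturation(int eeprom_half_dbm)
{
	if (eeprom_half_dbm < IWL_TX_POWER_SATURATION_MIN ||
	    eeprom_half_dbm > IWL_TX_POWER_SATURATION_MAX)
		return IWL_TX_POWER_DEFAULT_SATURATION_24;	/* 38 = 19 dBm */
	return eeprom_half_dbm;
}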
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain.  Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below.  If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
-       CALIB_CH_GROUP_1 = 0,
-       CALIB_CH_GROUP_2 = 1,
-       CALIB_CH_GROUP_3 = 2,
-       CALIB_CH_GROUP_4 = 3,
-       CALIB_CH_GROUP_5 = 4,
-       CALIB_CH_GROUP_MAX
-};
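A sketch of how a channel number maps onto these calibration groups (the helper name is hypothetical; the driver has an equivalent lookup in its txpower code):

/* Returns a CALIB_CH_GROUP_* index for a channel number, or -1 if the channel
 * falls outside every range above.  The 2.4 GHz range is checked first since
 * its channel numbers (1-20) do not overlap the 5 GHz ranges. */
static int channel_to_tx_atten_group(u16 channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;
	return -1;
}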
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues).  All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device.  When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS        7
-#define IWL49_CMD_FIFO_NUM     4
-#define IWL49_NUM_QUEUES       16
-#define IWL49_NUM_AMPDU_QUEUES 8
-
-
-/**
- * struct iwl4965_scd_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue.  If the TFD index is 0-63, the driver
- * must duplicate the byte count entry in corresponding index 256-319.
- *
- * Padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
-       __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
-       u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
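A short sketch of the duplication rule described above (the helper name is an assumption; the real driver performs this update in its Tx-queue byte-count path):

/* Write the frame's byte count for a TFD, mirroring entries 0-63 into the
 * duplicate region 256-319 so the scheduler never reads a stale value when
 * the 64-TFD Tx window wraps around index 0.  Illustrative only. */
static void scd_bc_tbl_set_example(struct iwl4965_scd_bc_tbl *bc_tbl,
				   int tfd_index, u16 byte_count)
{
	bc_tbl->tfd_offset[tfd_index] = cpu_to_le16(byte_count);
	if (tfd_index < 64)
		bc_tbl->tfd_offset[tfd_index + 256] = cpu_to_le16(byte_count);
}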
-
-
-#define IWL4965_RTC_INST_LOWER_BOUND           (0x000000)
-
-/* RSSI to dBm */
-#define IWL4965_RSSI_OFFSET    44
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT  0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN   0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN    0x02
-
-#define IWL4965_DEFAULT_TX_RETRY  15
-
-/* uCode queue management */
-#define IWL4965_FIRST_AMPDU_QUEUE      10
-
-
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644 (file)
index 6862fdc..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-4965-led.h"
-
-/* Send led command */
-static int
-iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
-{
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_LEDS_CMD,
-               .len = sizeof(struct iwl_led_cmd),
-               .data = led_cmd,
-               .flags = CMD_ASYNC,
-               .callback = NULL,
-       };
-       u32 reg;
-
-       reg = iwl_read32(priv, CSR_LED_REG);
-       if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
-               iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
-
-       return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-/* Set led register off */
-void iwl4965_led_enable(struct iwl_priv *priv)
-{
-       iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-}
-
-const struct iwl_led_ops iwl4965_led_ops = {
-       .cmd = iwl4965_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644 (file)
index 5ed3615..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_4965_led_h__
-#define __iwl_4965_led_h__
-
-extern const struct iwl_led_ops iwl4965_led_ops;
-void iwl4965_led_enable(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644 (file)
index 2be6d9e..0000000
+++ /dev/null
@@ -1,1194 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-sta.h"
-
-void iwl4965_check_abort_status(struct iwl_priv *priv,
-                           u8 frame_count, u32 status)
-{
-       if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
-               IWL_ERR(priv, "Tx flush command to flush out all frames\n");
-               if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
-                       queue_work(priv->workqueue, &priv->tx_flush);
-       }
-}
-
-/*
- * EEPROM
- */
-struct iwl_mod_params iwl4965_mod_params = {
-       .amsdu_size_8K = 1,
-       .restart_fw = 1,
-       /* the rest are 0 by default */
-};
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       unsigned long flags;
-       int i;
-       spin_lock_irqsave(&rxq->lock, flags);
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
-       /* Fill the rx_used queue with _all_ of the Rx buffers */
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-               /* In the reset function, these buffers may have been allocated
-                * to an SKB, so we need to unmap and free potential storage */
-               if (rxq->pool[i].page != NULL) {
-                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-       }
-
-       for (i = 0; i < RX_QUEUE_SIZE; i++)
-               rxq->queue[i] = NULL;
-
-       /* Set us so that we have processed and used all buffers, but have
-        * not restocked the Rx queue with fresh buffers */
-       rxq->read = rxq->write = 0;
-       rxq->write_actual = 0;
-       rxq->free_count = 0;
-       spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       u32 rb_size;
-       const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
-       u32 rb_timeout = 0;
-
-       if (priv->cfg->mod_params->amsdu_size_8K)
-               rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
-       else
-               rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
-       /* Stop Rx DMA */
-       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
-       /* Reset driver's Rx queue write index */
-       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
-       /* Tell device where to find RBD circular buffer in DRAM */
-       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-                          (u32)(rxq->bd_dma >> 8));
-
-       /* Tell device where in DRAM to update its Rx status */
-       iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-                          rxq->rb_stts_dma >> 4);
-
-       /* Enable Rx DMA
-        * Direct rx interrupts to hosts
-        * Rx buffer size 4 or 8k
-        * RB timeout 0 (see rb_timeout above)
-        * 256 RBDs
-        */
-       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-                          FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
-                          FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-                          FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
-                          rb_size|
-                          (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
-                          (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
-       /* Set interrupt coalescing timer to default (2048 usecs) */
-       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
-       return 0;
-}
-
-static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
-               if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
-                       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-                                              APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
-                                              ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
-       iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
-                              APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
-                              ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwl4965_hw_nic_init(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       int ret;
-
-       /* nic_init */
-       spin_lock_irqsave(&priv->lock, flags);
-       priv->cfg->ops->lib->apm_ops.init(priv);
-
-       /* Set interrupt coalescing calibration timer to default (512 usecs) */
-       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl4965_set_pwr_vmain(priv);
-
-       priv->cfg->ops->lib->apm_ops.config(priv);
-
-       /* Allocate the RX queue, or reset if it is already allocated */
-       if (!rxq->bd) {
-               ret = iwl_legacy_rx_queue_alloc(priv);
-               if (ret) {
-                       IWL_ERR(priv, "Unable to initialize Rx queue\n");
-                       return -ENOMEM;
-               }
-       } else
-               iwl4965_rx_queue_reset(priv, rxq);
-
-       iwl4965_rx_replenish(priv);
-
-       iwl4965_rx_init(priv, rxq);
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       rxq->need_update = 1;
-       iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Allocate or reset and init all Tx and Command queues */
-       if (!priv->txq) {
-               ret = iwl4965_txq_ctx_alloc(priv);
-               if (ret)
-                       return ret;
-       } else
-               iwl4965_txq_ctx_reset(priv);
-
-       set_bit(STATUS_INIT, &priv->status);
-
-       return 0;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
-                                         dma_addr_t dma_addr)
-{
-       return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct list_head *element;
-       struct iwl_rx_mem_buffer *rxb;
-       unsigned long flags;
-
-       spin_lock_irqsave(&rxq->lock, flags);
-       while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-               /* The overwritten rxb must be a used one */
-               rxb = rxq->queue[rxq->write];
-               BUG_ON(rxb && rxb->page);
-
-               /* Get next free Rx buffer, remove from free list */
-               element = rxq->rx_free.next;
-               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-               list_del(element);
-
-               /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
-                                                             rxb->page_dma);
-               rxq->queue[rxq->write] = rxb;
-               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-               rxq->free_count--;
-       }
-       spin_unlock_irqrestore(&rxq->lock, flags);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-       /* If we've added more space for the firmware to place data, tell it.
-        * Increment device's write pointer in multiples of 8. */
-       if (rxq->write_actual != (rxq->write & ~0x7)) {
-               spin_lock_irqsave(&rxq->lock, flags);
-               rxq->need_update = 1;
-               spin_unlock_irqrestore(&rxq->lock, flags);
-               iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-       }
-}
-
-/**
- * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct list_head *element;
-       struct iwl_rx_mem_buffer *rxb;
-       struct page *page;
-       unsigned long flags;
-       gfp_t gfp_mask = priority;
-
-       while (1) {
-               spin_lock_irqsave(&rxq->lock, flags);
-               if (list_empty(&rxq->rx_used)) {
-                       spin_unlock_irqrestore(&rxq->lock, flags);
-                       return;
-               }
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (priv->hw_params.rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
-               /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(priv, "alloc_pages failed, "
-                                              "order: %d\n",
-                                              priv->hw_params.rx_page_order);
-
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(priv,
-                                       "Failed to alloc_pages with %s. "
-                                       "Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?
-                                                "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
-                       return;
-               }
-
-               spin_lock_irqsave(&rxq->lock, flags);
-
-               if (list_empty(&rxq->rx_used)) {
-                       spin_unlock_irqrestore(&rxq->lock, flags);
-                       __free_pages(page, priv->hw_params.rx_page_order);
-                       return;
-               }
-               element = rxq->rx_used.next;
-               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-               list_del(element);
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               BUG_ON(rxb->page);
-               rxb->page = page;
-               /* Get physical address of the RB */
-               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-               /* dma address must be no more than 36 bits */
-               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-               /* and also 256 byte aligned! */
-               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-               spin_lock_irqsave(&rxq->lock, flags);
-
-               list_add_tail(&rxb->list, &rxq->rx_free);
-               rxq->free_count++;
-               priv->alloc_rxb_page++;
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-       }
-}
-
-void iwl4965_rx_replenish(struct iwl_priv *priv)
-{
-       unsigned long flags;
-
-       iwl4965_rx_allocate(priv, GFP_KERNEL);
-
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl4965_rx_queue_restock(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwl4965_rx_replenish_now(struct iwl_priv *priv)
-{
-       iwl4965_rx_allocate(priv, GFP_ATOMIC);
-
-       iwl4965_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the pool entry must have its SKB set to NULL.
- * This free routine walks the list of pool entries and, for every entry whose
- * SKB is non-NULL, unmaps and frees it.
- */
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       int i;
-       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-               if (rxq->pool[i].page != NULL) {
-                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-       }
-
-       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-                         rxq->bd_dma);
-       dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
-                         rxq->rb_stts, rxq->rb_stts_dma);
-       rxq->bd = NULL;
-       rxq->rb_stts  = NULL;
-}
-
-int iwl4965_rxq_stop(struct iwl_priv *priv)
-{
-
-       /* stop Rx DMA */
-       iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-       iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
-                           FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
-       return 0;
-}
-
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
-       int idx = 0;
-       int band_offset = 0;
-
-       /* HT rate format: mac80211 wants an MCS number, which is just LSB */
-       if (rate_n_flags & RATE_MCS_HT_MSK) {
-               idx = (rate_n_flags & 0xff);
-               return idx;
-       /* Legacy rate format, search for match in table */
-       } else {
-               if (band == IEEE80211_BAND_5GHZ)
-                       band_offset = IWL_FIRST_OFDM_RATE;
-               for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
-                       if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
-                               return idx - band_offset;
-       }
-
-       return -1;
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
-                            struct iwl_rx_phy_res *rx_resp)
-{
-       /* data from PHY/DSP regarding signal strength, etc.,
-        *   contents are always there, not configurable by host.  */
-       struct iwl4965_rx_non_cfg_phy *ncphy =
-           (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
-       u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
-                       >> IWL49_AGC_DB_POS;
-
-       u32 valid_antennae =
-           (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
-                       >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
-       u8 max_rssi = 0;
-       u32 i;
-
-       /* Find max rssi among 3 possible receivers.
-        * These values are measured by the digital signal processor (DSP).
-        * They should stay fairly constant even as the signal strength varies,
-        *   if the radio's automatic gain control (AGC) is working right.
-        * AGC value (see below) will provide the "interesting" info. */
-       for (i = 0; i < 3; i++)
-               if (valid_antennae & (1 << i))
-                       max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
-       IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
-               ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
-               max_rssi, agc);
-
-       /* dBm = max_rssi dB - agc dB - constant.
-        * Higher AGC (higher radio gain) means lower signal. */
-       return max_rssi - agc - IWL4965_RSSI_OFFSET;
-}
-
-
-static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
-       u32 decrypt_out = 0;
-
-       if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
-                                       RX_RES_STATUS_STATION_FOUND)
-               decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
-                               RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
-       decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
-       /* packet was not encrypted */
-       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
-                                       RX_RES_STATUS_SEC_TYPE_NONE)
-               return decrypt_out;
-
-       /* packet was encrypted with unknown alg */
-       if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
-                                       RX_RES_STATUS_SEC_TYPE_ERR)
-               return decrypt_out;
-
-       /* decryption was not done in HW */
-       if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
-                                       RX_MPDU_RES_STATUS_DEC_DONE_MSK)
-               return decrypt_out;
-
-       switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
-       case RX_RES_STATUS_SEC_TYPE_CCMP:
-               /* alg is CCM: check MIC only */
-               if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
-                       /* Bad MIC */
-                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
-               else
-                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
-               break;
-
-       case RX_RES_STATUS_SEC_TYPE_TKIP:
-               if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
-                       /* Bad TTAK */
-                       decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
-                       break;
-               }
-               /* fall through if TTAK OK */
-       default:
-               if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
-                       decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
-               else
-                       decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-               break;
-       }
-
-       IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
-                                       decrypt_in, decrypt_out);
-
-       return decrypt_out;
-}
-
-static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
-                                       struct ieee80211_hdr *hdr,
-                                       u16 len,
-                                       u32 ampdu_status,
-                                       struct iwl_rx_mem_buffer *rxb,
-                                       struct ieee80211_rx_status *stats)
-{
-       struct sk_buff *skb;
-       __le16 fc = hdr->frame_control;
-
-       /* We only process data packets if the interface is open */
-       if (unlikely(!priv->is_open)) {
-               IWL_DEBUG_DROP_LIMIT(priv,
-                   "Dropping packet while interface is not open.\n");
-               return;
-       }
-
-       /* In case of HW accelerated crypto and bad decryption, drop */
-       if (!priv->cfg->mod_params->sw_crypto &&
-           iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
-               return;
-
-       skb = dev_alloc_skb(128);
-       if (!skb) {
-               IWL_ERR(priv, "dev_alloc_skb failed\n");
-               return;
-       }
-
-       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
-       iwl_legacy_update_stats(priv, false, fc, len);
-       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
-       ieee80211_rx(priv->hw, skb);
-       priv->alloc_rxb_page--;
-       rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct ieee80211_hdr *header;
-       struct ieee80211_rx_status rx_status;
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_rx_phy_res *phy_res;
-       __le32 rx_pkt_status;
-       struct iwl_rx_mpdu_res_start *amsdu;
-       u32 len;
-       u32 ampdu_status;
-       u32 rate_n_flags;
-
-       /**
-        * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
-        *      REPLY_RX: physical layer info is in this buffer
-        *      REPLY_RX_MPDU_CMD: physical layer info was sent in separate
-        *              command and cached in priv->last_phy_res
-        *
-        * Here we set up local variables depending on which command is
-        * received.
-        */
-       if (pkt->hdr.cmd == REPLY_RX) {
-               phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
-               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
-                               + phy_res->cfg_phy_cnt);
-
-               len = le16_to_cpu(phy_res->byte_count);
-               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
-                               phy_res->cfg_phy_cnt + len);
-               ampdu_status = le32_to_cpu(rx_pkt_status);
-       } else {
-               if (!priv->_4965.last_phy_res_valid) {
-                       IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-                       return;
-               }
-               phy_res = &priv->_4965.last_phy_res;
-               amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
-               header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
-               len = le16_to_cpu(amsdu->byte_count);
-               rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
-               ampdu_status = iwl4965_translate_rx_status(priv,
-                               le32_to_cpu(rx_pkt_status));
-       }
-
-       if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-               IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
-                               phy_res->cfg_phy_cnt);
-               return;
-       }
-
-       if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
-           !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
-               IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
-                               le32_to_cpu(rx_pkt_status));
-               return;
-       }
-
-       /* This will be used in several places later */
-       rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
-       /* rx_status carries information about the packet to mac80211 */
-       rx_status.mactime = le64_to_cpu(phy_res->timestamp);
-       rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
-                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-       rx_status.freq =
-               ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
-                                                       rx_status.band);
-       rx_status.rate_idx =
-               iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
-       rx_status.flag = 0;
-
-       /* TSF isn't reliable. In order to allow a smooth user experience,
-        * this workaround doesn't propagate it to mac80211 */
-       /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
-       priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
-       /* Find max signal strength (dBm) among 3 antenna/receiver chains */
-       rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
-
-       iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
-       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
-               rx_status.signal, (unsigned long long)rx_status.mactime);
-
-       /*
-        * "antenna number"
-        *
-        * It seems that the antenna field in the phy flags value
-        * is actually a bit field. This is undefined by radiotap,
-        * it wants an actual antenna number but I always get "7"
-        * for most legacy frames I receive indicating that the
-        * same frame was received on all three RX chains.
-        *
-        * I think this field should be removed in favor of a
-        * new 802.11n radiotap field "RX chains" that is defined
-        * as a bitmask.
-        */
-       rx_status.antenna =
-               (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
-               >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
-       /* set the preamble flag if appropriate */
-       if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
-               rx_status.flag |= RX_FLAG_SHORTPRE;
-
-       /* Set up the HT phy flags */
-       if (rate_n_flags & RATE_MCS_HT_MSK)
-               rx_status.flag |= RX_FLAG_HT;
-       if (rate_n_flags & RATE_MCS_HT40_MSK)
-               rx_status.flag |= RX_FLAG_40MHZ;
-       if (rate_n_flags & RATE_MCS_SGI_MSK)
-               rx_status.flag |= RX_FLAG_SHORT_GI;
-
-       iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
-                                   rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
-                           struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       priv->_4965.last_phy_res_valid = true;
-       memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
-              sizeof(struct iwl_rx_phy_res));
-}
-
-static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
-                                    struct ieee80211_vif *vif,
-                                    enum ieee80211_band band,
-                                    u8 is_active, u8 n_probes,
-                                    struct iwl_scan_channel *scan_ch)
-{
-       struct ieee80211_channel *chan;
-       const struct ieee80211_supported_band *sband;
-       const struct iwl_channel_info *ch_info;
-       u16 passive_dwell = 0;
-       u16 active_dwell = 0;
-       int added, i;
-       u16 channel;
-
-       sband = iwl_get_hw_mode(priv, band);
-       if (!sband)
-               return 0;
-
-       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
-       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
-       if (passive_dwell <= active_dwell)
-               passive_dwell = active_dwell + 1;
-
-       for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
-               chan = priv->scan_request->channels[i];
-
-               if (chan->band != band)
-                       continue;
-
-               channel = chan->hw_value;
-               scan_ch->channel = cpu_to_le16(channel);
-
-               ch_info = iwl_legacy_get_channel_info(priv, band, channel);
-               if (!iwl_legacy_is_channel_valid(ch_info)) {
-                       IWL_DEBUG_SCAN(priv,
-                                "Channel %d is INVALID for this band.\n",
-                                       channel);
-                       continue;
-               }
-
-               if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
-                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
-                       scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
-               else
-                       scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
-               if (n_probes)
-                       scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
-               scan_ch->active_dwell = cpu_to_le16(active_dwell);
-               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
-               /* Set txpower levels to defaults */
-               scan_ch->dsp_atten = 110;
-
-               /* NOTE: if we were doing 6Mb OFDM for scans we'd use
-                * power level:
-                * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
-                */
-               if (band == IEEE80211_BAND_5GHZ)
-                       scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
-               else
-                       scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
-               IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
-                              channel, le32_to_cpu(scan_ch->type),
-                              (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
-                               "ACTIVE" : "PASSIVE",
-                              (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
-                              active_dwell : passive_dwell);
-
-               scan_ch++;
-               added++;
-       }
-
-       IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
-       return added;
-}
-
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_SCAN_CMD,
-               .len = sizeof(struct iwl_scan_cmd),
-               .flags = CMD_SIZE_HUGE,
-       };
-       struct iwl_scan_cmd *scan;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       u32 rate_flags = 0;
-       u16 cmd_len;
-       u16 rx_chain = 0;
-       enum ieee80211_band band;
-       u8 n_probes = 0;
-       u8 rx_ant = priv->hw_params.valid_rx_ant;
-       u8 rate;
-       bool is_active = false;
-       int  chan_mod;
-       u8 active_chains;
-       u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (vif)
-               ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-       if (!priv->scan_cmd) {
-               priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
-                                        IWL_MAX_SCAN_SIZE, GFP_KERNEL);
-               if (!priv->scan_cmd) {
-                       IWL_DEBUG_SCAN(priv,
-                                      "failed to allocate memory for scan\n");
-                       return -ENOMEM;
-               }
-       }
-       scan = priv->scan_cmd;
-       memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
-       scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
-       scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
-       if (iwl_legacy_is_any_associated(priv)) {
-               u16 interval;
-               u32 extra;
-               u32 suspend_time = 100;
-               u32 scan_suspend_time = 100;
-
-               IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-               interval = vif->bss_conf.beacon_int;
-
-               scan->suspend_time = 0;
-               scan->max_out_time = cpu_to_le32(200 * 1024);
-               if (!interval)
-                       interval = suspend_time;
-
-               extra = (suspend_time / interval) << 22;
-               scan_suspend_time = (extra |
-                   ((suspend_time % interval) * 1024));
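-               /* For example, with suspend_time = 100 and a beacon interval of
-                * 100: extra = (100 / 100) << 22 and the remainder term is 0, so
-                * scan_suspend_time = 0x00400000 -- whole beacon intervals land
-                * above bit 22, the TU remainder (times 1024) in the bits below. */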
-               scan->suspend_time = cpu_to_le32(scan_suspend_time);
-               IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
-                              scan_suspend_time, interval);
-       }
-
-       if (priv->scan_request->n_ssids) {
-               int i, p = 0;
-               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
-               for (i = 0; i < priv->scan_request->n_ssids; i++) {
-                       /* always does wildcard anyway */
-                       if (!priv->scan_request->ssids[i].ssid_len)
-                               continue;
-                       scan->direct_scan[p].id = WLAN_EID_SSID;
-                       scan->direct_scan[p].len =
-                               priv->scan_request->ssids[i].ssid_len;
-                       memcpy(scan->direct_scan[p].ssid,
-                              priv->scan_request->ssids[i].ssid,
-                              priv->scan_request->ssids[i].ssid_len);
-                       n_probes++;
-                       p++;
-               }
-               is_active = true;
-       } else
-               IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
-       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-       scan->tx_cmd.sta_id = ctx->bcast_sta_id;
-       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-       switch (priv->scan_band) {
-       case IEEE80211_BAND_2GHZ:
-               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-               chan_mod = le32_to_cpu(
-                       priv->contexts[IWL_RXON_CTX_BSS].active.flags &
-                                               RXON_FLG_CHANNEL_MODE_MSK)
-                                      >> RXON_FLG_CHANNEL_MODE_POS;
-               if (chan_mod == CHANNEL_MODE_PURE_40) {
-                       rate = IWL_RATE_6M_PLCP;
-               } else {
-                       rate = IWL_RATE_1M_PLCP;
-                       rate_flags = RATE_MCS_CCK_MSK;
-               }
-               break;
-       case IEEE80211_BAND_5GHZ:
-               rate = IWL_RATE_6M_PLCP;
-               break;
-       default:
-               IWL_WARN(priv, "Invalid scan band\n");
-               return -EIO;
-       }
-
-       /*
-        * If active scanning is requested but a certain channel is
-        * marked passive, we can do active scanning if we detect
-        * transmissions.
-        *
-        * There is an issue with some firmware versions that triggers
-        * a sysassert on a "good CRC threshold" of zero (== disabled),
-        * on a radar channel even though this means that we should NOT
-        * send probes.
-        *
-        * The "good CRC threshold" is the number of frames that we
-        * need to receive during our dwell time on a channel before
-        * sending out probes -- setting this to a huge value will
-        * mean we never reach it, but at the same time work around
-        * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
-        * here instead of IWL_GOOD_CRC_TH_DISABLED.
-        */
-       scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
-                                       IWL_GOOD_CRC_TH_NEVER;
-
-       band = priv->scan_band;
-
-       if (priv->cfg->scan_rx_antennas[band])
-               rx_ant = priv->cfg->scan_rx_antennas[band];
-
-       priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
-                                               priv->scan_tx_ant[band],
-                                                   scan_tx_antennas);
-       rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
-       scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
-
-       /* In power save mode use one chain, otherwise use all chains */
-       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-               /* rx_ant has been set to all valid chains previously */
-               active_chains = rx_ant &
-                               ((u8)(priv->chain_noise_data.active_chains));
-               if (!active_chains)
-                       active_chains = rx_ant;
-
-               IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
-                               priv->chain_noise_data.active_chains);
-
-               rx_ant = iwl4965_first_antenna(active_chains);
-       }
-
-       /* MIMO is not used here, but value is required */
-       rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
-       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
-       rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
-       rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
-       scan->rx_chain = cpu_to_le16(rx_chain);
-
-       cmd_len = iwl_legacy_fill_probe_req(priv,
-                                       (struct ieee80211_mgmt *)scan->data,
-                                       vif->addr,
-                                       priv->scan_request->ie,
-                                       priv->scan_request->ie_len,
-                                       IWL_MAX_SCAN_SIZE - sizeof(*scan));
-       scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
-       scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
-                              RXON_FILTER_BCON_AWARE_MSK);
-
-       scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
-                                               is_active, n_probes,
-                                               (void *)&scan->data[cmd_len]);
-       if (scan->channel_count == 0) {
-               IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
-               return -EIO;
-       }
-
-       cmd.len += le16_to_cpu(scan->tx_cmd.len) +
-           scan->channel_count * sizeof(struct iwl_scan_channel);
-       cmd.data = scan;
-       scan->len = cpu_to_le16(cmd.len);
-
-       set_bit(STATUS_SCAN_HW, &priv->status);
-
-       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
-       if (ret)
-               clear_bit(STATUS_SCAN_HW, &priv->status);
-
-       return ret;
-}
-
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
-                              struct ieee80211_vif *vif, bool add)
-{
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-       if (add)
-               return iwl4965_add_bssid_station(priv, vif_priv->ctx,
-                                               vif->bss_conf.bssid,
-                                               &vif_priv->ibss_bssid_sta_id);
-       return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
-                                 vif->bss_conf.bssid);
-}
-
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
-                           int sta_id, int tid, int freed)
-{
-       lockdep_assert_held(&priv->sta_lock);
-
-       if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
-               priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
-       else {
-               IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
-                       priv->stations[sta_id].tid[tid].tfds_in_queue,
-                       freed);
-               priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
-       }
-}
-
-#define IWL_TX_QUEUE_MSK       0xfffff
-
-static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
-{
-       return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
-              priv->current_ht_config.single_chain_sufficient;
-}
-
-#define IWL_NUM_RX_CHAINS_MULTIPLE     3
-#define IWL_NUM_RX_CHAINS_SINGLE       2
-#define IWL_NUM_IDLE_CHAINS_DUAL       2
-#define IWL_NUM_IDLE_CHAINS_SINGLE     1
-
-/*
- * Determine how many receiver/antenna chains to use.
- *
- * More chains provide better reception via diversity.  Fewer chains save
- * power, at the expense of throughput, but only when we are not in
- * powersave to start with.
- *
- * MIMO (dual stream) requires at least 2, but works better with 3.
- * This does not determine *which* chains to use, just how many.
- */
-static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
-{
-       /* # of Rx chains to use when expecting MIMO. */
-       if (iwl4965_is_single_rx_stream(priv))
-               return IWL_NUM_RX_CHAINS_SINGLE;
-       else
-               return IWL_NUM_RX_CHAINS_MULTIPLE;
-}
-
-/*
- * When we are in power saving mode, unless the device supports spatial
- * multiplexing power save, use the active count as the rx chain count.
- */
-static int
-iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
-{
-       /* # Rx chains when idling, depending on SMPS mode */
-       switch (priv->current_ht_config.smps) {
-       case IEEE80211_SMPS_STATIC:
-       case IEEE80211_SMPS_DYNAMIC:
-               return IWL_NUM_IDLE_CHAINS_SINGLE;
-       case IEEE80211_SMPS_OFF:
-               return active_cnt;
-       default:
-               WARN(1, "invalid SMPS mode %d",
-                    priv->current_ht_config.smps);
-               return active_cnt;
-       }
-}
-
-/* up to 4 chains */
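-/* e.g. a chain_bitmap of 0x5 (BIT(0) | BIT(2)) counts as 2 chains */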
-static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
-{
-       u8 res;
-       res = (chain_bitmap & BIT(0)) >> 0;
-       res += (chain_bitmap & BIT(1)) >> 1;
-       res += (chain_bitmap & BIT(2)) >> 2;
-       res += (chain_bitmap & BIT(3)) >> 3;
-       return res;
-}
-
-/**
- * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
- *
- * Selects how many and which Rx receivers/antennas/chains to use.
- * This should not be used for the scan command ... it puts data in the wrong place.
- */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       bool is_single = iwl4965_is_single_rx_stream(priv);
-       bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-       u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
-       u32 active_chains;
-       u16 rx_chain;
-
-       /* Tell uCode which antennas are actually connected.
-        * Before first association, we assume all antennas are connected.
-        * Just after first association, iwl4965_chain_noise_calibration()
-        *    checks which antennas actually *are* connected. */
-       if (priv->chain_noise_data.active_chains)
-               active_chains = priv->chain_noise_data.active_chains;
-       else
-               active_chains = priv->hw_params.valid_rx_ant;
-
-       rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
-
-       /* How many receivers should we use? */
-       active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
-       idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
-
-
-       /* correct rx chain count according to hw settings
-        * and chain noise calibration
-        */
-       valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
-       if (valid_rx_cnt < active_rx_cnt)
-               active_rx_cnt = valid_rx_cnt;
-
-       if (valid_rx_cnt < idle_rx_cnt)
-               idle_rx_cnt = valid_rx_cnt;
-
-       rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
-       rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
-
-       ctx->staging.rx_chain = cpu_to_le16(rx_chain);
-
-       if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
-               ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
-       else
-               ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
-
-       IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
-                       ctx->staging.rx_chain,
-                       active_rx_cnt, idle_rx_cnt);
-
-       WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
-               active_rx_cnt < idle_rx_cnt);
-}
-
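-/*
- * Rotate to the next antenna index after 'ant' that is set in the 'valid'
- * bitmask; e.g. ant = 0 with valid = (BIT(0) | BIT(1)) returns 1.  If no
- * other valid antenna is found, the current 'ant' is returned unchanged.
- */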
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
-{
-       int i;
-       u8 ind = ant;
-
-       for (i = 0; i < RATE_ANT_NUM - 1; i++) {
-               ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
-               if (valid & BIT(ind))
-                       return ind;
-       }
-       return ant;
-}
-
-static const char *iwl4965_get_fh_string(int cmd)
-{
-       switch (cmd) {
-       IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
-       IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
-       IWL_CMD(FH_RSCSR_CHNL0_WPTR);
-       IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
-       IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
-       IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
-       IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
-       IWL_CMD(FH_TSSR_TX_STATUS_REG);
-       IWL_CMD(FH_TSSR_TX_ERROR_REG);
-       default:
-               return "UNKNOWN";
-       }
-}
-
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
-       int i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       int pos = 0;
-       size_t bufsz = 0;
-#endif
-       static const u32 fh_tbl[] = {
-               FH_RSCSR_CHNL0_STTS_WPTR_REG,
-               FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-               FH_RSCSR_CHNL0_WPTR,
-               FH_MEM_RCSR_CHNL0_CONFIG_REG,
-               FH_MEM_RSSR_SHARED_CTRL_REG,
-               FH_MEM_RSSR_RX_STATUS_REG,
-               FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
-               FH_TSSR_TX_STATUS_REG,
-               FH_TSSR_TX_ERROR_REG
-       };
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (display) {
-               bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-               *buf = kmalloc(bufsz, GFP_KERNEL);
-               if (!*buf)
-                       return -ENOMEM;
-               pos += scnprintf(*buf + pos, bufsz - pos,
-                               "FH register values:\n");
-               for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
-                       pos += scnprintf(*buf + pos, bufsz - pos,
-                               "  %34s: 0X%08x\n",
-                               iwl4965_get_fh_string(fh_tbl[i]),
-                               iwl_legacy_read_direct32(priv, fh_tbl[i]));
-               }
-               return pos;
-       }
-#endif
-       IWL_ERR(priv, "FH register values:\n");
-       for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
-               IWL_ERR(priv, "  %34s: 0X%08x\n",
-                       iwl4965_get_fh_string(fh_tbl[i]),
-                       iwl_legacy_read_direct32(priv, fh_tbl[i]));
-       }
-       return 0;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644 (file)
index 57ebe21..0000000
+++ /dev/null
@@ -1,2871 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-dev.h"
-#include "iwl-sta.h"
-#include "iwl-core.h"
-#include "iwl-4965.h"
-
-#define IWL4965_RS_NAME "iwl-4965-rs"
-
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY      1
-#define IWL_HT_NUMBER_TRY   3
-
-#define IWL_RATE_MAX_WINDOW            62      /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH                6       /* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH                8       /* min successes to calc tpt */
-
-/* max allowed rate miss before sync LQ cmd */
-#define IWL_MISSED_RATE_MAX            15
-/* max time to accum history 2 seconds */
-#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
-
-static u8 rs_ht_to_legacy[] = {
-       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-       IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
-       IWL_RATE_6M_INDEX,
-       IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
-       IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
-       IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
-       IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
-};
-
-static const u8 ant_toggle_lookup[] = {
-       /*ANT_NONE -> */ ANT_NONE,
-       /*ANT_A    -> */ ANT_B,
-       /*ANT_B    -> */ ANT_C,
-       /*ANT_AB   -> */ ANT_BC,
-       /*ANT_C    -> */ ANT_A,
-       /*ANT_AC   -> */ ANT_AB,
-       /*ANT_BC   -> */ ANT_AC,
-       /*ANT_ABC  -> */ ANT_ABC,
-};
-
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
-       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
-                                   IWL_RATE_SISO_##s##M_PLCP, \
-                                   IWL_RATE_MIMO2_##s##M_PLCP,\
-                                   IWL_RATE_##r##M_IEEE,      \
-                                   IWL_RATE_##ip##M_INDEX,    \
-                                   IWL_RATE_##in##M_INDEX,    \
-                                   IWL_RATE_##rp##M_INDEX,    \
-                                   IWL_RATE_##rn##M_INDEX,    \
-                                   IWL_RATE_##pp##M_INDEX,    \
-                                   IWL_RATE_##np##M_INDEX }
-
-/*
- * Parameter order:
- *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
-       IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
-       IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
-       IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
-       IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
-       IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
-       IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
-       IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
-       IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
-       IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
-       IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
-       IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
-       IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-       IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
-};
-
-static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
-{
-       int idx = 0;
-
-       /* HT rate format */
-       if (rate_n_flags & RATE_MCS_HT_MSK) {
-               idx = (rate_n_flags & 0xff);
-
-               if (idx >= IWL_RATE_MIMO2_6M_PLCP)
-                       idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
-               idx += IWL_FIRST_OFDM_RATE;
-               /* skip 9M, which is not supported in HT */
-               if (idx >= IWL_RATE_9M_INDEX)
-                       idx += 1;
-               if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
-                       return idx;
-
-       /* legacy rate format, search for match in table */
-       } else {
-               for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
-                       if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
-                               return idx;
-       }
-
-       return -1;
-}
-
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
-                                  struct sk_buff *skb,
-                                  struct ieee80211_sta *sta,
-                                  struct iwl_lq_sta *lq_sta);
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
-                            struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
-static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
-                                       bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-                            u32 *rate_n_flags, int index);
-#else
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-                            u32 *rate_n_flags, int index)
-{}
-#endif
-
-/**
- * The following tables contain the expected throughput metrics for all rates
- *
- *     1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- *
- * where invalid entries are zeros.
- *
- * CCK rates are only valid in the legacy table and will only be used in the
- * G (2.4 GHz) band.
- */
-
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
-       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
-};
-
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 42, 0,  76, 102, 124, 158, 183, 193, 202}, /* Norm */
-       {0, 0, 0, 0, 46, 0,  82, 110, 132, 167, 192, 202, 210}, /* SGI */
-       {0, 0, 0, 0, 48, 0,  93, 135, 176, 251, 319, 351, 381}, /* AGG */
-       {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
-       {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
-       {0, 0, 0, 0,  96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
-       {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
-       {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
-       {0, 0, 0, 0,  92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
-       {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
-};
-
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
-       {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
-       {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
-       {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
-};
-
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
-       {  "1", "BPSK DSSS"},
-       {  "2", "QPSK DSSS"},
-       {"5.5", "BPSK CCK"},
-       { "11", "QPSK CCK"},
-       {  "6", "BPSK 1/2"},
-       {  "9", "BPSK 1/2"},
-       { "12", "QPSK 1/2"},
-       { "18", "QPSK 3/4"},
-       { "24", "16QAM 1/2"},
-       { "36", "16QAM 3/4"},
-       { "48", "64QAM 2/3"},
-       { "54", "64QAM 3/4"},
-       { "60", "64QAM 5/6"},
-};
-
-#define MCS_INDEX_PER_STREAM   (8)
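-/* 8 MCS indices per spatial stream: HT MCS 0-7 use one stream, MCS 8-15 add a second */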
-
-static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
-{
-       return (u8)(rate_n_flags & 0xFF);
-}
-
-static void
-iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
-{
-       window->data = 0;
-       window->success_counter = 0;
-       window->success_ratio = IWL_INVALID_VALUE;
-       window->counter = 0;
-       window->average_tpt = IWL_INVALID_VALUE;
-       window->stamp = 0;
-}
-
-static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
-{
-       return (ant_type & valid_antenna) == ant_type;
-}
-
-/*
- *     Remove old data from the statistics.  All data older than
- *     TID_MAX_TIME_DIFF is deleted.
- */
-static void
-iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
-       /* The oldest age we want to keep */
-       u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
-       while (tl->queue_count &&
-              (tl->time_stamp < oldest_time)) {
-               tl->total -= tl->packet_count[tl->head];
-               tl->packet_count[tl->head] = 0;
-               tl->time_stamp += TID_QUEUE_CELL_SPACING;
-               tl->queue_count--;
-               tl->head++;
-               if (tl->head >= TID_QUEUE_MAX_SIZE)
-                       tl->head = 0;
-       }
-}
-
-/*
- *     Increment the traffic load value for this tid and also remove
- *     any old values that have aged past the time window.
- */
-static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
-                          struct ieee80211_hdr *hdr)
-{
-       u32 curr_time = jiffies_to_msecs(jiffies);
-       u32 time_diff;
-       s32 index;
-       struct iwl_traffic_load *tl = NULL;
-       u8 tid;
-
-       if (ieee80211_is_data_qos(hdr->frame_control)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & 0xf;
-       } else
-               return MAX_TID_COUNT;
-
-       if (unlikely(tid >= TID_MAX_LOAD_COUNT))
-               return MAX_TID_COUNT;
-
-       tl = &lq_data->load[tid];
-
-       curr_time -= curr_time % TID_ROUND_VALUE;
-
-       /* Happens only for the first packet. Initialize the data */
-       if (!(tl->queue_count)) {
-               tl->total = 1;
-               tl->time_stamp = curr_time;
-               tl->queue_count = 1;
-               tl->head = 0;
-               tl->packet_count[0] = 1;
-               return MAX_TID_COUNT;
-       }
-
-       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-       index = time_diff / TID_QUEUE_CELL_SPACING;
-
-       /* The history is too long: remove data that is older than */
-       /* TID_MAX_TIME_DIFF */
-       if (index >= TID_QUEUE_MAX_SIZE)
-               iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
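-       /* Index into the circular packet_count[] history, relative to the current head */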
-       index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
-       tl->packet_count[index] = tl->packet_count[index] + 1;
-       tl->total = tl->total + 1;
-
-       if ((index + 1) > tl->queue_count)
-               tl->queue_count = index + 1;
-
-       return tid;
-}
-
-/*
- *     Get the traffic load value for this tid.
- */
-static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
-       u32 curr_time = jiffies_to_msecs(jiffies);
-       u32 time_diff;
-       s32 index;
-       struct iwl_traffic_load *tl = NULL;
-
-       if (tid >= TID_MAX_LOAD_COUNT)
-               return 0;
-
-       tl = &(lq_data->load[tid]);
-
-       curr_time -= curr_time % TID_ROUND_VALUE;
-
-       if (!(tl->queue_count))
-               return 0;
-
-       time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
-       index = time_diff / TID_QUEUE_CELL_SPACING;
-
-       /* The history is too long: remove data that is older than */
-       /* TID_MAX_TIME_DIFF */
-       if (index >= TID_QUEUE_MAX_SIZE)
-               iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
-       return tl->total;
-}
-
-static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
-                                     struct iwl_lq_sta *lq_data, u8 tid,
-                                     struct ieee80211_sta *sta)
-{
-       int ret = -EAGAIN;
-       u32 load;
-
-       load = iwl4965_rs_tl_get_load(lq_data, tid);
-
-       if (load > IWL_AGG_LOAD_THRESHOLD) {
-               IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
-                               sta->addr, tid);
-               ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
-               if (ret == -EAGAIN) {
-                       /*
-                        * The driver and mac80211 are out of sync; this might
-                        * be caused by reloading the firmware.  Stop the Tx
-                        * BA session here.
-                        */
-                       IWL_ERR(priv, "Failed to start Tx agg on tid: %d\n",
-                               tid);
-                       ieee80211_stop_tx_ba_session(sta, tid);
-               }
-       } else {
-               IWL_ERR(priv, "Aggregation not enabled for tid %d "
-                       "because load = %u\n", tid, load);
-       }
-       return ret;
-}
-
-static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
-                             struct iwl_lq_sta *lq_data,
-                             struct ieee80211_sta *sta)
-{
-       if (tid < TID_MAX_LOAD_COUNT)
-               iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
-       else
-               IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
-                       tid, TID_MAX_LOAD_COUNT);
-}
-
-static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
-{
-       return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
-              !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
-              !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
-}
-
-/*
- * Get the expected throughput from an iwl_scale_tbl_info,
- * wrapping the table lookup in a NULL pointer check.
- */
-static s32
-iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
-       if (tbl->expected_tpt)
-               return tbl->expected_tpt[rs_index];
-       return 0;
-}
-
-/**
- * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate.  window->data contains the bitmask of successful
- * packets.
- */
-static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
-                             int scale_index, int attempts, int successes)
-{
-       struct iwl_rate_scale_data *window = NULL;
-       static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
-       s32 fail_count, tpt;
-
-       if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
-               return -EINVAL;
-
-       /* Select window for current tx bit rate */
-       window = &(tbl->win[scale_index]);
-
-       /* Get expected throughput */
-       tpt = iwl4965_get_expected_tpt(tbl, scale_index);
-
-       /*
-        * Keep track of only the latest 62 tx frame attempts in this rate's
-        * history window; anything older isn't really relevant any more.
-        * If we have filled up the sliding window, drop the oldest attempt;
-        * if the oldest attempt (highest bit in bitmap) shows "success",
-        * subtract "1" from the success counter (this is the main reason
-        * we keep these bitmaps!).
-        */
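-       /* For example, 31 successes out of a full 62-frame window gives
-        * success_ratio = 128 * (100 * 31) / 62 = 6400; a window of all
-        * successes scores 12800 (the percentage is scaled by 128). */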
-       while (attempts > 0) {
-               if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
-                       /* remove earliest */
-                       window->counter = IWL_RATE_MAX_WINDOW - 1;
-
-                       if (window->data & mask) {
-                               window->data &= ~mask;
-                               window->success_counter--;
-                       }
-               }
-
-               /* Increment frames-attempted counter */
-               window->counter++;
-
-               /* Shift bitmap by one frame to throw away oldest history */
-               window->data <<= 1;
-
-               /* Mark the most recent #successes attempts as successful */
-               if (successes > 0) {
-                       window->success_counter++;
-                       window->data |= 0x1;
-                       successes--;
-               }
-
-               attempts--;
-       }
-
-       /* Calculate current success ratio, avoid divide-by-0! */
-       if (window->counter > 0)
-               window->success_ratio = 128 * (100 * window->success_counter)
-                                       / window->counter;
-       else
-               window->success_ratio = IWL_INVALID_VALUE;
-
-       fail_count = window->counter - window->success_counter;
-
-       /* Calculate average throughput, if we have enough history. */
-       if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
-           (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
-               window->average_tpt = (window->success_ratio * tpt + 64) / 128;
-       else
-               window->average_tpt = IWL_INVALID_VALUE;
-
-       /* Tag this window as having been updated */
-       window->stamp = jiffies;
-
-       return 0;
-}
-
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
-                                struct iwl_scale_tbl_info *tbl,
-                                int index, u8 use_green)
-{
-       u32 rate_n_flags = 0;
-
-       if (is_legacy(tbl->lq_type)) {
-               rate_n_flags = iwlegacy_rates[index].plcp;
-               if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
-                       rate_n_flags |= RATE_MCS_CCK_MSK;
-
-       } else if (is_Ht(tbl->lq_type)) {
-               if (index > IWL_LAST_OFDM_RATE) {
-                       IWL_ERR(priv, "Invalid HT rate index %d\n", index);
-                       index = IWL_LAST_OFDM_RATE;
-               }
-               rate_n_flags = RATE_MCS_HT_MSK;
-
-               if (is_siso(tbl->lq_type))
-                       rate_n_flags |= iwlegacy_rates[index].plcp_siso;
-               else
-                       rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
-       } else {
-               IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
-       }
-
-       rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
-                                                    RATE_MCS_ANT_ABC_MSK);
-
-       if (is_Ht(tbl->lq_type)) {
-               if (tbl->is_ht40) {
-                       if (tbl->is_dup)
-                               rate_n_flags |= RATE_MCS_DUP_MSK;
-                       else
-                               rate_n_flags |= RATE_MCS_HT40_MSK;
-               }
-               if (tbl->is_SGI)
-                       rate_n_flags |= RATE_MCS_SGI_MSK;
-
-               if (use_green) {
-                       rate_n_flags |= RATE_MCS_GF_MSK;
-                       if (is_siso(tbl->lq_type) && tbl->is_SGI) {
-                               rate_n_flags &= ~RATE_MCS_SGI_MSK;
-                               IWL_ERR(priv, "GF was set with SGI:SISO\n");
-                       }
-               }
-       }
-       return rate_n_flags;
-}
-
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
-                                   enum ieee80211_band band,
-                                   struct iwl_scale_tbl_info *tbl,
-                                   int *rate_idx)
-{
-       u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
-       u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
-       u8 mcs;
-
-       memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
-       *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
-
-       if (*rate_idx  == IWL_RATE_INVALID) {
-               *rate_idx = -1;
-               return -EINVAL;
-       }
-       tbl->is_SGI = 0;        /* default legacy setup */
-       tbl->is_ht40 = 0;
-       tbl->is_dup = 0;
-       tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
-       tbl->lq_type = LQ_NONE;
-       tbl->max_search = IWL_MAX_SEARCH;
-
-       /* legacy rate format */
-       if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
-               if (iwl4965_num_of_ant == 1) {
-                       if (band == IEEE80211_BAND_5GHZ)
-                               tbl->lq_type = LQ_A;
-                       else
-                               tbl->lq_type = LQ_G;
-               }
-       /* HT rate format */
-       } else {
-               if (rate_n_flags & RATE_MCS_SGI_MSK)
-                       tbl->is_SGI = 1;
-
-               if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
-                   (rate_n_flags & RATE_MCS_DUP_MSK))
-                       tbl->is_ht40 = 1;
-
-               if (rate_n_flags & RATE_MCS_DUP_MSK)
-                       tbl->is_dup = 1;
-
-               mcs = iwl4965_rs_extract_rate(rate_n_flags);
-
-               /* SISO */
-               if (mcs <= IWL_RATE_SISO_60M_PLCP) {
-                       if (iwl4965_num_of_ant == 1)
-                               tbl->lq_type = LQ_SISO; /*else NONE*/
-               /* MIMO2 */
-               } else {
-                       if (iwl4965_num_of_ant == 2)
-                               tbl->lq_type = LQ_MIMO2;
-               }
-       }
-       return 0;
-}
-
-/* switch to another antenna/antennas and return 1 */
-/* if no other valid antenna found, return 0 */
-static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
-                            struct iwl_scale_tbl_info *tbl)
-{
-       u8 new_ant_type;
-
-       if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
-               return 0;
-
-       if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
-               return 0;
-
-       new_ant_type = ant_toggle_lookup[tbl->ant_type];
-
-       while ((new_ant_type != tbl->ant_type) &&
-              !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
-               new_ant_type = ant_toggle_lookup[new_ant_type];
-
-       if (new_ant_type == tbl->ant_type)
-               return 0;
-
-       tbl->ant_type = new_ant_type;
-       *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
-       *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
-       return 1;
-}
-
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
-{
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-       return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
-               !(ctx->ht.non_gf_sta_present);
-}
-
-/**
- * iwl4965_rs_get_supported_rates - get the available rates
- *
- * For a management or broadcast frame, only the basic
- * available rates are returned.
- */
-static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
-                                 struct ieee80211_hdr *hdr,
-                                 enum iwl_table_type rate_type)
-{
-       if (is_legacy(rate_type)) {
-               return lq_sta->active_legacy_rate;
-       } else {
-               if (is_siso(rate_type))
-                       return lq_sta->active_siso_rate;
-               else
-                       return lq_sta->active_mimo2_rate;
-       }
-}
-
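-/*
- * Return the nearest valid lower and higher rates packed as (high << 8) | low;
- * a byte of IWL_RATE_INVALID means no such rate exists.  Callers unpack the
- * low byte with '& 0xff' (see iwl4965_rs_get_lower_rate()).
- */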
-static u16
-iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
-                               int rate_type)
-{
-       u8 high = IWL_RATE_INVALID;
-       u8 low = IWL_RATE_INVALID;
-
-       /* 802.11a or HT walks to the literally adjacent rate in
-        * the rate table */
-       if (is_a_band(rate_type) || !is_legacy(rate_type)) {
-               int i;
-               u32 mask;
-
-               /* Find the previous rate that is in the rate mask */
-               i = index - 1;
-               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
-                       if (rate_mask & mask) {
-                               low = i;
-                               break;
-                       }
-               }
-
-               /* Find the next rate that is in the rate mask */
-               i = index + 1;
-               for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
-                       if (rate_mask & mask) {
-                               high = i;
-                               break;
-                       }
-               }
-
-               return (high << 8) | low;
-       }
-
-       low = index;
-       while (low != IWL_RATE_INVALID) {
-               low = iwlegacy_rates[low].prev_rs;
-               if (low == IWL_RATE_INVALID)
-                       break;
-               if (rate_mask & (1 << low))
-                       break;
-               IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
-       }
-
-       high = index;
-       while (high != IWL_RATE_INVALID) {
-               high = iwlegacy_rates[high].next_rs;
-               if (high == IWL_RATE_INVALID)
-                       break;
-               if (rate_mask & (1 << high))
-                       break;
-               IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
-       }
-
-       return (high << 8) | low;
-}
-
-static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
-                            struct iwl_scale_tbl_info *tbl,
-                            u8 scale_index, u8 ht_possible)
-{
-       s32 low;
-       u16 rate_mask;
-       u16 high_low;
-       u8 switch_to_legacy = 0;
-       u8 is_green = lq_sta->is_green;
-       struct iwl_priv *priv = lq_sta->drv;
-
-       /* Check if we need to switch from HT to legacy rates.
-        * The assumption is that the mandatory rates (1 Mbps or 6 Mbps)
-        * are always supported (the spec demands it). */
-       if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
-               switch_to_legacy = 1;
-               scale_index = rs_ht_to_legacy[scale_index];
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
-                       tbl->lq_type = LQ_A;
-               else
-                       tbl->lq_type = LQ_G;
-
-               if (iwl4965_num_of_ant(tbl->ant_type) > 1)
-                       tbl->ant_type =
-                               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
-               tbl->is_ht40 = 0;
-               tbl->is_SGI = 0;
-               tbl->max_search = IWL_MAX_SEARCH;
-       }
-
-       rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
-
-       /* Mask with station rate restriction */
-       if (is_legacy(tbl->lq_type)) {
-               /* supp_rates has no CCK bits in A mode */
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
-                       rate_mask  = (u16)(rate_mask &
-                          (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
-               else
-                       rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
-       }
-
-       /* If we switched from HT to legacy, check current rate */
-       if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
-               low = scale_index;
-               goto out;
-       }
-
-       high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
-                                       scale_index, rate_mask,
-                                       tbl->lq_type);
-       low = high_low & 0xff;
-
-       if (low == IWL_RATE_INVALID)
-               low = scale_index;
-
-out:
-       return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
-}
-
-/*
- * Simple function to compare two rate scale table types
- */
-static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
-                              struct iwl_scale_tbl_info *b)
-{
-       return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
-               (a->is_SGI == b->is_SGI);
-}
-
-/*
- * mac80211 sends us Tx status
- */
-static void
-iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
-                        struct ieee80211_sta *sta, void *priv_sta,
-                        struct sk_buff *skb)
-{
-       int legacy_success;
-       int retries;
-       int rs_index, mac_index, i;
-       struct iwl_lq_sta *lq_sta = priv_sta;
-       struct iwl_link_quality_cmd *table;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_priv *priv = (struct iwl_priv *)priv_r;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       enum mac80211_rate_control_flags mac_flags;
-       u32 tx_rate;
-       struct iwl_scale_tbl_info tbl_type;
-       struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-       IWL_DEBUG_RATE_LIMIT(priv,
-               "get frame ack response, update rate scale window\n");
-
-       /* Treat uninitialized rate scaling data same as non-existing. */
-       if (!lq_sta) {
-               IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
-               return;
-       } else if (!lq_sta->drv) {
-               IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
-               return;
-       }
-
-       if (!ieee80211_is_data(hdr->frame_control) ||
-           info->flags & IEEE80211_TX_CTL_NO_ACK)
-               return;
-
-       /* This packet was aggregated but doesn't carry status info */
-       if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
-           !(info->flags & IEEE80211_TX_STAT_AMPDU))
-               return;
-
-       /*
-        * Ignore this Tx frame response if its initial rate doesn't match
-        * that of latest Link Quality command.  There may be stragglers
-        * from a previous Link Quality command, but we're no longer interested
-        * in those; they're either from the "active" mode while we're trying
-        * to check "search" mode, or a prior "search" mode after we've moved
-        * to a new "search" mode (which might become the new "active" mode).
-        */
-       table = &lq_sta->lq;
-       tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
-       iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
-                        priv->band, &tbl_type, &rs_index);
-       if (priv->band == IEEE80211_BAND_5GHZ)
-               rs_index -= IWL_FIRST_OFDM_RATE;
-       mac_flags = info->status.rates[0].flags;
-       mac_index = info->status.rates[0].idx;
-       /* For HT packets, map MCS to PLCP */
-       if (mac_flags & IEEE80211_TX_RC_MCS) {
-               mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
-               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
-                       mac_index++;
-               /*
-                * mac80211 HT index is always zero-indexed; we need to move
-                * HT OFDM rates after CCK rates in 2.4 GHz band
-                */
-               if (priv->band == IEEE80211_BAND_2GHZ)
-                       mac_index += IWL_FIRST_OFDM_RATE;
-       }
-       /* Here we actually compare this rate to the latest LQ command */
-       if ((mac_index < 0) ||
-           (tbl_type.is_SGI !=
-                       !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-           (tbl_type.is_ht40 !=
-                       !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
-           (tbl_type.is_dup !=
-                       !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
-           (tbl_type.ant_type != info->antenna_sel_tx) ||
-           (!!(tx_rate & RATE_MCS_HT_MSK) !=
-                       !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
-           (!!(tx_rate & RATE_MCS_GF_MSK) !=
-                       !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
-           (rs_index != mac_index)) {
-               IWL_DEBUG_RATE(priv,
-               "initial rate %d does not match %d (0x%x)\n",
-                        mac_index, rs_index, tx_rate);
-               /*
-                * Since the rates mismatch, the last LQ command may have
-                * failed.  After IWL_MISSED_RATE_MAX mismatches, resync
-                * the uCode with the driver.
-                */
-               lq_sta->missed_rate_counter++;
-               if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
-                       lq_sta->missed_rate_counter = 0;
-                       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
-                                                       CMD_ASYNC, false);
-               }
-               /* Regardless, ignore this status info for outdated rate */
-               return;
-       } else
-               /* Rate did match, so reset the missed_rate_counter */
-               lq_sta->missed_rate_counter = 0;
-
-       /* Figure out if rate scale algorithm is in active or search table */
-       if (iwl4965_table_type_matches(&tbl_type,
-                               &(lq_sta->lq_info[lq_sta->active_tbl]))) {
-               curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-               other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-       } else if (iwl4965_table_type_matches(&tbl_type,
-                               &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
-               curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-               other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-       } else {
-               IWL_DEBUG_RATE(priv,
-                       "Neither active nor search matches tx rate\n");
-               tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-               IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
-                       tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
-               tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-               IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
-                       tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
-               IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
-                       tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
-               /*
-                * No matching table found; bypass the data collection
-                * and continue performing rate scaling to find the rate table.
-                */
-               iwl4965_rs_stay_in_table(lq_sta, true);
-               goto done;
-       }
-
-       /*
-        * Updating the frame history depends on whether packets were
-        * aggregated.
-        *
-        * For aggregation, all packets were transmitted at the same rate, the
-        * first index into rate scale table.
-        */
-       if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-               tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
-               iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
-                               &rs_index);
-               iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
-                                  info->status.ampdu_len,
-                                  info->status.ampdu_ack_len);
-
-               /* Update success/fail counts if not searching for new mode */
-               if (lq_sta->stay_in_tbl) {
-                       lq_sta->total_success += info->status.ampdu_ack_len;
-                       lq_sta->total_failed += (info->status.ampdu_len -
-                                       info->status.ampdu_ack_len);
-               }
-       } else {
-       /*
-        * For legacy, update the frame history for each Tx retry.
-        */
-               retries = info->status.rates[0].count - 1;
-               /* HW doesn't send more than 15 retries */
-               retries = min(retries, 15);
-
-               /* The last transmission may have been successful */
-               legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
-               /* Collect data for each rate used during failed TX attempts */
-               for (i = 0; i <= retries; ++i) {
-                       tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
-                       iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
-                                       &tbl_type, &rs_index);
-                       /*
-                        * Only collect stats if retried rate is in the same RS
-                        * table as active/search.
-                        */
-                       if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
-                               tmp_tbl = curr_tbl;
-                       else if (iwl4965_table_type_matches(&tbl_type,
-                                                                other_tbl))
-                               tmp_tbl = other_tbl;
-                       else
-                               continue;
-                       iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
-                                          i < retries ? 0 : legacy_success);
-               }
-
-               /* Update success/fail counts if not searching for new mode */
-               if (lq_sta->stay_in_tbl) {
-                       lq_sta->total_success += legacy_success;
-                       lq_sta->total_failed += retries + (1 - legacy_success);
-               }
-       }
-       /* The last TX rate is cached in lq_sta; it's set in if/else above */
-       lq_sta->last_rate_n_flags = tx_rate;
-done:
-       /* See if there's a better rate or modulation mode to try. */
-       if (sta && sta->supp_rates[sband->band])
-               iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
-}
-
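
Editor's note, not part of this patch: in the non-aggregated branch above, every retry is recorded as a failure and only the final attempt is recorded as a success, and only when the frame was ACKed. A minimal standalone sketch of that accounting follows; collect() and MAX_RETRIES are hypothetical stand-ins for the driver's iwl4965_rs_collect_tx_data() and the 15-retry hardware cap.

/* Illustrative sketch only; helper names are placeholders. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 15

/* Record one attempt at rate index 'rs_index': 'frames' sent, 'success' ACKed. */
static void collect(int rs_index, int frames, int success)
{
	printf("rate idx %d: frames=%d acked=%d\n", rs_index, frames, success);
}

static void account_legacy(const int *rate_idx_per_try, int count, bool acked)
{
	int retries = count - 1;
	int i;

	if (retries > MAX_RETRIES)
		retries = MAX_RETRIES;

	/* Every attempt except the last one failed by definition; the last
	 * one succeeded only if the frame was finally ACKed. */
	for (i = 0; i <= retries; i++)
		collect(rate_idx_per_try[i], 1, i < retries ? 0 : acked);
}

int main(void)
{
	int tries[] = { 7, 6, 5 };	/* hypothetical per-try rate indices */

	account_legacy(tries, 3, true);
	return 0;
}
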
-/*
- * Begin a period of staying with a selected modulation mode.
- * Set "stay_in_tbl" flag to prevent any mode switches.
- * Set frame tx success limits according to legacy vs. high-throughput,
- * and reset overall (spanning all rates) tx success history statistics.
- * These control how long we stay using the same modulation mode before
- * searching for a new mode.
- */
-static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
-                                struct iwl_lq_sta *lq_sta)
-{
-       IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
-       lq_sta->stay_in_tbl = 1;        /* only place this gets set */
-       if (is_legacy) {
-               lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
-               lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
-               lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
-       } else {
-               lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
-               lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
-               lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
-       }
-       lq_sta->table_count = 0;
-       lq_sta->total_failed = 0;
-       lq_sta->total_success = 0;
-       lq_sta->flush_timer = jiffies;
-       lq_sta->action_counter = 0;
-}
-
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
-                                     struct iwl_scale_tbl_info *tbl)
-{
-       /* Used to choose among HT tables */
-       s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
-
-       /* Check for invalid LQ type */
-       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
-               tbl->expected_tpt = expected_tpt_legacy;
-               return;
-       }
-
-       /* Legacy rates have only one table */
-       if (is_legacy(tbl->lq_type)) {
-               tbl->expected_tpt = expected_tpt_legacy;
-               return;
-       }
-
-       /* Choose among many HT tables depending on number of streams
-        * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
-        * status */
-       if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
-               ht_tbl_pointer = expected_tpt_siso20MHz;
-       else if (is_siso(tbl->lq_type))
-               ht_tbl_pointer = expected_tpt_siso40MHz;
-       else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
-               ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-       else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
-               ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-
-       if (!tbl->is_SGI && !lq_sta->is_agg)            /* Normal */
-               tbl->expected_tpt = ht_tbl_pointer[0];
-       else if (tbl->is_SGI && !lq_sta->is_agg)        /* SGI */
-               tbl->expected_tpt = ht_tbl_pointer[1];
-       else if (!tbl->is_SGI && lq_sta->is_agg)        /* AGG */
-               tbl->expected_tpt = ht_tbl_pointer[2];
-       else                                            /* AGG+SGI */
-               tbl->expected_tpt = ht_tbl_pointer[3];
-}
-
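
Editor's note, not part of this patch: the SGI/aggregation combination handled above selects one of four columns of the per-mode expected-throughput table (normal, SGI, AGG, AGG+SGI). The sketch below mirrors only that column selection; the table name, its shortened length and its values are made-up placeholders.

/* Illustrative sketch only; table contents are placeholders. */
#include <stdbool.h>
#include <stdio.h>

#define RATE_COUNT 3	/* shortened for the example */

static const int tpt_siso20[4][RATE_COUNT] = {
	{ 42, 78, 107 },	/* normal GI, no aggregation */
	{ 46, 83, 113 },	/* short GI */
	{ 49, 97, 145 },	/* aggregation */
	{ 54, 108, 160 },	/* aggregation + short GI */
};

static const int *pick_column(bool is_sgi, bool is_agg)
{
	/* Column 0..3 exactly as in the branch ladder above. */
	int col = (is_sgi ? 1 : 0) + (is_agg ? 2 : 0);

	return tpt_siso20[col];
}

int main(void)
{
	const int *tpt = pick_column(true, true);

	printf("expected tpt at index 2: %d\n", tpt[2]);
	return 0;
}
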
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput.  When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
-static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
-                           struct iwl_lq_sta *lq_sta,
-                           struct iwl_scale_tbl_info *tbl,     /* "search" */
-                           u16 rate_mask, s8 index)
-{
-       /* "active" values */
-       struct iwl_scale_tbl_info *active_tbl =
-           &(lq_sta->lq_info[lq_sta->active_tbl]);
-       s32 active_sr = active_tbl->win[index].success_ratio;
-       s32 active_tpt = active_tbl->expected_tpt[index];
-
-       /* expected "search" throughput */
-       s32 *tpt_tbl = tbl->expected_tpt;
-
-       s32 new_rate, high, low, start_hi;
-       u16 high_low;
-       s8 rate = index;
-
-       new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
-       for (; ;) {
-               high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
-                                               tbl->lq_type);
-
-               low = high_low & 0xff;
-               high = (high_low >> 8) & 0xff;
-
-               /*
-                * Lower the "search" bit rate, to give new "search" mode
-                * approximately the same throughput as "active" if:
-                *
-                * 1) "Active" mode has been working modestly well (but not
-                *    great), and expected "search" throughput (under perfect
-                *    conditions) at candidate rate is above the actual
-                *    measured "active" throughput (but less than expected
-                *    "active" throughput under perfect conditions).
-                * OR
-                * 2) "Active" mode has been working perfectly or very well
-                *    and expected "search" throughput (under perfect
-                *    conditions) at candidate rate is above expected
-                *    "active" throughput (under perfect conditions).
-                */
-               if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
-                    ((active_sr > IWL_RATE_DECREASE_TH) &&
-                     (active_sr <= IWL_RATE_HIGH_TH) &&
-                     (tpt_tbl[rate] <= active_tpt))) ||
-                   ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
-                    (tpt_tbl[rate] > active_tpt))) {
-
-                       /* (2nd or later pass)
-                        * If we've already tried to raise the rate, and are
-                        * now trying to lower it, use the higher rate. */
-                       if (start_hi != IWL_RATE_INVALID) {
-                               new_rate = start_hi;
-                               break;
-                       }
-
-                       new_rate = rate;
-
-                       /* Loop again with lower rate */
-                       if (low != IWL_RATE_INVALID)
-                               rate = low;
-
-                       /* Lower rate not available, use the original */
-                       else
-                               break;
-
-               /* Else try to raise the "search" rate to match "active" */
-               } else {
-                       /* (2nd or later pass)
-                        * If we've already tried to lower the rate, and are
-                        * now trying to raise it, use the lower rate. */
-                       if (new_rate != IWL_RATE_INVALID)
-                               break;
-
-                       /* Loop again with higher rate */
-                       else if (high != IWL_RATE_INVALID) {
-                               start_hi = high;
-                               rate = high;
-
-                       /* Higher rate not available, use the original */
-                       } else {
-                               new_rate = rate;
-                               break;
-                       }
-               }
-       }
-
-       return new_rate;
-}
-
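
Editor's note, not part of this patch: the loop above relies on iwl4965_rs_get_adjacent_rate() packing the next-lower and next-higher usable rate indices into one 16-bit value (low byte / high byte), which it then unpacks each iteration. A standalone sketch of that packing convention, with RATE_INVALID as a placeholder for IWL_RATE_INVALID:

/* Illustrative sketch only; constants are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define RATE_INVALID 0xff

static uint16_t pack_adjacent(uint8_t low, uint8_t high)
{
	/* low rate in bits 0..7, high rate in bits 8..15 */
	return (uint16_t)((high << 8) | low);
}

int main(void)
{
	uint16_t high_low = pack_adjacent(3, RATE_INVALID);
	unsigned int low = high_low & 0xff;
	unsigned int high = (high_low >> 8) & 0xff;

	printf("low=%u high=%s\n", low,
	       high == RATE_INVALID ? "invalid" : "valid");
	return 0;
}
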
-/*
- * Set up search table for MIMO2
- */
-static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
-                            struct iwl_lq_sta *lq_sta,
-                            struct ieee80211_conf *conf,
-                            struct ieee80211_sta *sta,
-                            struct iwl_scale_tbl_info *tbl, int index)
-{
-       u16 rate_mask;
-       s32 rate;
-       s8 is_green = lq_sta->is_green;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
-               return -1;
-
-       if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
-                                               == WLAN_HT_CAP_SM_PS_STATIC)
-               return -1;
-
-       /* Need both Tx chains/antennas to support MIMO */
-       if (priv->hw_params.tx_chains_num < 2)
-               return -1;
-
-       IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
-
-       tbl->lq_type = LQ_MIMO2;
-       tbl->is_dup = lq_sta->is_dup;
-       tbl->action = 0;
-       tbl->max_search = IWL_MAX_SEARCH;
-       rate_mask = lq_sta->active_mimo2_rate;
-
-       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
-               tbl->is_ht40 = 1;
-       else
-               tbl->is_ht40 = 0;
-
-       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-
-       rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
-       IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
-                               rate, rate_mask);
-       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-               IWL_DEBUG_RATE(priv,
-                               "Can't switch with index %d rate mask %x\n",
-                                               rate, rate_mask);
-               return -1;
-       }
-       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
-                                                tbl, rate, is_green);
-
-       IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
-                    tbl->current_rate, is_green);
-       return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
-                            struct iwl_lq_sta *lq_sta,
-                            struct ieee80211_conf *conf,
-                            struct ieee80211_sta *sta,
-                            struct iwl_scale_tbl_info *tbl, int index)
-{
-       u16 rate_mask;
-       u8 is_green = lq_sta->is_green;
-       s32 rate;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-       if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
-               return -1;
-
-       IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
-
-       tbl->is_dup = lq_sta->is_dup;
-       tbl->lq_type = LQ_SISO;
-       tbl->action = 0;
-       tbl->max_search = IWL_MAX_SEARCH;
-       rate_mask = lq_sta->active_siso_rate;
-
-       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
-               tbl->is_ht40 = 1;
-       else
-               tbl->is_ht40 = 0;
-
-       if (is_green)
-               tbl->is_SGI = 0; /* 11n spec: no SGI in SISO+Greenfield */
-
-       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-       rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
-       IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
-       if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-               IWL_DEBUG_RATE(priv,
-                       "can not switch with index %d rate mask %x\n",
-                            rate, rate_mask);
-               return -1;
-       }
-       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
-                                               tbl, rate, is_green);
-       IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
-                    tbl->current_rate, is_green);
-       return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
-                               struct iwl_lq_sta *lq_sta,
-                               struct ieee80211_conf *conf,
-                               struct ieee80211_sta *sta,
-                               int index)
-{
-       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-       struct iwl_scale_tbl_info *search_tbl =
-                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-       struct iwl_rate_scale_data *window = &(tbl->win[index]);
-       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-       u8 start_action;
-       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
-       u8 tx_chains_num = priv->hw_params.tx_chains_num;
-       int ret = 0;
-       u8 update_search_tbl_counter = 0;
-
-       tbl->action = IWL_LEGACY_SWITCH_SISO;
-
-       start_action = tbl->action;
-       for (; ;) {
-               lq_sta->action_counter++;
-               switch (tbl->action) {
-               case IWL_LEGACY_SWITCH_ANTENNA1:
-               case IWL_LEGACY_SWITCH_ANTENNA2:
-                       IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
-
-                       if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
-                                                       tx_chains_num <= 1) ||
-                           (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
-                                                       tx_chains_num <= 2))
-                               break;
-
-                       /* Don't change antenna if success has been great */
-                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-                               break;
-
-                       /* Set up search table to try other antenna */
-                       memcpy(search_tbl, tbl, sz);
-
-                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
-                               &search_tbl->current_rate, search_tbl)) {
-                               update_search_tbl_counter = 1;
-                               iwl4965_rs_set_expected_tpt_table(lq_sta,
-                                                               search_tbl);
-                               goto out;
-                       }
-                       break;
-               case IWL_LEGACY_SWITCH_SISO:
-                       IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
-
-                       /* Set up search table to try SISO */
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-                       ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
-                                                search_tbl, index);
-                       if (!ret) {
-                               lq_sta->action_counter = 0;
-                               goto out;
-                       }
-
-                       break;
-               case IWL_LEGACY_SWITCH_MIMO2_AB:
-               case IWL_LEGACY_SWITCH_MIMO2_AC:
-               case IWL_LEGACY_SWITCH_MIMO2_BC:
-                       IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
-
-                       /* Set up search table to try MIMO */
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-
-                       if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
-                               search_tbl->ant_type = ANT_AB;
-                       else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
-                               search_tbl->ant_type = ANT_AC;
-                       else
-                               search_tbl->ant_type = ANT_BC;
-
-                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
-                                               search_tbl->ant_type))
-                               break;
-
-                       ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
-                                               conf, sta,
-                                                search_tbl, index);
-                       if (!ret) {
-                               lq_sta->action_counter = 0;
-                               goto out;
-                       }
-                       break;
-               }
-               tbl->action++;
-               if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
-                       tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-
-               if (tbl->action == start_action)
-                       break;
-
-       }
-       search_tbl->lq_type = LQ_NONE;
-       return 0;
-
-out:
-       lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
-               tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-       if (update_search_tbl_counter)
-               search_tbl->action = tbl->action;
-       return 0;
-
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
-{
-       u8 is_green = lq_sta->is_green;
-       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-       struct iwl_scale_tbl_info *search_tbl =
-                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-       struct iwl_rate_scale_data *window = &(tbl->win[index]);
-       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-       u8 start_action;
-       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
-       u8 tx_chains_num = priv->hw_params.tx_chains_num;
-       u8 update_search_tbl_counter = 0;
-       int ret;
-
-       start_action = tbl->action;
-
-       for (;;) {
-               lq_sta->action_counter++;
-               switch (tbl->action) {
-               case IWL_SISO_SWITCH_ANTENNA1:
-               case IWL_SISO_SWITCH_ANTENNA2:
-                       IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
-                       if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
-                                               tx_chains_num <= 1) ||
-                           (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
-                                               tx_chains_num <= 2))
-                               break;
-
-                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-                               break;
-
-                       memcpy(search_tbl, tbl, sz);
-                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
-                                      &search_tbl->current_rate, search_tbl)) {
-                               update_search_tbl_counter = 1;
-                               goto out;
-                       }
-                       break;
-               case IWL_SISO_SWITCH_MIMO2_AB:
-               case IWL_SISO_SWITCH_MIMO2_AC:
-               case IWL_SISO_SWITCH_MIMO2_BC:
-                       IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = 0;
-
-                       if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
-                               search_tbl->ant_type = ANT_AB;
-                       else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
-                               search_tbl->ant_type = ANT_AC;
-                       else
-                               search_tbl->ant_type = ANT_BC;
-
-                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
-                                                search_tbl->ant_type))
-                               break;
-
-                       ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
-                                               conf, sta,
-                                                search_tbl, index);
-                       if (!ret)
-                               goto out;
-                       break;
-               case IWL_SISO_SWITCH_GI:
-                       if (!tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_20))
-                               break;
-                       if (tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_40))
-                               break;
-
-                       IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
-
-                       memcpy(search_tbl, tbl, sz);
-                       if (is_green) {
-                               if (!tbl->is_SGI)
-                                       break;
-                               else
-                                       IWL_ERR(priv,
-                                               "SGI was set in GF+SISO\n");
-                       }
-                       search_tbl->is_SGI = !tbl->is_SGI;
-                       iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
-                       if (tbl->is_SGI) {
-                               s32 tpt = lq_sta->last_tpt / 100;
-                               if (tpt >= search_tbl->expected_tpt[index])
-                                       break;
-                       }
-                       search_tbl->current_rate =
-                               iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
-                                                     index, is_green);
-                       update_search_tbl_counter = 1;
-                       goto out;
-               }
-               tbl->action++;
-               if (tbl->action > IWL_SISO_SWITCH_GI)
-                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-
-               if (tbl->action == start_action)
-                       break;
-       }
-       search_tbl->lq_type = LQ_NONE;
-       return 0;
-
- out:
-       lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_SISO_SWITCH_GI)
-               tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-       if (update_search_tbl_counter)
-               search_tbl->action = tbl->action;
-
-       return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
-{
-       s8 is_green = lq_sta->is_green;
-       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-       struct iwl_scale_tbl_info *search_tbl =
-                               &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-       struct iwl_rate_scale_data *window = &(tbl->win[index]);
-       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-                 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
-       u8 start_action;
-       u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
-       u8 tx_chains_num = priv->hw_params.tx_chains_num;
-       u8 update_search_tbl_counter = 0;
-       int ret;
-
-       start_action = tbl->action;
-       for (;;) {
-               lq_sta->action_counter++;
-               switch (tbl->action) {
-               case IWL_MIMO2_SWITCH_ANTENNA1:
-               case IWL_MIMO2_SWITCH_ANTENNA2:
-                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
-
-                       if (tx_chains_num <= 2)
-                               break;
-
-                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-                               break;
-
-                       memcpy(search_tbl, tbl, sz);
-                       if (iwl4965_rs_toggle_antenna(valid_tx_ant,
-                                      &search_tbl->current_rate, search_tbl)) {
-                               update_search_tbl_counter = 1;
-                               goto out;
-                       }
-                       break;
-               case IWL_MIMO2_SWITCH_SISO_A:
-               case IWL_MIMO2_SWITCH_SISO_B:
-               case IWL_MIMO2_SWITCH_SISO_C:
-                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
-
-                       /* Set up new search table for SISO */
-                       memcpy(search_tbl, tbl, sz);
-
-                       if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
-                               search_tbl->ant_type = ANT_A;
-                       else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
-                               search_tbl->ant_type = ANT_B;
-                       else
-                               search_tbl->ant_type = ANT_C;
-
-                       if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
-                                               search_tbl->ant_type))
-                               break;
-
-                       ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
-                                               conf, sta,
-                                                search_tbl, index);
-                       if (!ret)
-                               goto out;
-
-                       break;
-
-               case IWL_MIMO2_SWITCH_GI:
-                       if (!tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_20))
-                               break;
-                       if (tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_40))
-                               break;
-
-                       IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
-
-                       /* Set up new search table for MIMO2 */
-                       memcpy(search_tbl, tbl, sz);
-                       search_tbl->is_SGI = !tbl->is_SGI;
-                       iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
-                       /*
-                        * If active table already uses the fastest possible
-                        * modulation (dual stream with short guard interval),
-                        * and it's working well, there's no need to look
-                        * for a better type of modulation!
-                        */
-                       if (tbl->is_SGI) {
-                               s32 tpt = lq_sta->last_tpt / 100;
-                               if (tpt >= search_tbl->expected_tpt[index])
-                                       break;
-                       }
-                       search_tbl->current_rate =
-                               iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
-                                                     index, is_green);
-                       update_search_tbl_counter = 1;
-                       goto out;
-
-               }
-               tbl->action++;
-               if (tbl->action > IWL_MIMO2_SWITCH_GI)
-                       tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-
-               if (tbl->action == start_action)
-                       break;
-       }
-       search_tbl->lq_type = LQ_NONE;
-       return 0;
- out:
-       lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_MIMO2_SWITCH_GI)
-               tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-       if (update_search_tbl_counter)
-               search_tbl->action = tbl->action;
-
-       return 0;
-
-}
-
-/*
- * Check whether we should continue using the same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
- */
-static void
-iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
-{
-       struct iwl_scale_tbl_info *tbl;
-       int i;
-       int active_tbl;
-       int flush_interval_passed = 0;
-       struct iwl_priv *priv;
-
-       priv = lq_sta->drv;
-       active_tbl = lq_sta->active_tbl;
-
-       tbl = &(lq_sta->lq_info[active_tbl]);
-
-       /* If we've been disallowing search, see if we should now allow it */
-       if (lq_sta->stay_in_tbl) {
-
-               /* Elapsed time using current modulation mode */
-               if (lq_sta->flush_timer)
-                       flush_interval_passed =
-                       time_after(jiffies,
-                                       (unsigned long)(lq_sta->flush_timer +
-                                       IWL_RATE_SCALE_FLUSH_INTVL));
-
-               /*
-                * Check if we should allow search for new modulation mode.
-                * If many frames have failed or succeeded, or we've used
-                * this same modulation for a long time, allow search, and
-                * reset history stats that keep track of whether we should
-                * allow a new search.  Also (below) reset all bitmaps and
-                * stats in active history.
-                */
-               if (force_search ||
-                   (lq_sta->total_failed > lq_sta->max_failure_limit) ||
-                   (lq_sta->total_success > lq_sta->max_success_limit) ||
-                   ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
-                    && (flush_interval_passed))) {
-                       IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
-                                    lq_sta->total_failed,
-                                    lq_sta->total_success,
-                                    flush_interval_passed);
-
-                       /* Allow search for new mode */
-                       lq_sta->stay_in_tbl = 0;        /* only place reset */
-                       lq_sta->total_failed = 0;
-                       lq_sta->total_success = 0;
-                       lq_sta->flush_timer = 0;
-
-               /*
-                * Else if we've used this modulation mode enough repetitions
-                * (regardless of elapsed time or success/failure), reset
-                * history bitmaps and rate-specific stats for all rates in
-                * active table.
-                */
-               } else {
-                       lq_sta->table_count++;
-                       if (lq_sta->table_count >=
-                           lq_sta->table_count_limit) {
-                               lq_sta->table_count = 0;
-
-                               IWL_DEBUG_RATE(priv,
-                                       "LQ: stay in table clear win\n");
-                               for (i = 0; i < IWL_RATE_COUNT; i++)
-                                       iwl4965_rs_rate_scale_clear_window(
-                                               &(tbl->win[i]));
-                       }
-               }
-
-               /* If transitioning to allow "search", reset all history
-                * bitmaps and stats in active table (this will become the new
-                * "search" table). */
-               if (!lq_sta->stay_in_tbl) {
-                       for (i = 0; i < IWL_RATE_COUNT; i++)
-                               iwl4965_rs_rate_scale_clear_window(
-                                                       &(tbl->win[i]));
-               }
-       }
-}
-
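
Editor's note, not part of this patch: the condition above that ends a "stay in table" period, reduced to a standalone predicate. The struct fields are local stand-ins for the lq_sta members, and the elapsed-time test is simplified to a plain subtraction instead of the kernel's time_after()/jiffies helpers.

/* Illustrative sketch only; field names and time handling are simplified. */
#include <stdbool.h>
#include <stdio.h>

struct stay_state {
	int total_failed, max_failure_limit;
	int total_success, max_success_limit;
	unsigned long flush_timer, now, flush_interval;
	bool search_better_tbl;
};

static bool allow_search(const struct stay_state *s, bool force_search)
{
	bool interval_passed = s->flush_timer &&
		s->now - s->flush_timer > s->flush_interval;

	return force_search ||
	       s->total_failed > s->max_failure_limit ||
	       s->total_success > s->max_success_limit ||
	       (!s->search_better_tbl && interval_passed);
}

int main(void)
{
	struct stay_state s = {
		.total_failed = 10, .max_failure_limit = 160,
		.total_success = 500, .max_success_limit = 480,
	};

	printf("allow search: %d\n", allow_search(&s, false));
	return 0;
}
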
-/*
- * Set up the rate table in uCode and
- * return the rate_n_flags value as used in the table.
- */
-static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
-                             struct iwl_rxon_context *ctx,
-                               struct iwl_lq_sta *lq_sta,
-                               struct iwl_scale_tbl_info *tbl,
-                               int index, u8 is_green)
-{
-       u32 rate;
-
-       /* Update uCode's rate table. */
-       rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
-       iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
-       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
-
-       return rate;
-}
-
-/*
- * Do rate scaling and search for new modulation mode.
- */
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
-                                 struct sk_buff *skb,
-                                 struct ieee80211_sta *sta,
-                                 struct iwl_lq_sta *lq_sta)
-{
-       struct ieee80211_hw *hw = priv->hw;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       int low = IWL_RATE_INVALID;
-       int high = IWL_RATE_INVALID;
-       int index;
-       int i;
-       struct iwl_rate_scale_data *window = NULL;
-       int current_tpt = IWL_INVALID_VALUE;
-       int low_tpt = IWL_INVALID_VALUE;
-       int high_tpt = IWL_INVALID_VALUE;
-       u32 fail_count;
-       s8 scale_action = 0;
-       u16 rate_mask;
-       u8 update_lq = 0;
-       struct iwl_scale_tbl_info *tbl, *tbl1;
-       u16 rate_scale_index_msk = 0;
-       u32 rate;
-       u8 is_green = 0;
-       u8 active_tbl = 0;
-       u8 done_search = 0;
-       u16 high_low;
-       s32 sr;
-       u8 tid = MAX_TID_COUNT;
-       struct iwl_tid_data *tid_data;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-       IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
-
-       /* Send management frames and NO_ACK data using lowest rate. */
-       /* TODO: this could probably be improved.. */
-       if (!ieee80211_is_data(hdr->frame_control) ||
-           info->flags & IEEE80211_TX_CTL_NO_ACK)
-               return;
-
-       if (!sta || !lq_sta)
-               return;
-
-       lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
-       tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
-       if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
-               tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
-               if (tid_data->agg.state == IWL_AGG_OFF)
-                       lq_sta->is_agg = 0;
-               else
-                       lq_sta->is_agg = 1;
-       } else
-               lq_sta->is_agg = 0;
-
-       /*
-        * Select rate-scale / modulation-mode table to work with in
-        * the rest of this function:  "search" if searching for better
-        * modulation mode, or "active" if doing rate scaling within a mode.
-        */
-       if (!lq_sta->search_better_tbl)
-               active_tbl = lq_sta->active_tbl;
-       else
-               active_tbl = 1 - lq_sta->active_tbl;
-
-       tbl = &(lq_sta->lq_info[active_tbl]);
-       if (is_legacy(tbl->lq_type))
-               lq_sta->is_green = 0;
-       else
-               lq_sta->is_green = iwl4965_rs_use_green(sta);
-       is_green = lq_sta->is_green;
-
-       /* current tx rate */
-       index = lq_sta->last_txrate_idx;
-
-       IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
-                      tbl->lq_type);
-
-       /* rates available for this association, and for modulation mode */
-       rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
-
-       IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
-
-       /* mask with station rate restriction */
-       if (is_legacy(tbl->lq_type)) {
-               if (lq_sta->band == IEEE80211_BAND_5GHZ)
-                       /* supp_rates has no CCK bits in A mode */
-                       rate_scale_index_msk = (u16) (rate_mask &
-                               (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
-               else
-                       rate_scale_index_msk = (u16) (rate_mask &
-                                                     lq_sta->supp_rates);
-
-       } else
-               rate_scale_index_msk = rate_mask;
-
-       if (!rate_scale_index_msk)
-               rate_scale_index_msk = rate_mask;
-
-       if (!((1 << index) & rate_scale_index_msk)) {
-               IWL_ERR(priv, "Current Rate is not valid\n");
-               if (lq_sta->search_better_tbl) {
-                       /* revert to active table if search table is not valid*/
-                       tbl->lq_type = LQ_NONE;
-                       lq_sta->search_better_tbl = 0;
-                       tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-                       /* get "active" rate info */
-                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-                       rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
-                                                 tbl, index, is_green);
-               }
-               return;
-       }
-
-       /* Get expected throughput table and history window for current rate */
-       if (!tbl->expected_tpt) {
-               IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
-               return;
-       }
-
-       /* force user max rate if set by user */
-       if ((lq_sta->max_rate_idx != -1) &&
-           (lq_sta->max_rate_idx < index)) {
-               index = lq_sta->max_rate_idx;
-               update_lq = 1;
-               window = &(tbl->win[index]);
-               goto lq_update;
-       }
-
-       window = &(tbl->win[index]);
-
-       /*
-        * If there is not enough history to calculate actual average
-        * throughput, keep analyzing results of more tx frames, without
-        * changing rate or mode (bypass most of the rest of this function).
-        * Set up new rate table in uCode only if old rate is not supported
-        * in current association (use new rate found above).
-        */
-       fail_count = window->counter - window->success_counter;
-       if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
-                       (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
-               IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
-                              "for index %d\n",
-                              window->success_counter, window->counter, index);
-
-               /* Can't calculate this yet; not enough history */
-               window->average_tpt = IWL_INVALID_VALUE;
-
-               /* Should we stay with this modulation mode,
-                * or search for a new one? */
-               iwl4965_rs_stay_in_table(lq_sta, false);
-
-               goto out;
-       }
-       /* Else we have enough samples; calculate estimate of
-        * actual average throughput */
-       if (window->average_tpt != ((window->success_ratio *
-                       tbl->expected_tpt[index] + 64) / 128)) {
-               IWL_ERR(priv,
-                        "expected_tpt should have been calculated by now\n");
-               window->average_tpt = ((window->success_ratio *
-                                       tbl->expected_tpt[index] + 64) / 128);
-       }
-
-       /* If we are searching for better modulation mode, check success. */
-       if (lq_sta->search_better_tbl) {
-               /* If good success, continue using the "search" mode;
-                * no need to send new link quality command, since we're
-                * continuing to use the setup that we've been trying. */
-               if (window->average_tpt > lq_sta->last_tpt) {
-
-                       IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
-                                       "suc=%d cur-tpt=%d old-tpt=%d\n",
-                                       window->success_ratio,
-                                       window->average_tpt,
-                                       lq_sta->last_tpt);
-
-                       if (!is_legacy(tbl->lq_type))
-                               lq_sta->enable_counter = 1;
-
-                       /* Swap tables; "search" becomes "active" */
-                       lq_sta->active_tbl = active_tbl;
-                       current_tpt = window->average_tpt;
-
-               /* Else poor success; go back to mode in "active" table */
-               } else {
-
-                       IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
-                                       "suc=%d cur-tpt=%d old-tpt=%d\n",
-                                       window->success_ratio,
-                                       window->average_tpt,
-                                       lq_sta->last_tpt);
-
-                       /* Nullify "search" table */
-                       tbl->lq_type = LQ_NONE;
-
-                       /* Revert to "active" table */
-                       active_tbl = lq_sta->active_tbl;
-                       tbl = &(lq_sta->lq_info[active_tbl]);
-
-                       /* Revert to "active" rate and throughput info */
-                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-                       current_tpt = lq_sta->last_tpt;
-
-                       /* Need to set up a new rate table in uCode */
-                       update_lq = 1;
-               }
-
-               /* Either way, we've made a decision; modulation mode
-                * search is done, allow rate adjustment next time. */
-               lq_sta->search_better_tbl = 0;
-               done_search = 1;        /* Don't switch modes below! */
-               goto lq_update;
-       }
-
-       /* (Else) not in search of better modulation mode, try for better
-        * starting rate, while staying in this mode. */
-       high_low = iwl4965_rs_get_adjacent_rate(priv, index,
-                                       rate_scale_index_msk,
-                                       tbl->lq_type);
-       low = high_low & 0xff;
-       high = (high_low >> 8) & 0xff;
-
-       /* If the user set a max rate, don't allow a rate higher than that constraint */
-       if ((lq_sta->max_rate_idx != -1) &&
-           (lq_sta->max_rate_idx < high))
-               high = IWL_RATE_INVALID;
-
-       sr = window->success_ratio;
-
-       /* Collect measured throughputs for current and adjacent rates */
-       current_tpt = window->average_tpt;
-       if (low != IWL_RATE_INVALID)
-               low_tpt = tbl->win[low].average_tpt;
-       if (high != IWL_RATE_INVALID)
-               high_tpt = tbl->win[high].average_tpt;
-
-       scale_action = 0;
-
-       /* Too many failures, decrease rate */
-       if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
-               IWL_DEBUG_RATE(priv,
-                       "decrease rate because of low success_ratio\n");
-               scale_action = -1;
-
-       /* No throughput measured yet for adjacent rates; try increase. */
-       } else if ((low_tpt == IWL_INVALID_VALUE) &&
-                  (high_tpt == IWL_INVALID_VALUE)) {
-
-               if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
-                       scale_action = 1;
-               else if (low != IWL_RATE_INVALID)
-                       scale_action = 0;
-       }
-
-       /* Both adjacent throughputs are measured, but neither one has better
-        * throughput; we're using the best rate, don't change it! */
-       else if ((low_tpt != IWL_INVALID_VALUE) &&
-                (high_tpt != IWL_INVALID_VALUE) &&
-                (low_tpt < current_tpt) &&
-                (high_tpt < current_tpt))
-               scale_action = 0;
-
-       /* At least one adjacent rate's throughput is measured,
-        * and may have better performance. */
-       else {
-               /* Higher adjacent rate's throughput is measured */
-               if (high_tpt != IWL_INVALID_VALUE) {
-                       /* Higher rate has better throughput */
-                       if (high_tpt > current_tpt &&
-                                       sr >= IWL_RATE_INCREASE_TH) {
-                               scale_action = 1;
-                       } else {
-                               scale_action = 0;
-                       }
-
-               /* Lower adjacent rate's throughput is measured */
-               } else if (low_tpt != IWL_INVALID_VALUE) {
-                       /* Lower rate has better throughput */
-                       if (low_tpt > current_tpt) {
-                               IWL_DEBUG_RATE(priv,
-                                   "decrease rate because of low tpt\n");
-                               scale_action = -1;
-                       } else if (sr >= IWL_RATE_INCREASE_TH) {
-                               scale_action = 1;
-                       }
-               }
-       }
-
-       /* Sanity check; asked for decrease, but success rate or throughput
-        * has been good at old rate.  Don't change it. */
-       if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
-                   ((sr > IWL_RATE_HIGH_TH) ||
-                    (current_tpt > (100 * tbl->expected_tpt[low]))))
-               scale_action = 0;
-
-       switch (scale_action) {
-       case -1:
-               /* Decrease starting rate, update uCode's rate table */
-               if (low != IWL_RATE_INVALID) {
-                       update_lq = 1;
-                       index = low;
-               }
-
-               break;
-       case 1:
-               /* Increase starting rate, update uCode's rate table */
-               if (high != IWL_RATE_INVALID) {
-                       update_lq = 1;
-                       index = high;
-               }
-
-               break;
-       case 0:
-               /* No change */
-       default:
-               break;
-       }
-
-       IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
-                   "high %d type %d\n",
-                    index, scale_action, low, high, tbl->lq_type);
-
-lq_update:
-       /* Replace uCode's rate table for the destination station. */
-       if (update_lq)
-               rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
-                                         tbl, index, is_green);
-
-       /* Should we stay with this modulation mode,
-        * or search for a new one? */
-       iwl4965_rs_stay_in_table(lq_sta, false);
-
-       /*
-        * Search for new modulation mode if we're:
-        * 1)  Not changing rates right now
-        * 2)  Not just finishing up a search
-        * 3)  Allowing a new search
-        */
-       if (!update_lq && !done_search &&
-               !lq_sta->stay_in_tbl && window->counter) {
-               /* Save current throughput to compare with "search" throughput*/
-               lq_sta->last_tpt = current_tpt;
-
-               /* Select a new "search" modulation mode to try.
-                * If one is found, set up the new "search" table. */
-               if (is_legacy(tbl->lq_type))
-                       iwl4965_rs_move_legacy_other(priv, lq_sta,
-                                                       conf, sta, index);
-               else if (is_siso(tbl->lq_type))
-                       iwl4965_rs_move_siso_to_other(priv, lq_sta,
-                                                       conf, sta, index);
-               else /* (is_mimo2(tbl->lq_type)) */
-                       iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
-                                                       conf, sta, index);
-
-               /* If new "search" mode was selected, set up in uCode table */
-               if (lq_sta->search_better_tbl) {
-                       /* Access the "search" table, clear its history. */
-                       tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-                       for (i = 0; i < IWL_RATE_COUNT; i++)
-                               iwl4965_rs_rate_scale_clear_window(
-                                                       &(tbl->win[i]));
-
-                       /* Use new "search" start rate */
-                       index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-
-                       IWL_DEBUG_RATE(priv,
-                               "Switch current  mcs: %X index: %d\n",
-                                    tbl->current_rate, index);
-                       iwl4965_rs_fill_link_cmd(priv, lq_sta,
-                                               tbl->current_rate);
-                       iwl_legacy_send_lq_cmd(priv, ctx,
-                                               &lq_sta->lq, CMD_ASYNC, false);
-               } else
-                       done_search = 1;
-       }
-
-       if (done_search && !lq_sta->stay_in_tbl) {
-               /* If the "active" (non-search) mode was legacy,
-                * and we've tried switching antennas,
-                * but we haven't been able to try HT modes (not available),
-                * stay with best antenna legacy modulation for a while
-                * before next round of mode comparisons. */
-               tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
-               if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
-                   lq_sta->action_counter > tbl1->max_search) {
-                       IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
-                       iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
-               }
-
-               /* If we're in an HT mode, and all 3 mode switch actions
-                * have been tried and compared, stay in this best modulation
-                * mode for a while before next round of mode comparisons. */
-               if (lq_sta->enable_counter &&
-                   (lq_sta->action_counter >= tbl1->max_search)) {
-                       if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
-                           (lq_sta->tx_agg_tid_en & (1 << tid)) &&
-                           (tid != MAX_TID_COUNT)) {
-                               tid_data =
-                                  &priv->stations[lq_sta->lq.sta_id].tid[tid];
-                               if (tid_data->agg.state == IWL_AGG_OFF) {
-                                       IWL_DEBUG_RATE(priv,
-                                                      "try to aggregate tid %d\n",
-                                                      tid);
-                                       iwl4965_rs_tl_turn_on_agg(priv, tid,
-                                                         lq_sta, sta);
-                               }
-                       }
-                       iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
-               }
-       }
-
-out:
-       tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
-                                                       index, is_green);
-       i = index;
-       lq_sta->last_txrate_idx = i;
-}
-
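
Editor's note, not part of this patch: the up/down/hold decision made in the function above, extracted as a pure function for illustration. Threshold values are placeholders for IWL_RATE_DECREASE_TH / IWL_RATE_INCREASE_TH, TPT_INVALID marks an unmeasured neighbour, and the adjacent-rate availability and final sanity checks are omitted for brevity.

/* Illustrative sketch only; thresholds and simplifications are assumptions. */
#include <stdio.h>

#define TPT_INVALID	(-1)
#define SR_DECREASE_TH	1920	/* placeholder threshold values */
#define SR_INCREASE_TH	8960

/* Return -1 to step down one rate, +1 to step up, 0 to stay. */
static int scale_action(int sr, int cur, int low_tpt, int high_tpt)
{
	if (sr <= SR_DECREASE_TH || cur == 0)
		return -1;			/* too many failures */

	if (low_tpt == TPT_INVALID && high_tpt == TPT_INVALID)
		return sr >= SR_INCREASE_TH;	/* nothing measured: probe up */

	if (low_tpt != TPT_INVALID && high_tpt != TPT_INVALID &&
	    low_tpt < cur && high_tpt < cur)
		return 0;			/* already at the best rate */

	if (high_tpt != TPT_INVALID)
		return (high_tpt > cur && sr >= SR_INCREASE_TH) ? 1 : 0;

	if (low_tpt > cur)
		return -1;			/* lower rate does better */

	return sr >= SR_INCREASE_TH ? 1 : 0;
}

int main(void)
{
	printf("action: %d\n", scale_action(9000, 120, 90, 150));
	return 0;
}
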
-/**
- * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values.  These will be replaced later
- *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
- *       rc80211_simple.
- *
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- *       which requires station table entry to exist).
- */
-static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
-                            struct ieee80211_conf *conf,
-                            struct ieee80211_sta *sta,
-                            struct iwl_lq_sta *lq_sta)
-{
-       struct iwl_scale_tbl_info *tbl;
-       int rate_idx;
-       int i;
-       u32 rate;
-       u8 use_green = iwl4965_rs_use_green(sta);
-       u8 active_tbl = 0;
-       u8 valid_tx_ant;
-       struct iwl_station_priv *sta_priv;
-       struct iwl_rxon_context *ctx;
-
-       if (!sta || !lq_sta)
-               return;
-
-       sta_priv = (void *)sta->drv_priv;
-       ctx = sta_priv->common.ctx;
-
-       i = lq_sta->last_txrate_idx;
-
-       valid_tx_ant = priv->hw_params.valid_tx_ant;
-
-       if (!lq_sta->search_better_tbl)
-               active_tbl = lq_sta->active_tbl;
-       else
-               active_tbl = 1 - lq_sta->active_tbl;
-
-       tbl = &(lq_sta->lq_info[active_tbl]);
-
-       if ((i < 0) || (i >= IWL_RATE_COUNT))
-               i = 0;
-
-       rate = iwlegacy_rates[i].plcp;
-       tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
-       rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
-       if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
-               rate |= RATE_MCS_CCK_MSK;
-
-       iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
-       if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
-               iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
-
-       rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
-       tbl->current_rate = rate;
-       iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-       iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
-       priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
-       iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
-}
-
-static void
-iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
-                       struct ieee80211_tx_rate_control *txrc)
-{
-
-       struct sk_buff *skb = txrc->skb;
-       struct ieee80211_supported_band *sband = txrc->sband;
-       struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct iwl_lq_sta *lq_sta = priv_sta;
-       int rate_idx;
-
-       IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
-
-       /* Get max rate if user set max rate */
-       if (lq_sta) {
-               lq_sta->max_rate_idx = txrc->max_rate_idx;
-               if ((sband->band == IEEE80211_BAND_5GHZ) &&
-                   (lq_sta->max_rate_idx != -1))
-                       lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
-               if ((lq_sta->max_rate_idx < 0) ||
-                   (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
-                       lq_sta->max_rate_idx = -1;
-       }
-
-       /* Treat uninitialized rate scaling data the same as non-existent. */
-       if (lq_sta && !lq_sta->drv) {
-               IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
-               priv_sta = NULL;
-       }
-
-       /* Send management frames and NO_ACK data using lowest rate. */
-       if (rate_control_send_low(sta, priv_sta, txrc))
-               return;
-
-       if (!lq_sta)
-               return;
-
-       rate_idx  = lq_sta->last_txrate_idx;
-
-       if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
-               rate_idx -= IWL_FIRST_OFDM_RATE;
-               /* 6M and 9M share the same MCS index */
-               rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
-               if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
-                        IWL_RATE_MIMO2_6M_PLCP)
-                       rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
-               info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
-               if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
-                       info->control.rates[0].flags |=
-                                       IEEE80211_TX_RC_SHORT_GI;
-               if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
-                       info->control.rates[0].flags |=
-                                       IEEE80211_TX_RC_DUP_DATA;
-               if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
-                       info->control.rates[0].flags |=
-                                       IEEE80211_TX_RC_40_MHZ_WIDTH;
-               if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
-                       info->control.rates[0].flags |=
-                                       IEEE80211_TX_RC_GREEN_FIELD;
-       } else {
-               /* Check for invalid rates */
-               if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-                               ((sband->band == IEEE80211_BAND_5GHZ) &&
-                                (rate_idx < IWL_FIRST_OFDM_RATE)))
-                       rate_idx = rate_lowest_index(sband, sta);
-               /* On valid 5 GHz rate, adjust index */
-               else if (sband->band == IEEE80211_BAND_5GHZ)
-                       rate_idx -= IWL_FIRST_OFDM_RATE;
-               info->control.rates[0].flags = 0;
-       }
-       info->control.rates[0].idx = rate_idx;
-
-}
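
The HT branch above converts the driver's internal rate index into a mac80211 MCS index by dropping the legacy OFDM offset, collapsing the shared 6M/9M slot, and adding a per-stream offset for MIMO2. A self-contained sketch of that mapping; the constant values (4 for the first OFDM index, 8 MCS indices per stream) are assumptions used purely for illustration, not taken from the driver headers:

    /* Illustrative model of the HT index mapping above; constants assumed. */
    #define EX_FIRST_OFDM_RATE  4   /* assumed index of the 6M entry */
    #define EX_MCS_PER_STREAM   8   /* assumed MCS indices per spatial stream */

    static int ex_ht_mcs_index(int driver_idx, int is_mimo2)
    {
            int mcs = driver_idx - EX_FIRST_OFDM_RATE;

            /* 6M and 9M share MCS 0, so the 9M slot is collapsed */
            mcs = (mcs > 0) ? mcs - 1 : 0;
            if (is_mimo2)
                    mcs += EX_MCS_PER_STREAM;
            return mcs;
    }
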
-
-static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
-                         gfp_t gfp)
-{
-       struct iwl_lq_sta *lq_sta;
-       struct iwl_station_priv *sta_priv =
-                               (struct iwl_station_priv *) sta->drv_priv;
-       struct iwl_priv *priv;
-
-       priv = (struct iwl_priv *)priv_rate;
-       IWL_DEBUG_RATE(priv, "create station rate scale window\n");
-
-       lq_sta = &sta_priv->lq_sta;
-
-       return lq_sta;
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void
-iwl4965_rs_rate_init(struct iwl_priv *priv,
-                       struct ieee80211_sta *sta,
-                       u8 sta_id)
-{
-       int i, j;
-       struct ieee80211_hw *hw = priv->hw;
-       struct ieee80211_conf *conf = &priv->hw->conf;
-       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       struct iwl_station_priv *sta_priv;
-       struct iwl_lq_sta *lq_sta;
-       struct ieee80211_supported_band *sband;
-
-       sta_priv = (struct iwl_station_priv *) sta->drv_priv;
-       lq_sta = &sta_priv->lq_sta;
-       sband = hw->wiphy->bands[conf->channel->band];
-
-
-       lq_sta->lq.sta_id = sta_id;
-
-       for (j = 0; j < LQ_SIZE; j++)
-               for (i = 0; i < IWL_RATE_COUNT; i++)
-                       iwl4965_rs_rate_scale_clear_window(
-                                       &lq_sta->lq_info[j].win[i]);
-
-       lq_sta->flush_timer = 0;
-       lq_sta->supp_rates = sta->supp_rates[sband->band];
-       for (j = 0; j < LQ_SIZE; j++)
-               for (i = 0; i < IWL_RATE_COUNT; i++)
-                       iwl4965_rs_rate_scale_clear_window(
-                                       &lq_sta->lq_info[j].win[i]);
-
-       IWL_DEBUG_RATE(priv, "LQ:"
-                       "*** rate scale station global init for station %d ***\n",
-                      sta_id);
-       /* TODO: what is a good starting rate for STA? About middle? Maybe not
-        * the lowest or the highest rate.. Could consider using RSSI from
-        * previous packets? Need to have IEEE 802.1X auth succeed immediately
-        * after assoc.. */
-
-       lq_sta->is_dup = 0;
-       lq_sta->max_rate_idx = -1;
-       lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
-       lq_sta->is_green = iwl4965_rs_use_green(sta);
-       lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
-       lq_sta->band = priv->band;
-       /*
-        * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
-        * supp_rates[] does not; shift to convert format, force 9 MBits off.
-        */
-       lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
-       lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
-       lq_sta->active_siso_rate &= ~((u16)0x2);
-       lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
-       /* Same here */
-       lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
-       lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
-       lq_sta->active_mimo2_rate &= ~((u16)0x2);
-       lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
-
-       /* These values will be overridden later */
-       lq_sta->lq.general_params.single_stream_ant_msk =
-               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-       lq_sta->lq.general_params.dual_stream_ant_msk =
-               priv->hw_params.valid_tx_ant &
-               ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-       if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
-               lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
-       } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
-               lq_sta->lq.general_params.dual_stream_ant_msk =
-                       priv->hw_params.valid_tx_ant;
-       }
-
-       /* By default, allow aggregation for all TIDs */
-       lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
-       lq_sta->drv = priv;
-
-       /* Set last_txrate_idx to lowest rate */
-       lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
-       if (sband->band == IEEE80211_BAND_5GHZ)
-               lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
-       lq_sta->is_agg = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       lq_sta->dbg_fixed_rate = 0;
-#endif
-
-       iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
-}
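
The rx_mask shift sequence in the function above can be followed with a concrete value: assuming a single-stream HT capability of rx_mask[0] = 0xFF and IWL_FIRST_OFDM_RATE == 4 (the offset is an assumption here), the steps produce the same 0x1FD0 mask that the debugfs write handler further down hard-codes as "6 - 60 MBits, no 9, no CCK":

    /* Worked example of the SISO mask conversion above, assuming
     * rx_mask[0] = 0xFF (MCS 0-7) and IWL_FIRST_OFDM_RATE == 4:
     *
     *   0xFF << 1            = 0x1FE   move MCS bits up one slot
     *   0x1FE | (0xFF & 0x1) = 0x1FF   copy MCS 0 back down to the 6M slot
     *   0x1FF & ~0x2         = 0x1FD   force the 9 Mbit slot off
     *   0x1FD << 4           = 0x1FD0  shift past the four CCK rate slots
     */
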
-
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
-                            struct iwl_lq_sta *lq_sta, u32 new_rate)
-{
-       struct iwl_scale_tbl_info tbl_type;
-       int index = 0;
-       int rate_idx;
-       int repeat_rate = 0;
-       u8 ant_toggle_cnt = 0;
-       u8 use_ht_possible = 1;
-       u8 valid_tx_ant = 0;
-       struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
-
-       /* Override starting rate (index 0) if needed for debug purposes */
-       iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
-       /* Interpret new_rate (rate_n_flags) */
-       iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
-                                 &tbl_type, &rate_idx);
-
-       /* How many times should we repeat the initial rate? */
-       if (is_legacy(tbl_type.lq_type)) {
-               ant_toggle_cnt = 1;
-               repeat_rate = IWL_NUMBER_TRY;
-       } else {
-               repeat_rate = IWL_HT_NUMBER_TRY;
-       }
-
-       lq_cmd->general_params.mimo_delimiter =
-                       is_mimo(tbl_type.lq_type) ? 1 : 0;
-
-       /* Fill 1st table entry (index 0) */
-       lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
-       if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
-               lq_cmd->general_params.single_stream_ant_msk =
-                                               tbl_type.ant_type;
-       } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
-               lq_cmd->general_params.dual_stream_ant_msk =
-                                               tbl_type.ant_type;
-       } /* otherwise we don't modify the existing value */
-
-       index++;
-       repeat_rate--;
-       if (priv)
-               valid_tx_ant = priv->hw_params.valid_tx_ant;
-
-       /* Fill rest of rate table */
-       while (index < LINK_QUAL_MAX_RETRY_NUM) {
-               /* Repeat initial/next rate.
-                * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
-                * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
-               while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
-                       if (is_legacy(tbl_type.lq_type)) {
-                               if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
-                                       ant_toggle_cnt++;
-                               else if (priv &&
-                                        iwl4965_rs_toggle_antenna(valid_tx_ant,
-                                                       &new_rate, &tbl_type))
-                                       ant_toggle_cnt = 1;
-                       }
-
-                       /* Override next rate if needed for debug purposes */
-                       iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
-                       /* Fill next table entry */
-                       lq_cmd->rs_table[index].rate_n_flags =
-                                       cpu_to_le32(new_rate);
-                       repeat_rate--;
-                       index++;
-               }
-
-               iwl4965_rs_get_tbl_info_from_mcs(new_rate,
-                                               lq_sta->band, &tbl_type,
-                                               &rate_idx);
-
-               /* Indicate to uCode which entries might be MIMO.
-                * If initial rate was MIMO, this will finally end up
-                * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
-               if (is_mimo(tbl_type.lq_type))
-                       lq_cmd->general_params.mimo_delimiter = index;
-
-               /* Get next rate */
-               new_rate = iwl4965_rs_get_lower_rate(lq_sta,
-                                       &tbl_type, rate_idx,
-                                            use_ht_possible);
-
-               /* How many times should we repeat the next rate? */
-               if (is_legacy(tbl_type.lq_type)) {
-                       if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
-                               ant_toggle_cnt++;
-                       else if (priv &&
-                                iwl4965_rs_toggle_antenna(valid_tx_ant,
-                                                  &new_rate, &tbl_type))
-                               ant_toggle_cnt = 1;
-
-                       repeat_rate = IWL_NUMBER_TRY;
-               } else {
-                       repeat_rate = IWL_HT_NUMBER_TRY;
-               }
-
-               /* Don't allow HT rates after next pass.
-                * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
-               use_ht_possible = 0;
-
-               /* Override next rate if needed for debug purposes */
-               iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
-               /* Fill next table entry */
-               lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
-               index++;
-               repeat_rate--;
-       }
-
-       lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-       lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-
-       lq_cmd->agg_params.agg_time_limit =
-               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-}
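
Putting the two loops above together: the first retry entry gets the initial rate, each rate is repeated according to the try count hinted at in the comments (1 for legacy, 3 for HT), and the remaining entries walk down through progressively lower rates. A simplified, self-contained model of that layout; the table size and try count below are assumptions based on those comments, not the driver's constants:

    /* Toy model of the retry-table layout built above. */
    #define EX_MAX_RETRY      16
    #define EX_TRIES_PER_RATE  3

    static void ex_fill_retry_table(unsigned int *table, unsigned int start,
                                    unsigned int (*lower)(unsigned int))
    {
            unsigned int rate = start;
            int i = 0;

            while (i < EX_MAX_RETRY) {
                    int t;

                    for (t = 0; t < EX_TRIES_PER_RATE && i < EX_MAX_RETRY; t++)
                            table[i++] = rate;   /* repeat the current rate */
                    rate = lower(rate);          /* then step down one rate */
            }
    }
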
-
-static void *iwl4965_rs_alloc(struct ieee80211_hw *hw,
-                              struct dentry *debugfsdir)
-{
-       return hw->priv;
-}
-/* rate scale requires free function to be implemented */
-static void iwl4965_rs_free(void *priv_rate)
-{
-       return;
-}
-
-static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
-                       void *priv_sta)
-{
-       struct iwl_priv *priv __maybe_unused = priv_r;
-
-       IWL_DEBUG_RATE(priv, "enter\n");
-       IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
-{
-       file->private_data = inode->i_private;
-       return 0;
-}
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-                            u32 *rate_n_flags, int index)
-{
-       struct iwl_priv *priv;
-       u8 valid_tx_ant;
-       u8 ant_sel_tx;
-
-       priv = lq_sta->drv;
-       valid_tx_ant = priv->hw_params.valid_tx_ant;
-       if (lq_sta->dbg_fixed_rate) {
-               ant_sel_tx =
-                 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
-                 >> RATE_MCS_ANT_POS);
-               if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
-                       *rate_n_flags = lq_sta->dbg_fixed_rate;
-                       IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
-               } else {
-                       lq_sta->dbg_fixed_rate = 0;
-                       IWL_ERR(priv,
-                           "Invalid antenna selection 0x%X, Valid is 0x%X\n",
-                           ant_sel_tx, valid_tx_ant);
-                       IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
-               }
-       } else {
-               IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
-       }
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
-                       const char __user *user_buf, size_t count, loff_t *ppos)
-{
-       struct iwl_lq_sta *lq_sta = file->private_data;
-       struct iwl_priv *priv;
-       char buf[64];
-       size_t buf_size;
-       u32 parsed_rate;
-       struct iwl_station_priv *sta_priv =
-               container_of(lq_sta, struct iwl_station_priv, lq_sta);
-       struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
-       priv = lq_sta->drv;
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       if (sscanf(buf, "%x", &parsed_rate) == 1)
-               lq_sta->dbg_fixed_rate = parsed_rate;
-       else
-               lq_sta->dbg_fixed_rate = 0;
-
-       lq_sta->active_legacy_rate = 0x0FFF;    /* 1 - 54 MBits, includes CCK */
-       lq_sta->active_siso_rate   = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
-       lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
-
-       IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
-               lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
-       if (lq_sta->dbg_fixed_rate) {
-               iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
-               iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
-                               false);
-       }
-
-       return count;
-}
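
The write handler above parses a single hex rate_n_flags value ("0" clears the override) and immediately pushes a new LQ command whenever a fixed rate is set. A minimal user-space sketch for poking this file follows; the debugfs path is only an assumed example of the mac80211 per-station layout and will vary by interface and station address:

    /* Illustration only: clear (or set) the debugfs fixed-rate override. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
                    "stations/00:11:22:33:44:55/rate_scale_table";
            const char *val = "0";          /* "0" clears the fixed rate */
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, val, strlen(val)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }
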
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
-                       char __user *user_buf, size_t count, loff_t *ppos)
-{
-       char *buff;
-       int desc = 0;
-       int i = 0;
-       int index = 0;
-       ssize_t ret;
-
-       struct iwl_lq_sta *lq_sta = file->private_data;
-       struct iwl_priv *priv;
-       struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
-       priv = lq_sta->drv;
-       buff = kmalloc(1024, GFP_KERNEL);
-       if (!buff)
-               return -ENOMEM;
-
-       desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
-       desc += sprintf(buff+desc, "failed=%d success=%d rate=0x%X\n",
-                       lq_sta->total_failed, lq_sta->total_success,
-                       lq_sta->active_legacy_rate);
-       desc += sprintf(buff+desc, "fixed rate 0x%X\n",
-                       lq_sta->dbg_fixed_rate);
-       desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
-           (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
-           (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
-           (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
-       desc += sprintf(buff+desc, "lq type %s\n",
-          (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
-       if (is_Ht(tbl->lq_type)) {
-               desc += sprintf(buff+desc, " %s",
-                               (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
-               desc += sprintf(buff+desc, " %s",
-                               (tbl->is_ht40) ? "40MHz" : "20MHz");
-               desc += sprintf(buff+desc, " %s %s %s\n",
-                               (tbl->is_SGI) ? "SGI" : "",
-                               (lq_sta->is_green) ? "GF enabled" : "",
-                               (lq_sta->is_agg) ? "AGG on" : "");
-       }
-       desc += sprintf(buff+desc, "last tx rate=0x%X\n",
-               lq_sta->last_rate_n_flags);
-       desc += sprintf(buff+desc, "general:"
-               "flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
-               lq_sta->lq.general_params.flags,
-               lq_sta->lq.general_params.mimo_delimiter,
-               lq_sta->lq.general_params.single_stream_ant_msk,
-               lq_sta->lq.general_params.dual_stream_ant_msk);
-
-       desc += sprintf(buff+desc, "agg:"
-                       "time_limit=%d dis_start_th=%d frame_cnt_limit=%d\n",
-                       le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
-                       lq_sta->lq.agg_params.agg_dis_start_th,
-                       lq_sta->lq.agg_params.agg_frame_cnt_limit);
-
-       desc += sprintf(buff+desc,
-                       "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
-                       lq_sta->lq.general_params.start_rate_index[0],
-                       lq_sta->lq.general_params.start_rate_index[1],
-                       lq_sta->lq.general_params.start_rate_index[2],
-                       lq_sta->lq.general_params.start_rate_index[3]);
-
-       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
-               index = iwl4965_hwrate_to_plcp_idx(
-                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
-               if (is_legacy(tbl->lq_type)) {
-                       desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
-                       i,
-                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
-                       iwl_rate_mcs[index].mbps);
-               } else {
-                       desc += sprintf(buff+desc,
-                       " rate[%d] 0x%X %smbps (%s)\n",
-                       i,
-                       le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
-                       iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
-               }
-       }
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-       kfree(buff);
-       return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
-       .write = iwl4965_rs_sta_dbgfs_scale_table_write,
-       .read = iwl4965_rs_sta_dbgfs_scale_table_read,
-       .open = iwl4965_open_file_generic,
-       .llseek = default_llseek,
-};
-static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
-                       char __user *user_buf, size_t count, loff_t *ppos)
-{
-       char *buff;
-       int desc = 0;
-       int i, j;
-       ssize_t ret;
-
-       struct iwl_lq_sta *lq_sta = file->private_data;
-
-       buff = kmalloc(1024, GFP_KERNEL);
-       if (!buff)
-               return -ENOMEM;
-
-       for (i = 0; i < LQ_SIZE; i++) {
-               desc += sprintf(buff+desc,
-                               "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
-                               "rate=0x%X\n",
-                               lq_sta->active_tbl == i ? "*" : "x",
-                               lq_sta->lq_info[i].lq_type,
-                               lq_sta->lq_info[i].is_SGI,
-                               lq_sta->lq_info[i].is_ht40,
-                               lq_sta->lq_info[i].is_dup,
-                               lq_sta->is_green,
-                               lq_sta->lq_info[i].current_rate);
-               for (j = 0; j < IWL_RATE_COUNT; j++) {
-                       desc += sprintf(buff+desc,
-                               "counter=%d success=%d %%=%d\n",
-                               lq_sta->lq_info[i].win[j].counter,
-                               lq_sta->lq_info[i].win[j].success_counter,
-                               lq_sta->lq_info[i].win[j].success_ratio);
-               }
-       }
-       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-       kfree(buff);
-       return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
-       .read = iwl4965_rs_sta_dbgfs_stats_table_read,
-       .open = iwl4965_open_file_generic,
-       .llseek = default_llseek,
-};
-
-static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
-                       char __user *user_buf, size_t count, loff_t *ppos)
-{
-       char buff[120];
-       int desc = 0;
-       ssize_t ret;
-
-       struct iwl_lq_sta *lq_sta = file->private_data;
-       struct iwl_priv *priv;
-       struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-
-       priv = lq_sta->drv;
-
-       if (is_Ht(tbl->lq_type))
-               desc += sprintf(buff+desc,
-                               "Bit Rate= %d Mb/s\n",
-                               tbl->expected_tpt[lq_sta->last_txrate_idx]);
-       else
-               desc += sprintf(buff+desc,
-                               "Bit Rate= %d Mb/s\n",
-                               iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-       return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
-       .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
-       .open = iwl4965_open_file_generic,
-       .llseek = default_llseek,
-};
-
-static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
-                                       struct dentry *dir)
-{
-       struct iwl_lq_sta *lq_sta = priv_sta;
-       lq_sta->rs_sta_dbgfs_scale_table_file =
-               debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
-                               lq_sta, &rs_sta_dbgfs_scale_table_ops);
-       lq_sta->rs_sta_dbgfs_stats_table_file =
-               debugfs_create_file("rate_stats_table", S_IRUSR, dir,
-                       lq_sta, &rs_sta_dbgfs_stats_table_ops);
-       lq_sta->rs_sta_dbgfs_rate_scale_data_file =
-               debugfs_create_file("rate_scale_data", S_IRUSR, dir,
-                       lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
-       lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
-               debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
-               &lq_sta->tx_agg_tid_en);
-
-}
-
-static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
-{
-       struct iwl_lq_sta *lq_sta = priv_sta;
-       debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by the driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void
-iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
-                        struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-static struct rate_control_ops rs_4965_ops = {
-       .module = NULL,
-       .name = IWL4965_RS_NAME,
-       .tx_status = iwl4965_rs_tx_status,
-       .get_rate = iwl4965_rs_get_rate,
-       .rate_init = iwl4965_rs_rate_init_stub,
-       .alloc = iwl4965_rs_alloc,
-       .free = iwl4965_rs_free,
-       .alloc_sta = iwl4965_rs_alloc_sta,
-       .free_sta = iwl4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
-       .add_sta_debugfs = iwl4965_rs_add_debugfs,
-       .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
-#endif
-};
-
-int iwl4965_rate_control_register(void)
-{
-       return ieee80211_rate_control_register(&rs_4965_ops);
-}
-
-void iwl4965_rate_control_unregister(void)
-{
-       ieee80211_rate_control_unregister(&rs_4965_ops);
-}
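
iwl4965_rate_control_register()/unregister() are thin wrappers around mac80211's rate-control registration. An illustrative init/exit pairing is sketched below; the real iwlegacy module wires this into its own module init path, so the function names here are hypothetical:

    /* Illustrative pairing of the register/unregister helpers above. */
    static int __init ex_init(void)
    {
            int ret = iwl4965_rate_control_register();

            if (ret)
                    return ret;
            /* ... register the PCI driver, etc. ... */
            return 0;
    }

    static void __exit ex_exit(void)
    {
            iwl4965_rate_control_unregister();
    }
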
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644 (file)
index 2b144bb..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_missed_beacon_notif *missed_beacon;
-
-       missed_beacon = &pkt->u.missed_beacon;
-       if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
-           priv->missed_beacon_threshold) {
-               IWL_DEBUG_CALIB(priv,
-                   "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
-                   le32_to_cpu(missed_beacon->consecutive_missed_beacons),
-                   le32_to_cpu(missed_beacon->total_missed_becons),
-                   le32_to_cpu(missed_beacon->num_recvd_beacons),
-                   le32_to_cpu(missed_beacon->num_expected_beacons));
-               if (!test_bit(STATUS_SCANNING, &priv->status))
-                       iwl4965_init_sensitivity(priv);
-       }
-}
-
-/* Calculate noise level, based on measurements during network silence just
- *   before a beacon arrives.  This measurement can be done only if we know
- *   exactly when to expect beacons, therefore only when we're associated. */
-static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
-{
-       struct statistics_rx_non_phy *rx_info;
-       int num_active_rx = 0;
-       int total_silence = 0;
-       int bcn_silence_a, bcn_silence_b, bcn_silence_c;
-       int last_rx_noise;
-
-       rx_info = &(priv->_4965.statistics.rx.general);
-       bcn_silence_a =
-               le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
-       bcn_silence_b =
-               le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
-       bcn_silence_c =
-               le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
-       if (bcn_silence_a) {
-               total_silence += bcn_silence_a;
-               num_active_rx++;
-       }
-       if (bcn_silence_b) {
-               total_silence += bcn_silence_b;
-               num_active_rx++;
-       }
-       if (bcn_silence_c) {
-               total_silence += bcn_silence_c;
-               num_active_rx++;
-       }
-
-       /* Average among active antennas */
-       if (num_active_rx)
-               last_rx_noise = (total_silence / num_active_rx) - 107;
-       else
-               last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
-       IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
-                       bcn_silence_a, bcn_silence_b, bcn_silence_c,
-                       last_rx_noise);
-}
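
The averaging above only counts chains with a non-zero silence reading, subtracts a fixed 107 dB offset, and falls back to a "not available" sentinel otherwise; for example, readings of 90 and 95 on two active chains give (90 + 95) / 2 - 107 = -15 dBm. A standalone restatement of that calculation, for clarity only:

    /* Standalone restatement of the noise averaging above (illustrative). */
    static int ex_rx_noise_dbm(const int *silence, int nchains, int unavailable)
    {
            int sum = 0, active = 0, i;

            for (i = 0; i < nchains; i++) {
                    if (silence[i]) {
                            sum += silence[i];
                            active++;
                    }
            }
            return active ? (sum / active) - 107 : unavailable;
    }
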
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-/*
- *  Based on the assumption that all statistics counters are DWORD-sized.
- *  FIXME: this function is for debugging only and does not handle
- *  counter roll-over.
- */
-static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
-                                       __le32 *stats)
-{
-       int i, size;
-       __le32 *prev_stats;
-       u32 *accum_stats;
-       u32 *delta, *max_delta;
-       struct statistics_general_common *general, *accum_general;
-       struct statistics_tx *tx, *accum_tx;
-
-       prev_stats = (__le32 *)&priv->_4965.statistics;
-       accum_stats = (u32 *)&priv->_4965.accum_statistics;
-       size = sizeof(struct iwl_notif_statistics);
-       general = &priv->_4965.statistics.general.common;
-       accum_general = &priv->_4965.accum_statistics.general.common;
-       tx = &priv->_4965.statistics.tx;
-       accum_tx = &priv->_4965.accum_statistics.tx;
-       delta = (u32 *)&priv->_4965.delta_statistics;
-       max_delta = (u32 *)&priv->_4965.max_delta;
-
-       for (i = sizeof(__le32); i < size;
-            i += sizeof(__le32), stats++, prev_stats++, delta++,
-            max_delta++, accum_stats++) {
-               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
-                       *delta = (le32_to_cpu(*stats) -
-                               le32_to_cpu(*prev_stats));
-                       *accum_stats += *delta;
-                       if (*delta > *max_delta)
-                               *max_delta = *delta;
-               }
-       }
-
-       /* reset accumulative statistics for "no-counter" type statistics */
-       accum_general->temperature = general->temperature;
-       accum_general->ttl_timestamp = general->ttl_timestamp;
-}
-#endif
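
The accumulation helper above walks the statistics structure as an array of 32-bit words, skips the leading flags word, and adds only forward (non-wrapping) deltas to the running totals. The same pattern in a stripped-down, self-contained form; max-delta tracking and endian conversion are omitted for brevity:

    /* Stripped-down sketch of the word-wise delta accumulation above. */
    static void ex_accumulate(const unsigned int *cur, const unsigned int *prev,
                              unsigned int *accum, unsigned int words)
    {
            unsigned int i;

            for (i = 1; i < words; i++) {     /* word 0 holds flags, skip it */
                    if (cur[i] > prev[i])
                            accum[i] += cur[i] - prev[i];
            }
    }
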
-
-#define REG_RECALIB_PERIOD (60)
-
-void iwl4965_rx_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-       int change;
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       IWL_DEBUG_RX(priv,
-                    "Statistics notification received (%d vs %d).\n",
-                    (int)sizeof(struct iwl_notif_statistics),
-                    le32_to_cpu(pkt->len_n_flags) &
-                    FH_RSCSR_FRAME_SIZE_MSK);
-
-       change = ((priv->_4965.statistics.general.common.temperature !=
-                  pkt->u.stats.general.common.temperature) ||
-                  ((priv->_4965.statistics.flag &
-                  STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
-                  (pkt->u.stats.flag &
-                  STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-       iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
-       /* TODO: reading some of statistics is unneeded */
-       memcpy(&priv->_4965.statistics, &pkt->u.stats,
-               sizeof(priv->_4965.statistics));
-
-       set_bit(STATUS_STATISTICS, &priv->status);
-
-       /* Reschedule the statistics timer to occur in
-        * REG_RECALIB_PERIOD seconds to ensure we get a
-        * thermal update even if the uCode doesn't give
-        * us one */
-       mod_timer(&priv->statistics_periodic, jiffies +
-                 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
-       if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-           (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
-               iwl4965_rx_calc_noise(priv);
-               queue_work(priv->workqueue, &priv->run_time_calib_work);
-       }
-       if (priv->cfg->ops->lib->temp_ops.temperature && change)
-               priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl4965_reply_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-               memset(&priv->_4965.accum_statistics, 0,
-                       sizeof(struct iwl_notif_statistics));
-               memset(&priv->_4965.delta_statistics, 0,
-                       sizeof(struct iwl_notif_statistics));
-               memset(&priv->_4965.max_delta, 0,
-                       sizeof(struct iwl_notif_statistics));
-#endif
-               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
-       }
-       iwl4965_rx_statistics(priv, rxb);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644 (file)
index a262c23..0000000
+++ /dev/null
@@ -1,721 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-4965.h"
-
-static struct iwl_link_quality_cmd *
-iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
-{
-       int i, r;
-       struct iwl_link_quality_cmd *link_cmd;
-       u32 rate_flags = 0;
-       __le32 rate_n_flags;
-
-       link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
-       if (!link_cmd) {
-               IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
-               return NULL;
-       }
-       /* Set up the rate scaling to start at the selected rate, fall back
-        * all the way down to 1M in IEEE order, and then spin on 1M */
-       if (priv->band == IEEE80211_BAND_5GHZ)
-               r = IWL_RATE_6M_INDEX;
-       else
-               r = IWL_RATE_1M_INDEX;
-
-       if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
-               rate_flags |= RATE_MCS_CCK_MSK;
-
-       rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
-                               RATE_MCS_ANT_POS;
-       rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
-                                                  rate_flags);
-       for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
-               link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
-
-       link_cmd->general_params.single_stream_ant_msk =
-                               iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
-       link_cmd->general_params.dual_stream_ant_msk =
-               priv->hw_params.valid_tx_ant &
-               ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-       if (!link_cmd->general_params.dual_stream_ant_msk) {
-               link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
-       } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
-               link_cmd->general_params.dual_stream_ant_msk =
-                       priv->hw_params.valid_tx_ant;
-       }
-
-       link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-       link_cmd->agg_params.agg_time_limit =
-               cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
-       link_cmd->sta_id = sta_id;
-
-       return link_cmd;
-}
-
-/*
- * iwl4965_add_bssid_station - Add the special IBSS BSSID station
- *
- * Function sleeps.
- */
-int
-iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-                            const u8 *addr, u8 *sta_id_r)
-{
-       int ret;
-       u8 sta_id;
-       struct iwl_link_quality_cmd *link_cmd;
-       unsigned long flags;
-
-       if (sta_id_r)
-               *sta_id_r = IWL_INVALID_STATION;
-
-       ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM\n", addr);
-               return ret;
-       }
-
-       if (sta_id_r)
-               *sta_id_r = sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].used |= IWL_STA_LOCAL;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       /* Set up default rate scaling table in device's station table */
-       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
-       if (!link_cmd) {
-               IWL_ERR(priv,
-                       "Unable to initialize rate scaling for station %pM.\n",
-                       addr);
-               return -ENOMEM;
-       }
-
-       ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
-       if (ret)
-               IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].lq = link_cmd;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-
-static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
-                                     struct iwl_rxon_context *ctx,
-                                     bool send_if_empty)
-{
-       int i, not_empty = 0;
-       u8 buff[sizeof(struct iwl_wep_cmd) +
-               sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
-       struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
-       size_t cmd_size  = sizeof(struct iwl_wep_cmd);
-       struct iwl_host_cmd cmd = {
-               .id = ctx->wep_key_cmd,
-               .data = wep_cmd,
-               .flags = CMD_SYNC,
-       };
-
-       might_sleep();
-
-       memset(wep_cmd, 0, cmd_size +
-                       (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
-
-       for (i = 0; i < WEP_KEYS_MAX ; i++) {
-               wep_cmd->key[i].key_index = i;
-               if (ctx->wep_keys[i].key_size) {
-                       wep_cmd->key[i].key_offset = i;
-                       not_empty = 1;
-               } else {
-                       wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
-               }
-
-               wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
-               memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
-                               ctx->wep_keys[i].key_size);
-       }
-
-       wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
-       wep_cmd->num_keys = WEP_KEYS_MAX;
-
-       cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
-
-       cmd.len = cmd_size;
-
-       if (not_empty || send_if_empty)
-               return iwl_legacy_send_cmd(priv, &cmd);
-       else
-               return 0;
-}
-
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
-                                struct iwl_rxon_context *ctx)
-{
-       lockdep_assert_held(&priv->mutex);
-
-       return iwl4965_static_wepkey_cmd(priv, ctx, false);
-}
-
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx,
-                              struct ieee80211_key_conf *keyconf)
-{
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
-                     keyconf->keyidx);
-
-       memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
-       if (iwl_legacy_is_rfkill(priv)) {
-               IWL_DEBUG_WEP(priv,
-               "Not sending REPLY_WEPKEY command due to RFKILL.\n");
-               /* but keys in device are clear anyway so return success */
-               return 0;
-       }
-       ret = iwl4965_static_wepkey_cmd(priv, ctx, true);
-       IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
-                     keyconf->keyidx, ret);
-
-       return ret;
-}
-
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx,
-                           struct ieee80211_key_conf *keyconf)
-{
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (keyconf->keylen != WEP_KEY_LEN_128 &&
-           keyconf->keylen != WEP_KEY_LEN_64) {
-               IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
-               return -EINVAL;
-       }
-
-       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-       keyconf->hw_key_idx = HW_KEY_DEFAULT;
-       priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
-
-       ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
-       memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
-                                                       keyconf->keylen);
-
-       ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
-       IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
-               keyconf->keylen, keyconf->keyidx, ret);
-
-       return ret;
-}
-
-static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
-                                       struct iwl_rxon_context *ctx,
-                                       struct ieee80211_key_conf *keyconf,
-                                       u8 sta_id)
-{
-       unsigned long flags;
-       __le16 key_flags = 0;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       lockdep_assert_held(&priv->mutex);
-
-       keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
-       key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
-       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-       key_flags &= ~STA_KEY_FLG_INVALID;
-
-       if (keyconf->keylen == WEP_KEY_LEN_128)
-               key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
-       if (sta_id == ctx->bcast_sta_id)
-               key_flags |= STA_KEY_MULTICAST_MSK;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-
-       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-       priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
-       memcpy(priv->stations[sta_id].keyinfo.key,
-                               keyconf->key, keyconf->keylen);
-
-       memcpy(&priv->stations[sta_id].sta.key.key[3],
-                               keyconf->key, keyconf->keylen);
-
-       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-                       == STA_KEY_FLG_NO_ENC)
-               priv->stations[sta_id].sta.key.key_offset =
-                                iwl_legacy_get_free_ucode_key_index(priv);
-       /* else, we are overriding an existing key => no need to allocate room
-        * in uCode. */
-
-       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-               "no space for a new key");
-
-       priv->stations[sta_id].sta.key.key_flags = key_flags;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                       sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
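
This helper and the CCMP variant that follows use the same locking discipline: modify the shadow station entry under sta_lock, copy the resulting ADD_STA command into a local buffer, drop the lock, and only then issue the synchronous command, which may sleep. A compact sketch of that snapshot-then-send pattern, with simplified types standing in for the driver structures:

    /* Sketch of the snapshot-then-send pattern used by the key helpers:
     * never call the sleeping send path while holding the spinlock. */
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct ex_addsta_cmd { u8 data[64]; };

    static int ex_modify_and_send(spinlock_t *lock, struct ex_addsta_cmd *shadow,
                                  int (*send_sync)(struct ex_addsta_cmd *))
    {
            struct ex_addsta_cmd snapshot;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            /* ... update *shadow (key material, flags, modify mask) ... */
            memcpy(&snapshot, shadow, sizeof(snapshot));
            spin_unlock_irqrestore(lock, flags);

            return send_sync(&snapshot);    /* may sleep, lock released */
    }
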
-
-static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
-                                        struct iwl_rxon_context *ctx,
-                                        struct ieee80211_key_conf *keyconf,
-                                        u8 sta_id)
-{
-       unsigned long flags;
-       __le16 key_flags = 0;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       lockdep_assert_held(&priv->mutex);
-
-       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
-       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-       key_flags &= ~STA_KEY_FLG_INVALID;
-
-       if (sta_id == ctx->bcast_sta_id)
-               key_flags |= STA_KEY_MULTICAST_MSK;
-
-       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
-       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
-              keyconf->keylen);
-
-       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
-              keyconf->keylen);
-
-       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-                       == STA_KEY_FLG_NO_ENC)
-               priv->stations[sta_id].sta.key.key_offset =
-                                iwl_legacy_get_free_ucode_key_index(priv);
-       /* else, we are overriding an existing key => no need to allocate room
-        * in uCode. */
-
-       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-               "no space for a new key");
-
-       priv->stations[sta_id].sta.key.key_flags = key_flags;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                        sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
-                                        struct iwl_rxon_context *ctx,
-                                        struct ieee80211_key_conf *keyconf,
-                                        u8 sta_id)
-{
-       unsigned long flags;
-       int ret = 0;
-       __le16 key_flags = 0;
-
-       key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
-       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-       key_flags &= ~STA_KEY_FLG_INVALID;
-
-       if (sta_id == ctx->bcast_sta_id)
-               key_flags |= STA_KEY_MULTICAST_MSK;
-
-       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-
-       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-       priv->stations[sta_id].keyinfo.keylen = 16;
-
-       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-                       == STA_KEY_FLG_NO_ENC)
-               priv->stations[sta_id].sta.key.key_offset =
-                                iwl_legacy_get_free_ucode_key_index(priv);
-       /* else, we are overriding an existing key => no need to allocate room
-        * in uCode. */
-
-       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-               "no space for a new key");
-
-       priv->stations[sta_id].sta.key.key_flags = key_flags;
-
-
-       /* This copy is actually not needed: we get the key with each TX */
-       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
-
-       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return ret;
-}
-
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
-                        struct iwl_rxon_context *ctx,
-                        struct ieee80211_key_conf *keyconf,
-                        struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
-{
-       u8 sta_id;
-       unsigned long flags;
-       int i;
-
-       if (iwl_legacy_scan_cancel(priv)) {
-               /* cancel scan failed, just live with the bad key and rely
-                  briefly on SW decryption */
-               return;
-       }
-
-       sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
-       if (sta_id == IWL_INVALID_STATION)
-               return;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-
-       priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
-       for (i = 0; i < 5; i++)
-               priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
-                       cpu_to_le16(phase1key[i]);
-
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-       iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
-
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
-                          struct iwl_rxon_context *ctx,
-                          struct ieee80211_key_conf *keyconf,
-                          u8 sta_id)
-{
-       unsigned long flags;
-       u16 key_flags;
-       u8 keyidx;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       lockdep_assert_held(&priv->mutex);
-
-       ctx->key_mapping_keys--;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
-       keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
-       IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
-                     keyconf->keyidx, sta_id);
-
-       if (keyconf->keyidx != keyidx) {
-               /* We need to remove a key with an index different from the one
-                * in the uCode. This means that the key we need to remove has
-                * been replaced by another one with a different index.
-                * Don't do anything and just return success.
-                */
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-               return 0;
-       }
-
-       if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
-               IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
-                           keyconf->keyidx, key_flags);
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-               return 0;
-       }
-
-       if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
-               &priv->ucode_key_table))
-               IWL_ERR(priv, "index %d not used in uCode key table.\n",
-                       priv->stations[sta_id].sta.key.key_offset);
-       memset(&priv->stations[sta_id].keyinfo, 0,
-                                       sizeof(struct iwl_hw_key));
-       memset(&priv->stations[sta_id].sta.key, 0,
-                                       sizeof(struct iwl4965_keyinfo));
-       priv->stations[sta_id].sta.key.key_flags =
-                       STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-       priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-       if (iwl_legacy_is_rfkill(priv)) {
-               IWL_DEBUG_WEP(priv,
-                "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-               return 0;
-       }
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                       sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-                       struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       ctx->key_mapping_keys++;
-       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
-       switch (keyconf->cipher) {
-       case WLAN_CIPHER_SUITE_CCMP:
-               ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
-                                                       keyconf, sta_id);
-               break;
-       case WLAN_CIPHER_SUITE_TKIP:
-               ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
-                                                       keyconf, sta_id);
-               break;
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
-               ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
-                                                       keyconf, sta_id);
-               break;
-       default:
-               IWL_ERR(priv,
-                       "Unknown alg: %s cipher = %x\n", __func__,
-                       keyconf->cipher);
-               ret = -EINVAL;
-       }
-
-       IWL_DEBUG_WEP(priv,
-               "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
-                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-                     sta_id, ret);
-
-       return ret;
-}
-
-/**
- * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
- *
- * This adds the broadcast station into the driver's station table
- * and marks it driver active, so that it will be restored to the
- * device at the next best time.
- */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx)
-{
-       struct iwl_link_quality_cmd *link_cmd;
-       unsigned long flags;
-       u8 sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
-                                                               false, NULL);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Unable to prepare broadcast station\n");
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-               return -EINVAL;
-       }
-
-       priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
-       priv->stations[sta_id].used |= IWL_STA_BCAST;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
-       if (!link_cmd) {
-               IWL_ERR(priv,
-                       "Unable to initialize rate scaling for bcast station.\n");
-               return -ENOMEM;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].lq = link_cmd;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-
-/**
- * iwl4965_update_bcast_station - update broadcast station's LQ command
- *
- * Only used by iwl4965. Placed here to have all bcast station management
- * code together.
- */
-static int iwl4965_update_bcast_station(struct iwl_priv *priv,
-                                   struct iwl_rxon_context *ctx)
-{
-       unsigned long flags;
-       struct iwl_link_quality_cmd *link_cmd;
-       u8 sta_id = ctx->bcast_sta_id;
-
-       link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
-       if (!link_cmd) {
-               IWL_ERR(priv,
-               "Unable to initialize rate scaling for bcast station.\n");
-               return -ENOMEM;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       if (priv->stations[sta_id].lq)
-               kfree(priv->stations[sta_id].lq);
-       else
-               IWL_DEBUG_INFO(priv,
-               "Bcast station rate scaling has not been initialized yet.\n");
-       priv->stations[sta_id].lq = link_cmd;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-
-int iwl4965_update_bcast_stations(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx;
-       int ret = 0;
-
-       for_each_context(priv, ctx) {
-               ret = iwl4965_update_bcast_station(priv, ctx);
-               if (ret)
-                       break;
-       }
-
-       return ret;
-}
-
-/**
- * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
- */
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
-{
-       unsigned long flags;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       lockdep_assert_held(&priv->mutex);
-
-       /* Remove "disable" flag, to enable Tx for this TID */
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
-       priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                                       sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
-                        int tid, u16 ssn)
-{
-       unsigned long flags;
-       int sta_id;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       lockdep_assert_held(&priv->mutex);
-
-       sta_id = iwl_legacy_sta_id(sta);
-       if (sta_id == IWL_INVALID_STATION)
-               return -ENXIO;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].sta.station_flags_msk = 0;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
-       priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
-       priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                                       sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
-                       int tid)
-{
-       unsigned long flags;
-       int sta_id;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       lockdep_assert_held(&priv->mutex);
-
-       sta_id = iwl_legacy_sta_id(sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-               return -ENXIO;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].sta.station_flags_msk = 0;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
-       priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                               sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-void
-iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
-       priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
-       priv->stations[sta_id].sta.sta.modify_mask =
-                                       STA_MODIFY_SLEEP_TX_COUNT_MSK;
-       priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       iwl_legacy_send_add_sta(priv,
-                               &priv->stations[sta_id].sta, CMD_ASYNC);
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644 (file)
index 7f12e36..0000000
+++ /dev/null
@@ -1,1378 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get from it
- * by way of skb_get_queue_mapping(skb):
- *
- *     VO      0
- *     VI      1
- *     BE      2
- *     BK      3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
-       IEEE80211_AC_BE,
-       IEEE80211_AC_BK,
-       IEEE80211_AC_BK,
-       IEEE80211_AC_BE,
-       IEEE80211_AC_VI,
-       IEEE80211_AC_VI,
-       IEEE80211_AC_VO,
-       IEEE80211_AC_VO
-};
-
-static inline int iwl4965_get_ac_from_tid(u16 tid)
-{
-       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-               return tid_to_ac[tid];
-
-       /* no support for TIDs 8-15 yet */
-       return -EINVAL;
-}
-
-static inline int
-iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
-       if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-               return ctx->ac_to_fifo[tid_to_ac[tid]];
-
-       /* no support for TIDs 8-15 yet */
-       return -EINVAL;
-}
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
-                                       struct sk_buff *skb,
-                                       struct iwl_tx_cmd *tx_cmd,
-                                       struct ieee80211_tx_info *info,
-                                       struct ieee80211_hdr *hdr,
-                                       u8 std_id)
-{
-       __le16 fc = hdr->frame_control;
-       __le32 tx_flags = tx_cmd->tx_flags;
-
-       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-               tx_flags |= TX_CMD_FLG_ACK_MSK;
-               if (ieee80211_is_mgmt(fc))
-                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-               if (ieee80211_is_probe_resp(fc) &&
-                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
-                       tx_flags |= TX_CMD_FLG_TSF_MSK;
-       } else {
-               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-       }
-
-       if (ieee80211_is_back_req(fc))
-               tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
-       tx_cmd->sta_id = std_id;
-       if (ieee80211_has_morefrags(fc))
-               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
-       if (ieee80211_is_data_qos(fc)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tx_cmd->tid_tspec = qc[0] & 0xf;
-               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
-       } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-       }
-
-       iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
-       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
-       if (ieee80211_is_mgmt(fc)) {
-               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
-               else
-                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
-       } else {
-               tx_cmd->timeout.pm_frame_timeout = 0;
-       }
-
-       tx_cmd->driver_txop = 0;
-       tx_cmd->tx_flags = tx_flags;
-       tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT         60
-
-static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
-                             struct iwl_tx_cmd *tx_cmd,
-                             struct ieee80211_tx_info *info,
-                             __le16 fc)
-{
-       u32 rate_flags;
-       int rate_idx;
-       u8 rts_retry_limit;
-       u8 data_retry_limit;
-       u8 rate_plcp;
-
-       /* Set retry limit on DATA packets and Probe Responses*/
-       if (ieee80211_is_probe_resp(fc))
-               data_retry_limit = 3;
-       else
-               data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
-       tx_cmd->data_retry_limit = data_retry_limit;
-
-       /* Set retry limit on RTS packets */
-       rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
-       if (data_retry_limit < rts_retry_limit)
-               rts_retry_limit = data_retry_limit;
-       tx_cmd->rts_retry_limit = rts_retry_limit;
-
-       /* DATA packets will use the uCode station table for rate/antenna
-        * selection */
-       if (ieee80211_is_data(fc)) {
-               tx_cmd->initial_rate_index = 0;
-               tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-               return;
-       }
-
-       /**
-        * If the current TX rate stored in mac80211 has the MCS bit set, it's
-        * not really a TX rate.  Thus, we use the lowest supported rate for
-        * this band.  Also use the lowest supported rate if the stored rate
-        * index is invalid.
-        */
-       rate_idx = info->control.rates[0].idx;
-       if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
-                       (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
-               rate_idx = rate_lowest_index(&priv->bands[info->band],
-                               info->control.sta);
-       /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-       if (info->band == IEEE80211_BAND_5GHZ)
-               rate_idx += IWL_FIRST_OFDM_RATE;
-       /* Get PLCP rate for tx_cmd->rate_n_flags */
-       rate_plcp = iwlegacy_rates[rate_idx].plcp;
-       /* Zero out flags for this packet */
-       rate_flags = 0;
-
-       /* Set CCK flag as needed */
-       if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
-               rate_flags |= RATE_MCS_CCK_MSK;
-
-       /* Set up antennas */
-       priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-                                     priv->hw_params.valid_tx_ant);
-
-       rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-
-       /* Set the rate in the TX cmd */
-       tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
-                                     struct ieee80211_tx_info *info,
-                                     struct iwl_tx_cmd *tx_cmd,
-                                     struct sk_buff *skb_frag,
-                                     int sta_id)
-{
-       struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
-       switch (keyconf->cipher) {
-       case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-               if (info->flags & IEEE80211_TX_CTL_AMPDU)
-                       tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
-               IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
-               break;
-
-       case WLAN_CIPHER_SUITE_TKIP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
-               ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
-               IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
-               break;
-
-       case WLAN_CIPHER_SUITE_WEP104:
-               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-               /* fall through */
-       case WLAN_CIPHER_SUITE_WEP40:
-               tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
-                       (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
-               memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
-               IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
-                            "with key %d\n", keyconf->keyidx);
-               break;
-
-       default:
-               IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
-               break;
-       }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_sta *sta = info->control.sta;
-       struct iwl_station_priv *sta_priv = NULL;
-       struct iwl_tx_queue *txq;
-       struct iwl_queue *q;
-       struct iwl_device_cmd *out_cmd;
-       struct iwl_cmd_meta *out_meta;
-       struct iwl_tx_cmd *tx_cmd;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       int txq_id;
-       dma_addr_t phys_addr;
-       dma_addr_t txcmd_phys;
-       dma_addr_t scratch_phys;
-       u16 len, firstlen, secondlen;
-       u16 seq_number = 0;
-       __le16 fc;
-       u8 hdr_len;
-       u8 sta_id;
-       u8 wait_write_ptr = 0;
-       u8 tid = 0;
-       u8 *qc = NULL;
-       unsigned long flags;
-       bool is_agg = false;
-
-       if (info->control.vif)
-               ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
-
-       spin_lock_irqsave(&priv->lock, flags);
-       if (iwl_legacy_is_rfkill(priv)) {
-               IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-               goto drop_unlock;
-       }
-
-       fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (ieee80211_is_auth(fc))
-               IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
-       else if (ieee80211_is_assoc_req(fc))
-               IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
-       else if (ieee80211_is_reassoc_req(fc))
-               IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
-       hdr_len = ieee80211_hdrlen(fc);
-
-       /* For management frames, use the broadcast id so as not to break aggregation */
-       if (!ieee80211_is_data(fc))
-               sta_id = ctx->bcast_sta_id;
-       else {
-               /* Find index into station table for destination station */
-               sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-
-               if (sta_id == IWL_INVALID_STATION) {
-                       IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-                                      hdr->addr1);
-                       goto drop_unlock;
-               }
-       }
-
-       IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
-       if (sta)
-               sta_priv = (void *)sta->drv_priv;
-
-       if (sta_priv && sta_priv->asleep &&
-           (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
-               /*
-                * This sends an asynchronous command to the device,
-                * but we can rely on it being processed before the
-                * next frame is processed -- and the next frame to
-                * this station is the one that will consume this
-                * counter.
-                * For now set the counter to just 1 since we do not
-                * support uAPSD yet.
-                */
-               iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
-       }
-
-       /*
-        * Send this frame after DTIM -- there's a special queue
-        * reserved for this for contexts that support AP mode.
-        */
-       if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
-               txq_id = ctx->mcast_queue;
-               /*
-                * The microcode will clear the more data
-                * bit in the last frame it transmits.
-                */
-               hdr->frame_control |=
-                       cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-       } else
-               txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
-       /* irqs already disabled/saved above when locking priv->lock */
-       spin_lock(&priv->sta_lock);
-
-       if (ieee80211_is_data_qos(fc)) {
-               qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-               if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
-                       spin_unlock(&priv->sta_lock);
-                       goto drop_unlock;
-               }
-               seq_number = priv->stations[sta_id].tid[tid].seq_number;
-               seq_number &= IEEE80211_SCTL_SEQ;
-               hdr->seq_ctrl = hdr->seq_ctrl &
-                               cpu_to_le16(IEEE80211_SCTL_FRAG);
-               hdr->seq_ctrl |= cpu_to_le16(seq_number);
-               seq_number += 0x10;
-               /* aggregation is on for this <sta,tid> */
-               if (info->flags & IEEE80211_TX_CTL_AMPDU &&
-                   priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
-                       txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
-                       is_agg = true;
-               }
-       }
-
-       txq = &priv->txq[txq_id];
-       q = &txq->q;
-
-       if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
-               spin_unlock(&priv->sta_lock);
-               goto drop_unlock;
-       }
-
-       if (ieee80211_is_data_qos(fc)) {
-               priv->stations[sta_id].tid[tid].tfds_in_queue++;
-               if (!ieee80211_has_morefrags(fc))
-                       priv->stations[sta_id].tid[tid].seq_number = seq_number;
-       }
-
-       spin_unlock(&priv->sta_lock);
-
-       /* Set up driver data for this TFD */
-       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-       txq->txb[q->write_ptr].skb = skb;
-       txq->txb[q->write_ptr].ctx = ctx;
-
-       /* Set up first empty entry in queue's array of Tx/cmd buffers */
-       out_cmd = txq->cmd[q->write_ptr];
-       out_meta = &txq->meta[q->write_ptr];
-       tx_cmd = &out_cmd->cmd.tx;
-       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-       memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
-       /*
-        * Set up the Tx-command (not MAC!) header.
-        * Store the chosen Tx queue and TFD index within the sequence field;
-        * after Tx, uCode's Tx response will return this value so driver can
-        * locate the frame within the tx queue and do post-tx processing.
-        */
-       out_cmd->hdr.cmd = REPLY_TX;
-       out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-                               INDEX_TO_SEQ(q->write_ptr)));
-
-       /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
-       /* Total # bytes to be transmitted */
-       len = (u16)skb->len;
-       tx_cmd->len = cpu_to_le16(len);
-
-       if (info->control.hw_key)
-               iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
-       /* TODO need this for burst mode later on */
-       iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
-       iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-
-       iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
-       iwl_legacy_update_stats(priv, true, fc, len);
-       /*
-        * Use the first empty entry in this queue's command buffer array
-        * to contain the Tx command and MAC header concatenated together
-        * (payload data will be in another buffer).
-        * Size of this varies, due to varying MAC header length.
-        * If end is not dword aligned, we'll have 2 extra bytes at the end
-        * of the MAC header (device reads on dword boundaries).
-        * We'll tell device about this padding later.
-        */
-       len = sizeof(struct iwl_tx_cmd) +
-               sizeof(struct iwl_cmd_header) + hdr_len;
-       firstlen = (len + 3) & ~3;
-
-       /* Tell NIC about any 2-byte padding after MAC header */
-       if (firstlen != len)
-               tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
-       /* Physical address of this Tx command's header (not MAC header!),
-        * within command buffer array. */
-       txcmd_phys = pci_map_single(priv->pci_dev,
-                                   &out_cmd->hdr, firstlen,
-                                   PCI_DMA_BIDIRECTIONAL);
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, firstlen);
-       /* Add buffer containing Tx command and MAC(!) header to TFD's
-        * first entry */
-       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                  txcmd_phys, firstlen, 1, 0);
-
-       if (!ieee80211_has_morefrags(hdr->frame_control)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
-
-       /* Set up TFD's 2nd entry to point directly to remainder of skb,
-        * if any (802.11 null frames have no payload). */
-       secondlen = skb->len - hdr_len;
-       if (secondlen > 0) {
-               phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-                                          secondlen, PCI_DMA_TODEVICE);
-               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                          phys_addr, secondlen,
-                                                          0, 0);
-       }
-
-       scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-                               offsetof(struct iwl_tx_cmd, scratch);
-
-       /* take back ownership of DMA buffer to enable update */
-       pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
-                                   firstlen, PCI_DMA_BIDIRECTIONAL);
-       tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-       tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
-
-       IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
-                    le16_to_cpu(out_cmd->hdr.sequence));
-       IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
-       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
-       /* Set up entry for this TFD in Tx byte-count array */
-       if (info->flags & IEEE80211_TX_CTL_AMPDU)
-               priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
-                                                    le16_to_cpu(tx_cmd->len));
-
-       pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
-                                      firstlen, PCI_DMA_BIDIRECTIONAL);
-
-       trace_iwlwifi_legacy_dev_tx(priv,
-                            &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
-                            sizeof(struct iwl_tfd),
-                            &out_cmd->hdr, firstlen,
-                            skb->data + hdr_len, secondlen);
-
-       /* Tell device the write index *just past* this latest filled TFD */
-       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_legacy_txq_update_write_ptr(priv, txq);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /*
-        * At this point the frame is "transmitted" successfully
-        * and we will get a TX status notification eventually,
-        * regardless of the value of ret. "ret" only indicates
-        * whether or not we should update the write pointer.
-        */
-
-       /*
-        * Avoid atomic ops if it isn't an associated client.
-        * Also, if this is a packet for aggregation, don't
-        * increase the counter because the ucode will stop
-        * aggregation queues when their respective station
-        * goes to sleep.
-        */
-       if (sta_priv && sta_priv->client && !is_agg)
-               atomic_inc(&sta_priv->pending_frames);
-
-       if ((iwl_legacy_queue_space(q) < q->high_mark) &&
-                       priv->mac80211_registered) {
-               if (wait_write_ptr) {
-                       spin_lock_irqsave(&priv->lock, flags);
-                       txq->need_update = 1;
-                       iwl_legacy_txq_update_write_ptr(priv, txq);
-                       spin_unlock_irqrestore(&priv->lock, flags);
-               } else {
-                       iwl_legacy_stop_queue(priv, txq);
-               }
-       }
-
-       return 0;
-
-drop_unlock:
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return -1;
-}
-
-static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
-                                   struct iwl_dma_ptr *ptr, size_t size)
-{
-       ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
-                                      GFP_KERNEL);
-       if (!ptr->addr)
-               return -ENOMEM;
-       ptr->size = size;
-       return 0;
-}
-
-static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
-                                   struct iwl_dma_ptr *ptr)
-{
-       if (unlikely(!ptr->addr))
-               return;
-
-       dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
-       memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwl4965_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
-{
-       int txq_id;
-
-       /* Tx queues */
-       if (priv->txq) {
-               for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-                       if (txq_id == priv->cmd_queue)
-                               iwl_legacy_cmd_queue_free(priv);
-                       else
-                               iwl_legacy_tx_queue_free(priv, txq_id);
-       }
-       iwl4965_free_dma_ptr(priv, &priv->kw);
-
-       iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
-       /* free tx queue structure */
-       iwl_legacy_txq_mem(priv);
-}
-
-/**
- * iwl4965_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
-{
-       int ret;
-       int txq_id, slots_num;
-       unsigned long flags;
-
-       /* Free all tx/cmd queues and keep-warm buffer */
-       iwl4965_hw_txq_ctx_free(priv);
-
-       ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
-                               priv->hw_params.scd_bc_tbls_size);
-       if (ret) {
-               IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
-               goto error_bc_tbls;
-       }
-       /* Alloc keep-warm buffer */
-       ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
-       if (ret) {
-               IWL_ERR(priv, "Keep Warm allocation failed\n");
-               goto error_kw;
-       }
-
-       /* allocate tx queue structure */
-       ret = iwl_legacy_alloc_txq_mem(priv);
-       if (ret)
-               goto error;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Turn off all Tx DMA fifos */
-       iwl4965_txq_set_sched(priv, 0);
-
-       /* Tell NIC where to find the "keep warm" buffer */
-       iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Alloc and init all Tx queues, including the command queue (#4/#9) */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               slots_num = (txq_id == priv->cmd_queue) ?
-                                       TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               ret = iwl_legacy_tx_queue_init(priv,
-                                       &priv->txq[txq_id], slots_num,
-                                      txq_id);
-               if (ret) {
-                       IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
-                       goto error;
-               }
-       }
-
-       return ret;
-
- error:
-       iwl4965_hw_txq_ctx_free(priv);
-       iwl4965_free_dma_ptr(priv, &priv->kw);
- error_kw:
-       iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
-       return ret;
-}
-
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
-{
-       int txq_id, slots_num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Turn off all Tx DMA fifos */
-       iwl4965_txq_set_sched(priv, 0);
-
-       /* Tell NIC where to find the "keep warm" buffer */
-       iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Alloc and init all Tx queues, including the command queue (#4) */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-               slots_num = txq_id == priv->cmd_queue ?
-                           TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
-                                               slots_num, txq_id);
-       }
-}
-
-/**
- * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
-{
-       int ch, txq_id;
-       unsigned long flags;
-
-       /* Turn off all Tx DMA fifos */
-       spin_lock_irqsave(&priv->lock, flags);
-
-       iwl4965_txq_set_sched(priv, 0);
-
-       /* Stop each Tx DMA channel, and wait for it to be idle */
-       for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
-               iwl_legacy_write_direct32(priv,
-                               FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-               if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
-                                   FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
-                                   1000))
-                       IWL_ERR(priv, "Failing on timeout while stopping"
-                           " DMA channel %d [0x%08x]", ch,
-                           iwl_legacy_read_direct32(priv,
-                                       FH_TSSR_TX_STATUS_REG));
-       }
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (!priv->txq)
-               return;
-
-       /* Unmap DMA from host system and free skb's */
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-               if (txq_id == priv->cmd_queue)
-                       iwl_legacy_cmd_queue_unmap(priv);
-               else
-                       iwl_legacy_tx_queue_unmap(priv, txq_id);
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
-{
-       int txq_id;
-
-       for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-               if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
-                       return txq_id;
-       return -1;
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
-                                           u16 txq_id)
-{
-       /* Simply stop the queue, but don't change any configuration;
-        * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-       iwl_legacy_write_prph(priv,
-               IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-               (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
-               (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
-                                       u16 txq_id)
-{
-       u32 tbl_dw_addr;
-       u32 tbl_dw;
-       u16 scd_q2ratid;
-
-       scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
-       tbl_dw_addr = priv->scd_base_addr +
-                       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
-       tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
-
-       if (txq_id & 0x1)
-               tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
-       else
-               tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
-       iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
-       return 0;
-}
-
-/**
- * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE:  txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
- *        i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-                                 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
-       unsigned long flags;
-       u16 ra_tid;
-       int ret;
-
-       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-           (IWL49_FIRST_AMPDU_QUEUE +
-               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-               IWL_WARN(priv,
-                       "queue number out of range: %d, must be %d to %d\n",
-                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
-                       IWL49_FIRST_AMPDU_QUEUE +
-                       priv->cfg->base_params->num_of_ampdu_queues - 1);
-               return -EINVAL;
-       }
-
-       ra_tid = BUILD_RAxTID(sta_id, tid);
-
-       /* Modify device's station table to Tx this TID */
-       ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
-       if (ret)
-               return ret;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Stop this Tx queue before configuring it */
-       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-       /* Map receiver-address / traffic-ID to this queue */
-       iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
-       /* Set this queue as a chain-building queue */
-       iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-       /* Place first TFD at index corresponding to start sequence number.
-        * Assumes that ssn_idx is valid (!= 0xFFF) */
-       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-       /* Set up Tx window size and frame limit for this queue */
-       iwl_legacy_write_targ_mem(priv,
-               priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
-               (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-       iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
-               IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
-               (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
-               & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-       iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
-       /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
-       int sta_id;
-       int tx_fifo;
-       int txq_id;
-       int ret;
-       unsigned long flags;
-       struct iwl_tid_data *tid_data;
-
-       tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
-       if (unlikely(tx_fifo < 0))
-               return tx_fifo;
-
-       IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
-                       __func__, sta->addr, tid);
-
-       sta_id = iwl_legacy_sta_id(sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Start AGG on invalid station\n");
-               return -ENXIO;
-       }
-       if (unlikely(tid >= MAX_TID_COUNT))
-               return -EINVAL;
-
-       if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
-               IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
-               return -ENXIO;
-       }
-
-       txq_id = iwl4965_txq_ctx_activate_free(priv);
-       if (txq_id == -1) {
-               IWL_ERR(priv, "No free aggregation queue available\n");
-               return -ENXIO;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       tid_data = &priv->stations[sta_id].tid[tid];
-       *ssn = SEQ_TO_SN(tid_data->seq_number);
-       tid_data->agg.txq_id = txq_id;
-       iwl_legacy_set_swq_id(&priv->txq[txq_id],
-                               iwl4965_get_ac_from_tid(tid), txq_id);
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
-                                                 sta_id, tid, *ssn);
-       if (ret)
-               return ret;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       tid_data = &priv->stations[sta_id].tid[tid];
-       if (tid_data->tfds_in_queue == 0) {
-               IWL_DEBUG_HT(priv, "HW queue is empty\n");
-               tid_data->agg.state = IWL_AGG_ON;
-               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-       } else {
-               IWL_DEBUG_HT(priv,
-                       "HW queue is NOT empty: %d packets in HW queue\n",
-                            tid_data->tfds_in_queue);
-               tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
-       }
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-       return ret;
-}
-
-/**
- * txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE
- * priv->lock must be held by the caller
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-                                  u16 ssn_idx, u8 tx_fifo)
-{
-       if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
-           (IWL49_FIRST_AMPDU_QUEUE +
-               priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-               IWL_WARN(priv,
-                       "queue number out of range: %d, must be %d to %d\n",
-                       txq_id, IWL49_FIRST_AMPDU_QUEUE,
-                       IWL49_FIRST_AMPDU_QUEUE +
-                       priv->cfg->base_params->num_of_ampdu_queues - 1);
-               return -EINVAL;
-       }
-
-       iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
-       iwl_legacy_clear_bits_prph(priv,
-                       IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
-       priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-       priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-       /* supposes that ssn_idx is valid (!= 0xFFF) */
-       iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
-       iwl_legacy_clear_bits_prph(priv,
-                        IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-       iwl_txq_ctx_deactivate(priv, txq_id);
-       iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
-       return 0;
-}
-
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
-                      struct ieee80211_sta *sta, u16 tid)
-{
-       int tx_fifo_id, txq_id, sta_id, ssn;
-       struct iwl_tid_data *tid_data;
-       int write_ptr, read_ptr;
-       unsigned long flags;
-
-       tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
-       if (unlikely(tx_fifo_id < 0))
-               return tx_fifo_id;
-
-       sta_id = iwl_legacy_sta_id(sta);
-
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-               return -ENXIO;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-
-       tid_data = &priv->stations[sta_id].tid[tid];
-       ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
-       txq_id = tid_data->agg.txq_id;
-
-       switch (priv->stations[sta_id].tid[tid].agg.state) {
-       case IWL_EMPTYING_HW_QUEUE_ADDBA:
-               /*
-                * This can happen if the peer stops aggregation
-                * again before we've had a chance to drain the
-                * queue we selected previously, i.e. before the
-                * session was really started completely.
-                */
-               IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-               goto turn_off;
-       case IWL_AGG_ON:
-               break;
-       default:
-               IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-       }
-
-       write_ptr = priv->txq[txq_id].q.write_ptr;
-       read_ptr = priv->txq[txq_id].q.read_ptr;
-
-       /* The queue is not empty */
-       if (write_ptr != read_ptr) {
-               IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
-               priv->stations[sta_id].tid[tid].agg.state =
-                               IWL_EMPTYING_HW_QUEUE_DELBA;
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-               return 0;
-       }
-
-       IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
-       priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
-       /* do not restore/save irqs */
-       spin_unlock(&priv->sta_lock);
-       spin_lock(&priv->lock);
-
-       /*
-        * The only reason this call can fail is a queue number out of range,
-        * which can happen if uCode is reloaded and all the station
-        * information is lost. If it is outside the range, there is no need
-        * to deactivate the uCode queue; just return "success" to allow
-        * mac80211 to clean up its own data.
-        */
-       iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
-       return 0;
-}
-
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
-                          int sta_id, u8 tid, int txq_id)
-{
-       struct iwl_queue *q = &priv->txq[txq_id].q;
-       u8 *addr = priv->stations[sta_id].sta.sta.addr;
-       struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
-       struct iwl_rxon_context *ctx;
-
-       ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
-       lockdep_assert_held(&priv->sta_lock);
-
-       switch (priv->stations[sta_id].tid[tid].agg.state) {
-       case IWL_EMPTYING_HW_QUEUE_DELBA:
-               /* We are reclaiming the last packet of the
-                * aggregated HW queue */
-               if ((txq_id  == tid_data->agg.txq_id) &&
-                   (q->read_ptr == q->write_ptr)) {
-                       u16 ssn = SEQ_TO_SN(tid_data->seq_number);
-                       int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
-                       IWL_DEBUG_HT(priv,
-                               "HW queue empty: continue DELBA flow\n");
-                       iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
-                       tid_data->agg.state = IWL_AGG_OFF;
-                       ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
-               }
-               break;
-       case IWL_EMPTYING_HW_QUEUE_ADDBA:
-               /* We are reclaiming the last packet of the queue */
-               if (tid_data->tfds_in_queue == 0) {
-                       IWL_DEBUG_HT(priv,
-                               "HW queue empty: continue ADDBA flow\n");
-                       tid_data->agg.state = IWL_AGG_ON;
-                       ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
-               }
-               break;
-       }
-
-       return 0;
-}
-
-static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
-                                    struct iwl_rxon_context *ctx,
-                                    const u8 *addr1)
-{
-       struct ieee80211_sta *sta;
-       struct iwl_station_priv *sta_priv;
-
-       rcu_read_lock();
-       sta = ieee80211_find_sta(ctx->vif, addr1);
-       if (sta) {
-               sta_priv = (void *)sta->drv_priv;
-               /* avoid atomic ops if this isn't a client */
-               if (sta_priv->client &&
-                   atomic_dec_return(&sta_priv->pending_frames) == 0)
-                       ieee80211_sta_block_awake(priv->hw, sta, false);
-       }
-       rcu_read_unlock();
-}
-
-static void
-iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
-                            bool is_agg)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
-
-       if (!is_agg)
-               iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
-
-       ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-}
-
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
-       struct iwl_tx_info *tx_info;
-       int nfreed = 0;
-       struct ieee80211_hdr *hdr;
-
-       if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
-               IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-                         "is out of range [0-%d] %d %d.\n", txq_id,
-                         index, q->n_bd, q->write_ptr, q->read_ptr);
-               return 0;
-       }
-
-       for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
-            q->read_ptr != index;
-            q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-               tx_info = &txq->txb[txq->q.read_ptr];
-
-               if (WARN_ON_ONCE(tx_info->skb == NULL))
-                       continue;
-
-               hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-               if (ieee80211_is_data_qos(hdr->frame_control))
-                       nfreed++;
-
-               iwl4965_tx_status(priv, tx_info,
-                                txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
-               tx_info->skb = NULL;
-
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-       }
-       return nfreed;
-}
-
-/**
- * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
-                                struct iwl_ht_agg *agg,
-                                struct iwl_compressed_ba_resp *ba_resp)
-
-{
-       int i, sh, ack;
-       u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
-       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-       int successes = 0;
-       struct ieee80211_tx_info *info;
-       u64 bitmap, sent_bitmap;
-
-       if (unlikely(!agg->wait_for_ba))  {
-               if (unlikely(ba_resp->bitmap))
-                       IWL_ERR(priv, "Received BA when not expected\n");
-               return -EINVAL;
-       }
-
-       /* Mark that the expected block-ack response arrived */
-       agg->wait_for_ba = 0;
-       IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
-                                                       ba_resp->seq_ctl);
-
-       /* Calculate shift to align block-ack bits with our Tx window bits */
-       sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
-       if (sh < 0) /* something is wrong with the indices */
-               sh += 0x100;
-
-       if (agg->frame_count > (64 - sh)) {
-               IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
-               return -1;
-       }
-
-       /* don't use 64-bit values for now */
-       bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
-       /* check for success or failure according to the
-        * transmitted bitmap and block-ack bitmap */
-       sent_bitmap = bitmap & agg->bitmap;
-
-       /* For each frame attempted in aggregation,
-        * update driver's record of tx frame's status. */
-       i = 0;
-       while (sent_bitmap) {
-               ack = sent_bitmap & 1ULL;
-               successes += ack;
-               IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
-                       ack ? "ACK" : "NACK", i,
-                       (agg->start_idx + i) & 0xff,
-                       agg->start_idx + i);
-               sent_bitmap >>= 1;
-               ++i;
-       }
-
-       IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
-                                  (unsigned long long)bitmap);
-
-       info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
-       memset(&info->status, 0, sizeof(info->status));
-       info->flags |= IEEE80211_TX_STAT_ACK;
-       info->flags |= IEEE80211_TX_STAT_AMPDU;
-       info->status.ampdu_ack_len = successes;
-       info->status.ampdu_len = agg->frame_count;
-       iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
-       return 0;
-}
-
-/**
- * translate ucode response to mac80211 tx status control values
- */
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
-                                 struct ieee80211_tx_info *info)
-{
-       struct ieee80211_tx_rate *r = &info->control.rates[0];
-
-       info->antenna_sel_tx =
-               ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
-       if (rate_n_flags & RATE_MCS_HT_MSK)
-               r->flags |= IEEE80211_TX_RC_MCS;
-       if (rate_n_flags & RATE_MCS_GF_MSK)
-               r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
-       if (rate_n_flags & RATE_MCS_HT40_MSK)
-               r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-       if (rate_n_flags & RATE_MCS_DUP_MSK)
-               r->flags |= IEEE80211_TX_RC_DUP_DATA;
-       if (rate_n_flags & RATE_MCS_SGI_MSK)
-               r->flags |= IEEE80211_TX_RC_SHORT_GI;
-       r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-
-/**
- * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                          struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
-       struct iwl_tx_queue *txq = NULL;
-       struct iwl_ht_agg *agg;
-       int index;
-       int sta_id;
-       int tid;
-       unsigned long flags;
-
-       /* "flow" corresponds to Tx queue */
-       u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
-       /* "ssn" is start of block-ack Tx window, corresponds to index
-        * (in Tx queue's circular buffer) of first TFD/frame in window */
-       u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
-       if (scd_flow >= priv->hw_params.max_txq_num) {
-               IWL_ERR(priv,
-                       "BUG_ON scd_flow is bigger than number of queues\n");
-               return;
-       }
-
-       txq = &priv->txq[scd_flow];
-       sta_id = ba_resp->sta_id;
-       tid = ba_resp->tid;
-       agg = &priv->stations[sta_id].tid[tid].agg;
-       if (unlikely(agg->txq_id != scd_flow)) {
-               /*
-                * FIXME: this is a uCode bug which needs to be addressed;
-                * log the information and return for now.
-                * Since it can happen very often, and in order not to fill
-                * the syslog, don't enable the logging by default.
-                */
-               IWL_DEBUG_TX_REPLY(priv,
-                       "BA scd_flow %d does not match txq_id %d\n",
-                       scd_flow, agg->txq_id);
-               return;
-       }
-
-       /* Find index just before block-ack window */
-       index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-
-       IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
-                          "sta_id = %d\n",
-                          agg->wait_for_ba,
-                          (u8 *) &ba_resp->sta_addr_lo32,
-                          ba_resp->sta_id);
-       IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
-                          "scd_flow = %d, scd_ssn = %d\n",
-                          ba_resp->tid,
-                          ba_resp->seq_ctl,
-                          (unsigned long long)le64_to_cpu(ba_resp->bitmap),
-                          ba_resp->scd_flow,
-                          ba_resp->scd_ssn);
-       IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
-                          agg->start_idx,
-                          (unsigned long long)agg->bitmap);
-
-       /* Update driver's record of ACK vs. not for each frame in window */
-       iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
-       /* Release all TFDs before the SSN, i.e. all TFDs in front of
-        * block-ack window (we assume that they've been successfully
-        * transmitted ... if not, it's too late anyway). */
-       if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
-               /* calculate mac80211 ampdu sw queue to wake */
-               int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
-               iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-
-               if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
-                   priv->mac80211_registered &&
-                   (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-                       iwl_legacy_wake_queue(priv, txq);
-
-               iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
-       }
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
-       switch (status & TX_STATUS_MSK) {
-       case TX_STATUS_SUCCESS:
-               return "SUCCESS";
-       TX_STATUS_POSTPONE(DELAY);
-       TX_STATUS_POSTPONE(FEW_BYTES);
-       TX_STATUS_POSTPONE(QUIET_PERIOD);
-       TX_STATUS_POSTPONE(CALC_TTAK);
-       TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
-       TX_STATUS_FAIL(SHORT_LIMIT);
-       TX_STATUS_FAIL(LONG_LIMIT);
-       TX_STATUS_FAIL(FIFO_UNDERRUN);
-       TX_STATUS_FAIL(DRAIN_FLOW);
-       TX_STATUS_FAIL(RFKILL_FLUSH);
-       TX_STATUS_FAIL(LIFE_EXPIRE);
-       TX_STATUS_FAIL(DEST_PS);
-       TX_STATUS_FAIL(HOST_ABORTED);
-       TX_STATUS_FAIL(BT_RETRY);
-       TX_STATUS_FAIL(STA_INVALID);
-       TX_STATUS_FAIL(FRAG_DROPPED);
-       TX_STATUS_FAIL(TID_DISABLE);
-       TX_STATUS_FAIL(FIFO_FLUSHED);
-       TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
-       TX_STATUS_FAIL(PASSIVE_NO_RX);
-       TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
-       }
-
-       return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644 (file)
index 001d148..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-4965-calib.h"
-
-#define IWL_AC_UNSET -1
-
-/**
- * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int
-iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-       u32 val;
-       int ret = 0;
-       u32 errcnt = 0;
-       u32 i;
-
-       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-                       i + IWL4965_RTC_INST_LOWER_BOUND);
-               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-                       ret = -EIO;
-                       errcnt++;
-                       if (errcnt >= 3)
-                               break;
-               }
-       }
-
-       return ret;
-}
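/*
 * A minimal host-side sketch of the sparse-sampling check above, kept
 * outside the driver and using illustrative names only (verify_sparse
 * is not a real iwlegacy helper): compare one 32-bit word every 100
 * bytes and give up after a few mismatches.
 */
#include <stdint.h>
#include <stddef.h>

static int verify_sparse(const uint32_t *ref, const uint32_t *img, size_t len)
{
	size_t i;
	unsigned int errcnt = 0;

	/* Same stride as the driver: one sample word per 100 bytes. */
	for (i = 0; i + sizeof(uint32_t) <= len; i += 100) {
		size_t w = i / sizeof(uint32_t);

		if (ref[w] != img[w] && ++errcnt >= 3)
			break;
	}

	return errcnt ? -1 : 0;
}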
-
-/**
- * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
- *     looking at all data.
- */
-static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
-                                u32 len)
-{
-       u32 val;
-       u32 save_len = len;
-       int ret = 0;
-       u32 errcnt;
-
-       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-       iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-                          IWL4965_RTC_INST_LOWER_BOUND);
-
-       errcnt = 0;
-       for (; len > 0; len -= sizeof(u32), image++) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "uCode INST section is invalid at "
-                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
-                                 save_len - len, val, le32_to_cpu(*image));
-                       ret = -EIO;
-                       errcnt++;
-                       if (errcnt >= 20)
-                               break;
-               }
-       }
-
-       if (!errcnt)
-               IWL_DEBUG_INFO(priv,
-                   "ucode image in INSTRUCTION memory is good\n");
-
-       return ret;
-}
-
-/**
- * iwl4965_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-int iwl4965_verify_ucode(struct iwl_priv *priv)
-{
-       __le32 *image;
-       u32 len;
-       int ret;
-
-       /* Try bootstrap */
-       image = (__le32 *)priv->ucode_boot.v_addr;
-       len = priv->ucode_boot.len;
-       ret = iwl4965_verify_inst_sparse(priv, image, len);
-       if (!ret) {
-               IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       /* Try initialize */
-       image = (__le32 *)priv->ucode_init.v_addr;
-       len = priv->ucode_init.len;
-       ret = iwl4965_verify_inst_sparse(priv, image, len);
-       if (!ret) {
-               IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       /* Try runtime/protocol */
-       image = (__le32 *)priv->ucode_code.v_addr;
-       len = priv->ucode_code.len;
-       ret = iwl4965_verify_inst_sparse(priv, image, len);
-       if (!ret) {
-               IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
-       /* Since nothing seems to match, show first several data entries in
-        * instruction SRAM, so maybe visual inspection will give a clue.
-        * Selection of bootstrap image (vs. other images) is arbitrary. */
-       image = (__le32 *)priv->ucode_boot.v_addr;
-       len = priv->ucode_boot.len;
-       ret = iwl4965_verify_inst_full(priv, image, len);
-
-       return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644 (file)
index 86f4fce..0000000
+++ /dev/null
@@ -1,2183 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-4965-led.h"
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       u32 reg;
-       u32 val;
-
-       IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
-       /* verify BSM SRAM contents */
-       val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
-       for (reg = BSM_SRAM_LOWER_BOUND;
-            reg < BSM_SRAM_LOWER_BOUND + len;
-            reg += sizeof(u32), image++) {
-               val = iwl_legacy_read_prph(priv, reg);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "BSM uCode verification failed at "
-                                 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
-                                 BSM_SRAM_LOWER_BOUND,
-                                 reg - BSM_SRAM_LOWER_BOUND, len,
-                                 val, le32_to_cpu(*image));
-                       return -EIO;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
-       return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL.  When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers.  Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image.  This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program.  This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
-       __le32 *image = priv->ucode_boot.v_addr;
-       u32 len = priv->ucode_boot.len;
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-       u32 inst_len;
-       u32 data_len;
-       int i;
-       u32 done;
-       u32 reg_offset;
-       int ret;
-
-       IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
-       priv->ucode_type = UCODE_RT;
-
-       /* make sure bootstrap program is no larger than BSM's SRAM size */
-       if (len > IWL49_MAX_BSM_SIZE)
-               return -EINVAL;
-
-       /* Tell bootstrap uCode where to find the "Initialize" uCode
-        *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
-        * NOTE:  iwl_init_alive_start() will replace these values,
-        *        after the "initialize" uCode has run, to point to
-        *        runtime/protocol instructions and backup data cache.
-        */
-       pinst = priv->ucode_init.p_addr >> 4;
-       pdata = priv->ucode_init_data.p_addr >> 4;
-       inst_len = priv->ucode_init.len;
-       data_len = priv->ucode_init_data.len;
-
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
-       /* Fill BSM memory with bootstrap instructions */
-       for (reg_offset = BSM_SRAM_LOWER_BOUND;
-            reg_offset < BSM_SRAM_LOWER_BOUND + len;
-            reg_offset += sizeof(u32), image++)
-               _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
-       ret = iwl4965_verify_bsm(priv);
-       if (ret)
-               return ret;
-
-       /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
-       iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
-       iwl_legacy_write_prph(priv,
-                       BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
-       iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
-       /* Load bootstrap code into instruction SRAM now,
-        *   to prepare to load "initialize" uCode */
-       iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
-       /* Wait for load of bootstrap uCode to finish */
-       for (i = 0; i < 100; i++) {
-               done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
-               if (!(done & BSM_WR_CTRL_REG_BIT_START))
-                       break;
-               udelay(10);
-       }
-       if (i < 100)
-               IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
-       else {
-               IWL_ERR(priv, "BSM write did not complete!\n");
-               return -EIO;
-       }
-
-       /* Enable future boot loads whenever power management unit triggers it
-        *   (e.g. when powering back up after power-save shutdown) */
-       iwl_legacy_write_prph(priv,
-                       BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
-       return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-       int ret = 0;
-
-       /* bits 35:4 for 4965 */
-       pinst = priv->ucode_code.p_addr >> 4;
-       pdata = priv->ucode_data_backup.p_addr >> 4;
-
-       /* Tell bootstrap uCode where to find image to load */
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
-                                priv->ucode_data.len);
-
-       /* Inst byte count must be last to set up, bit 31 signals uCode
-        *   that all new ptr/size info is in place */
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
-                                priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-       IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
-       return ret;
-}
-
-/**
- * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
- *   (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
-*/
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
-       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "initialize" alive if code weren't properly loaded.  */
-       if (iwl4965_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
-               goto restart;
-       }
-
-       /* Calculate temperature */
-       priv->temperature = iwl4965_hw_get_temperature(priv);
-
-       /* Send pointers to protocol/runtime uCode image ... init code will
-        * load and launch runtime uCode, which will send us another "Alive"
-        * notification. */
-       IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-       if (iwl4965_set_ucode_ptrs(priv)) {
-               /* Runtime instruction load won't happen;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
-               goto restart;
-       }
-       return;
-
-restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool iw4965_is_ht40_channel(__le32 rxon_flags)
-{
-       int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
-                                   >> RXON_FLG_CHANNEL_MODE_POS;
-       return ((chan_mod == CHANNEL_MODE_PURE_40) ||
-                 (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       u16 radio_cfg;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
-       /* write radio config values to register */
-       if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
-               iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                           EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
-                           EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
-                           EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
-       /* set CSR_HW_CONFIG_REG for uCode use */
-       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
-                   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
-       priv->calib_info = (struct iwl_eeprom_calib_info *)
-               iwl_legacy_eeprom_query_addr(priv,
-                               EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- *  ... once chain noise is calibrated the first time, it's good forever.  */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
-       struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
-       if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
-           iwl_legacy_is_any_associated(priv)) {
-               struct iwl_calib_diff_gain_cmd cmd;
-
-               /* clear data for chain noise calibration algorithm */
-               data->chain_noise_a = 0;
-               data->chain_noise_b = 0;
-               data->chain_noise_c = 0;
-               data->chain_signal_a = 0;
-               data->chain_signal_b = 0;
-               data->chain_signal_c = 0;
-               data->beacon_count = 0;
-
-               memset(&cmd, 0, sizeof(cmd));
-               cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
-               cmd.diff_gain_a = 0;
-               cmd.diff_gain_b = 0;
-               cmd.diff_gain_c = 0;
-               if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
-                                sizeof(cmd), &cmd))
-                       IWL_ERR(priv,
-                               "Could not send REPLY_PHY_CALIBRATION_CMD\n");
-               data->state = IWL_CHAIN_NOISE_ACCUMULATE;
-               IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
-       }
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
-       .min_nrg_cck = 97,
-       .max_nrg_cck = 0, /* not used, set to 0 */
-
-       .auto_corr_min_ofdm = 85,
-       .auto_corr_min_ofdm_mrc = 170,
-       .auto_corr_min_ofdm_x1 = 105,
-       .auto_corr_min_ofdm_mrc_x1 = 220,
-
-       .auto_corr_max_ofdm = 120,
-       .auto_corr_max_ofdm_mrc = 210,
-       .auto_corr_max_ofdm_x1 = 140,
-       .auto_corr_max_ofdm_mrc_x1 = 270,
-
-       .auto_corr_min_cck = 125,
-       .auto_corr_max_cck = 200,
-       .auto_corr_min_cck_mrc = 200,
-       .auto_corr_max_cck_mrc = 400,
-
-       .nrg_th_cck = 100,
-       .nrg_th_ofdm = 100,
-
-       .barker_corr_th_min = 190,
-       .barker_corr_th_min_mrc = 390,
-       .nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
-       /* want Kelvin */
-       priv->hw_params.ct_kill_threshold =
-               CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
-       if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
-           priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
-               priv->cfg->base_params->num_of_queues =
-                       priv->cfg->mod_params->num_of_queues;
-
-       priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-       priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
-       priv->hw_params.scd_bc_tbls_size =
-                       priv->cfg->base_params->num_of_queues *
-                       sizeof(struct iwl4965_scd_bc_tbl);
-       priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
-       priv->hw_params.max_stations = IWL4965_STATION_COUNT;
-       priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
-       priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
-       priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
-       priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
-       priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
-       priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
-       priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
-       priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
-       priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
-       priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
-       iwl4965_set_ct_threshold(priv);
-
-       priv->hw_params.sens = &iwl4965_sensitivity;
-       priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
-
-       return 0;
-}
-
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
-       s32 sign = 1;
-
-       if (num < 0) {
-               sign = -sign;
-               num = -num;
-       }
-       if (denom < 0) {
-               sign = -sign;
-               denom = -denom;
-       }
-       *res = 1;
-       *res = ((num * 2 + denom) / (denom * 2)) * sign;
-
-       return 1;
-}
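/*
 * A short note on the rounding idiom above (nothing here is driver
 * state, only arithmetic): (num * 2 + denom) / (denom * 2) is the usual
 * integer trick for rounding num/denom to the nearest integer, e.g.
 * num = 7, denom = 2 gives (14 + 2) / 4 = 4 instead of the truncated 3;
 * the sign of the operands is factored out first and reapplied at the
 * end.  The initial "*res = 1;" is overwritten immediately and the
 * function always returns 1.
 */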
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
-                                           s32 current_voltage)
-{
-       s32 comp = 0;
-
-       if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
-           (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
-               return 0;
-
-       iwl4965_math_div_round(current_voltage - eeprom_voltage,
-                              TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
-       if (current_voltage > eeprom_voltage)
-               comp *= 2;
-       if ((comp < -2) || (comp > 2))
-               comp = 0;
-
-       return comp;
-}
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
-       if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
-               return CALIB_CH_GROUP_5;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
-               return CALIB_CH_GROUP_1;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
-               return CALIB_CH_GROUP_2;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
-               return CALIB_CH_GROUP_3;
-
-       if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
-           channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
-               return CALIB_CH_GROUP_4;
-
-       return -EINVAL;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
-       s32 b = -1;
-
-       for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
-               if (priv->calib_info->band_info[b].ch_from == 0)
-                       continue;
-
-               if ((channel >= priv->calib_info->band_info[b].ch_from)
-                   && (channel <= priv->calib_info->band_info[b].ch_to))
-                       break;
-       }
-
-       return b;
-}
-
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
-       s32 val;
-
-       if (x2 == x1)
-               return y1;
-       else {
-               iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
-               return val + y2;
-       }
-}
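/*
 * The helper above is plain linear interpolation between the two
 * factory-calibrated sample channels; written out (for clarity only,
 * the formula is not spelled out elsewhere in the driver):
 *
 *	y = y2 + (x2 - x) * (y1 - y2) / (x2 - x1)
 *
 * so a channel halfway between the two samples gets the average of the
 * two measured values, and x2 == x1 short-circuits to y1 to avoid a
 * division by zero.
 */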
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest.  Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
-                                   struct iwl_eeprom_calib_ch_info *chan_info)
-{
-       s32 s = -1;
-       u32 c;
-       u32 m;
-       const struct iwl_eeprom_calib_measure *m1;
-       const struct iwl_eeprom_calib_measure *m2;
-       struct iwl_eeprom_calib_measure *omeas;
-       u32 ch_i1;
-       u32 ch_i2;
-
-       s = iwl4965_get_sub_band(priv, channel);
-       if (s >= EEPROM_TX_POWER_BANDS) {
-               IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
-               return -1;
-       }
-
-       ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
-       ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
-       chan_info->ch_num = (u8) channel;
-
-       IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
-                         channel, s, ch_i1, ch_i2);
-
-       for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
-               for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
-                       m1 = &(priv->calib_info->band_info[s].ch1.
-                              measurements[c][m]);
-                       m2 = &(priv->calib_info->band_info[s].ch2.
-                              measurements[c][m]);
-                       omeas = &(chan_info->measurements[c][m]);
-
-                       omeas->actual_pow =
-                           (u8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->actual_pow,
-                                                          ch_i2,
-                                                          m2->actual_pow);
-                       omeas->gain_idx =
-                           (u8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->gain_idx, ch_i2,
-                                                          m2->gain_idx);
-                       omeas->temperature =
-                           (u8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->temperature,
-                                                          ch_i2,
-                                                          m2->temperature);
-                       omeas->pa_det =
-                           (s8) iwl4965_interpolate_value(channel, ch_i1,
-                                                          m1->pa_det, ch_i2,
-                                                          m2->pa_det);
-
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
-                               m1->actual_pow, m2->actual_pow, omeas->actual_pow);
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
-                               m1->gain_idx, m2->gain_idx, omeas->gain_idx);
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
-                               m1->pa_det, m2->pa_det, omeas->pa_det);
-                       IWL_DEBUG_TXPOWER(priv,
-                               "chain %d meas %d  T1=%d  T2=%d  T=%d\n", c, m,
-                               m1->temperature, m2->temperature,
-                               omeas->temperature);
-               }
-       }
-
-       return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
-       10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
-       10                      /* CCK */
-};
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
-       s32 degrees_per_05db_a;
-       s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
-       {9, 2},                 /* group 0 5.2, ch  34-43 */
-       {4, 1},                 /* group 1 5.2, ch  44-70 */
-       {4, 1},                 /* group 2 5.2, ch  71-124 */
-       {4, 1},                 /* group 3 5.2, ch 125-200 */
-       {3, 1}                  /* group 4 2.4, ch   all */
-};
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
-       if (!band) {
-               if ((rate_power_index & 7) <= 4)
-                       return MIN_TX_GAIN_INDEX_52GHZ_EXT;
-       }
-       return MIN_TX_GAIN_INDEX;
-}
-
-struct gain_entry {
-       u8 dsp;
-       u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
-       /* 5.2GHz power gain index table */
-       {
-        {123, 0x3F},           /* highest txpower */
-        {117, 0x3F},
-        {110, 0x3F},
-        {104, 0x3F},
-        {98, 0x3F},
-        {110, 0x3E},
-        {104, 0x3E},
-        {98, 0x3E},
-        {110, 0x3D},
-        {104, 0x3D},
-        {98, 0x3D},
-        {110, 0x3C},
-        {104, 0x3C},
-        {98, 0x3C},
-        {110, 0x3B},
-        {104, 0x3B},
-        {98, 0x3B},
-        {110, 0x3A},
-        {104, 0x3A},
-        {98, 0x3A},
-        {110, 0x39},
-        {104, 0x39},
-        {98, 0x39},
-        {110, 0x38},
-        {104, 0x38},
-        {98, 0x38},
-        {110, 0x37},
-        {104, 0x37},
-        {98, 0x37},
-        {110, 0x36},
-        {104, 0x36},
-        {98, 0x36},
-        {110, 0x35},
-        {104, 0x35},
-        {98, 0x35},
-        {110, 0x34},
-        {104, 0x34},
-        {98, 0x34},
-        {110, 0x33},
-        {104, 0x33},
-        {98, 0x33},
-        {110, 0x32},
-        {104, 0x32},
-        {98, 0x32},
-        {110, 0x31},
-        {104, 0x31},
-        {98, 0x31},
-        {110, 0x30},
-        {104, 0x30},
-        {98, 0x30},
-        {110, 0x25},
-        {104, 0x25},
-        {98, 0x25},
-        {110, 0x24},
-        {104, 0x24},
-        {98, 0x24},
-        {110, 0x23},
-        {104, 0x23},
-        {98, 0x23},
-        {110, 0x22},
-        {104, 0x18},
-        {98, 0x18},
-        {110, 0x17},
-        {104, 0x17},
-        {98, 0x17},
-        {110, 0x16},
-        {104, 0x16},
-        {98, 0x16},
-        {110, 0x15},
-        {104, 0x15},
-        {98, 0x15},
-        {110, 0x14},
-        {104, 0x14},
-        {98, 0x14},
-        {110, 0x13},
-        {104, 0x13},
-        {98, 0x13},
-        {110, 0x12},
-        {104, 0x08},
-        {98, 0x08},
-        {110, 0x07},
-        {104, 0x07},
-        {98, 0x07},
-        {110, 0x06},
-        {104, 0x06},
-        {98, 0x06},
-        {110, 0x05},
-        {104, 0x05},
-        {98, 0x05},
-        {110, 0x04},
-        {104, 0x04},
-        {98, 0x04},
-        {110, 0x03},
-        {104, 0x03},
-        {98, 0x03},
-        {110, 0x02},
-        {104, 0x02},
-        {98, 0x02},
-        {110, 0x01},
-        {104, 0x01},
-        {98, 0x01},
-        {110, 0x00},
-        {104, 0x00},
-        {98, 0x00},
-        {93, 0x00},
-        {88, 0x00},
-        {83, 0x00},
-        {78, 0x00},
-        },
-       /* 2.4GHz power gain index table */
-       {
-        {110, 0x3f},           /* highest txpower */
-        {104, 0x3f},
-        {98, 0x3f},
-        {110, 0x3e},
-        {104, 0x3e},
-        {98, 0x3e},
-        {110, 0x3d},
-        {104, 0x3d},
-        {98, 0x3d},
-        {110, 0x3c},
-        {104, 0x3c},
-        {98, 0x3c},
-        {110, 0x3b},
-        {104, 0x3b},
-        {98, 0x3b},
-        {110, 0x3a},
-        {104, 0x3a},
-        {98, 0x3a},
-        {110, 0x39},
-        {104, 0x39},
-        {98, 0x39},
-        {110, 0x38},
-        {104, 0x38},
-        {98, 0x38},
-        {110, 0x37},
-        {104, 0x37},
-        {98, 0x37},
-        {110, 0x36},
-        {104, 0x36},
-        {98, 0x36},
-        {110, 0x35},
-        {104, 0x35},
-        {98, 0x35},
-        {110, 0x34},
-        {104, 0x34},
-        {98, 0x34},
-        {110, 0x33},
-        {104, 0x33},
-        {98, 0x33},
-        {110, 0x32},
-        {104, 0x32},
-        {98, 0x32},
-        {110, 0x31},
-        {104, 0x31},
-        {98, 0x31},
-        {110, 0x30},
-        {104, 0x30},
-        {98, 0x30},
-        {110, 0x6},
-        {104, 0x6},
-        {98, 0x6},
-        {110, 0x5},
-        {104, 0x5},
-        {98, 0x5},
-        {110, 0x4},
-        {104, 0x4},
-        {98, 0x4},
-        {110, 0x3},
-        {104, 0x3},
-        {98, 0x3},
-        {110, 0x2},
-        {104, 0x2},
-        {98, 0x2},
-        {110, 0x1},
-        {104, 0x1},
-        {98, 0x1},
-        {110, 0x0},
-        {104, 0x0},
-        {98, 0x0},
-        {97, 0},
-        {96, 0},
-        {95, 0},
-        {94, 0},
-        {93, 0},
-        {92, 0},
-        {91, 0},
-        {90, 0},
-        {89, 0},
-        {88, 0},
-        {87, 0},
-        {86, 0},
-        {85, 0},
-        {84, 0},
-        {83, 0},
-        {82, 0},
-        {81, 0},
-        {80, 0},
-        {79, 0},
-        {78, 0},
-        {77, 0},
-        {76, 0},
-        {75, 0},
-        {74, 0},
-        {73, 0},
-        {72, 0},
-        {71, 0},
-        {70, 0},
-        {69, 0},
-        {68, 0},
-        {67, 0},
-        {66, 0},
-        {65, 0},
-        {64, 0},
-        {63, 0},
-        {62, 0},
-        {61, 0},
-        {60, 0},
-        {59, 0},
-        }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
-                                   u8 is_ht40, u8 ctrl_chan_high,
-                                   struct iwl4965_tx_power_db *tx_power_tbl)
-{
-       u8 saturation_power;
-       s32 target_power;
-       s32 user_target_power;
-       s32 power_limit;
-       s32 current_temp;
-       s32 reg_limit;
-       s32 current_regulatory;
-       s32 txatten_grp = CALIB_CH_GROUP_MAX;
-       int i;
-       int c;
-       const struct iwl_channel_info *ch_info = NULL;
-       struct iwl_eeprom_calib_ch_info ch_eeprom_info;
-       const struct iwl_eeprom_calib_measure *measurement;
-       s16 voltage;
-       s32 init_voltage;
-       s32 voltage_compensation;
-       s32 degrees_per_05db_num;
-       s32 degrees_per_05db_denom;
-       s32 factory_temp;
-       s32 temperature_comp[2];
-       s32 factory_gain_index[2];
-       s32 factory_actual_pwr[2];
-       s32 power_index;
-
-       /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
-        *   are used for indexing into txpower table) */
-       user_target_power = 2 * priv->tx_power_user_lmt;
-
-       /* Get current (RXON) channel, band, width */
-       IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
-                         is_ht40);
-
-       ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
-
-       if (!iwl_legacy_is_channel_valid(ch_info))
-               return -EINVAL;
-
-       /* get txatten group, used to select 1) thermal txpower adjustment
-        *   and 2) mimo txpower balance between Tx chains. */
-       txatten_grp = iwl4965_get_tx_atten_grp(channel);
-       if (txatten_grp < 0) {
-               IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
-                         channel);
-               return txatten_grp;
-       }
-
-       IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
-                         channel, txatten_grp);
-
-       if (is_ht40) {
-               if (ctrl_chan_high)
-                       channel -= 2;
-               else
-                       channel += 2;
-       }
-
-       /* hardware txpower limits ...
-        * saturation (clipping distortion) txpowers are in half-dBm */
-       if (band)
-               saturation_power = priv->calib_info->saturation_power24;
-       else
-               saturation_power = priv->calib_info->saturation_power52;
-
-       if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
-           saturation_power > IWL_TX_POWER_SATURATION_MAX) {
-               if (band)
-                       saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
-               else
-                       saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
-       }
-
-       /* regulatory txpower limits ... reg_limit values are in half-dBm,
-        *   max_power_avg values are in dBm, convert * 2 */
-       if (is_ht40)
-               reg_limit = ch_info->ht40_max_power_avg * 2;
-       else
-               reg_limit = ch_info->max_power_avg * 2;
-
-       if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
-           (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
-               if (band)
-                       reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
-               else
-                       reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
-       }
-
-       /* Interpolate txpower calibration values for this channel,
-        *   based on factory calibration tests on spaced channels. */
-       iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
-       /* calculate tx gain adjustment based on power supply voltage */
-       voltage = le16_to_cpu(priv->calib_info->voltage);
-       init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
-       voltage_compensation =
-           iwl4965_get_voltage_compensation(voltage, init_voltage);
-
-       IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
-                         init_voltage,
-                         voltage, voltage_compensation);
-
-       /* get current temperature (Celsius) */
-       current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
-       current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
-       current_temp = KELVIN_TO_CELSIUS(current_temp);
-
-       /* select thermal txpower adjustment params, based on channel group
-        *   (same frequency group used for mimo txatten adjustment) */
-       degrees_per_05db_num =
-           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
-       degrees_per_05db_denom =
-           tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
-       /* get per-chain txpower values from factory measurements */
-       for (c = 0; c < 2; c++) {
-               measurement = &ch_eeprom_info.measurements[c][1];
-
-               /* txgain adjustment (in half-dB steps) based on difference
-                *   between factory and current temperature */
-               factory_temp = measurement->temperature;
-               iwl4965_math_div_round((current_temp - factory_temp) *
-                                      degrees_per_05db_denom,
-                                      degrees_per_05db_num,
-                                      &temperature_comp[c]);
-
-               factory_gain_index[c] = measurement->gain_idx;
-               factory_actual_pwr[c] = measurement->actual_pow;
-
-               IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
-               IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
-                                 "curr tmp %d, comp %d steps\n",
-                                 factory_temp, current_temp,
-                                 temperature_comp[c]);
-
-               IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
-                                 factory_gain_index[c],
-                                 factory_actual_pwr[c]);
-       }
-
-       /* for each of 33 bit-rates (including 1 for CCK) */
-       for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
-               u8 is_mimo_rate;
-               union iwl4965_tx_power_dual_stream tx_power;
-
-               /* for mimo, reduce each chain's txpower by half
-                * (3dB, 6 steps), so total output power is regulatory
-                * compliant. */
-               if (i & 0x8) {
-                       current_regulatory = reg_limit -
-                           IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
-                       is_mimo_rate = 1;
-               } else {
-                       current_regulatory = reg_limit;
-                       is_mimo_rate = 0;
-               }
-
-               /* find txpower limit, either hardware or regulatory */
-               power_limit = saturation_power - back_off_table[i];
-               if (power_limit > current_regulatory)
-                       power_limit = current_regulatory;
-
-               /* reduce user's txpower request if necessary
-                * for this rate on this channel */
-               target_power = user_target_power;
-               if (target_power > power_limit)
-                       target_power = power_limit;
-
-               IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
-                                 i, saturation_power - back_off_table[i],
-                                 current_regulatory, user_target_power,
-                                 target_power);
-
-               /* for each of 2 Tx chains (radio transmitters) */
-               for (c = 0; c < 2; c++) {
-                       s32 atten_value;
-
-                       if (is_mimo_rate)
-                               atten_value =
-                                   (s32)le32_to_cpu(priv->card_alive_init.
-                                   tx_atten[txatten_grp][c]);
-                       else
-                               atten_value = 0;
-
-                       /* calculate index; higher index means lower txpower */
-                       power_index = (u8) (factory_gain_index[c] -
-                                           (target_power -
-                                            factory_actual_pwr[c]) -
-                                           temperature_comp[c] -
-                                           voltage_compensation +
-                                           atten_value);
-
-/*                     IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
-                                               power_index); */
-
-                       if (power_index < get_min_power_index(i, band))
-                               power_index = get_min_power_index(i, band);
-
-                       /* adjust 5 GHz index to support negative indexes */
-                       if (!band)
-                               power_index += 9;
-
-                       /* CCK, rate 32, reduce txpower for CCK */
-                       if (i == POWER_TABLE_CCK_ENTRY)
-                               power_index +=
-                                   IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
-                       /* stay within the table! */
-                       if (power_index > 107) {
-                               IWL_WARN(priv, "txpower index %d > 107\n",
-                                           power_index);
-                               power_index = 107;
-                       }
-                       if (power_index < 0) {
-                               IWL_WARN(priv, "txpower index %d < 0\n",
-                                           power_index);
-                               power_index = 0;
-                       }
-
-                       /* fill txpower command for this rate/chain */
-                       tx_power.s.radio_tx_gain[c] =
-                               gain_table[band][power_index].radio;
-                       tx_power.s.dsp_predis_atten[c] =
-                               gain_table[band][power_index].dsp;
-
-                       IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
-                                         "gain 0x%02x dsp %d\n",
-                                         c, atten_value, power_index,
-                                       tx_power.s.radio_tx_gain[c],
-                                       tx_power.s.dsp_predis_atten[c]);
-               } /* for each chain */
-
-               tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
-       } /* for each rate */
-
-       return 0;
-}
-
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (ht40, high)
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
-       struct iwl4965_txpowertable_cmd cmd = { 0 };
-       int ret;
-       u8 band = 0;
-       bool is_ht40 = false;
-       u8 ctrl_chan_high = 0;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
-                     "TX Power requested while scanning!\n"))
-               return -EAGAIN;
-
-       band = priv->band == IEEE80211_BAND_2GHZ;
-
-       is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
-
-       if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
-               ctrl_chan_high = 1;
-
-       cmd.band = band;
-       cmd.channel = ctx->active.channel;
-
-       ret = iwl4965_fill_txpower_tbl(priv, band,
-                               le16_to_cpu(ctx->active.channel),
-                               is_ht40, ctrl_chan_high, &cmd.tx_power);
-       if (ret)
-               goto out;
-
-       ret = iwl_legacy_send_cmd_pdu(priv,
-                        REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
-       return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
-                                  struct iwl_rxon_context *ctx)
-{
-       int ret = 0;
-       struct iwl4965_rxon_assoc_cmd rxon_assoc;
-       const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
-       const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
-       if ((rxon1->flags == rxon2->flags) &&
-           (rxon1->filter_flags == rxon2->filter_flags) &&
-           (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
-           (rxon1->ofdm_ht_single_stream_basic_rates ==
-            rxon2->ofdm_ht_single_stream_basic_rates) &&
-           (rxon1->ofdm_ht_dual_stream_basic_rates ==
-            rxon2->ofdm_ht_dual_stream_basic_rates) &&
-           (rxon1->rx_chain == rxon2->rx_chain) &&
-           (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
-               IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
-               return 0;
-       }
-
-       rxon_assoc.flags = ctx->staging.flags;
-       rxon_assoc.filter_flags = ctx->staging.filter_flags;
-       rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
-       rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
-       rxon_assoc.reserved = 0;
-       rxon_assoc.ofdm_ht_single_stream_basic_rates =
-           ctx->staging.ofdm_ht_single_stream_basic_rates;
-       rxon_assoc.ofdm_ht_dual_stream_basic_rates =
-           ctx->staging.ofdm_ht_dual_stream_basic_rates;
-       rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
-       ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
-                                    sizeof(rxon_assoc), &rxon_assoc, NULL);
-
-       return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       /* cast away the const for active_rxon in this function */
-       struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
-       int ret;
-       bool new_assoc =
-               !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EBUSY;
-
-       if (!ctx->is_active)
-               return 0;
-
-       /* always get timestamp with Rx frame */
-       ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
-       ret = iwl_legacy_check_rxon_cmd(priv, ctx);
-       if (ret) {
-               IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
-               return -EINVAL;
-       }
-
-       /*
-        * receive commit_rxon request
-        * abort any previous channel switch if still in process
-        */
-       if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
-           (priv->switch_channel != ctx->staging.channel)) {
-               IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
-                     le16_to_cpu(priv->switch_channel));
-               iwl_legacy_chswitch_done(priv, false);
-       }
-
-       /* If we don't need to send a full RXON, we can use
-        * iwl_rxon_assoc_cmd which is used to reconfigure filter
-        * and other flags for the current radio configuration. */
-       if (!iwl_legacy_full_rxon_required(priv, ctx)) {
-               ret = iwl_legacy_send_rxon_assoc(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
-                       return ret;
-               }
-
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_legacy_print_rx_config_cmd(priv, ctx);
-               /*
-                * We do not commit tx power settings while channel changing,
-                * do it now if tx power changed.
-                */
-               iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-               return 0;
-       }
-
-       /* If we are currently associated and the new config requires
-        * an RXON_ASSOC and the new config wants the associated mask enabled,
-        * we must clear the associated from the active configuration
-        * before we apply the new config */
-       if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
-               IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
-                                      sizeof(struct iwl_legacy_rxon_cmd),
-                                      active_rxon);
-
-               /* If the mask clearing failed then we set
-                * active_rxon back to what it was previously */
-               if (ret) {
-                       active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
-                       IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
-                       return ret;
-               }
-               iwl_legacy_clear_ucode_stations(priv, ctx);
-               iwl_legacy_restore_stations(priv, ctx);
-               ret = iwl4965_restore_default_wep_keys(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-                       return ret;
-               }
-       }
-
-       IWL_DEBUG_INFO(priv, "Sending RXON\n"
-                      "* with%s RXON_FILTER_ASSOC_MSK\n"
-                      "* channel = %d\n"
-                      "* bssid = %pM\n",
-                      (new_assoc ? "" : "out"),
-                      le16_to_cpu(ctx->staging.channel),
-                      ctx->staging.bssid_addr);
-
-       iwl_legacy_set_rxon_hwcrypto(priv, ctx,
-                               !priv->cfg->mod_params->sw_crypto);
-
-       /* Apply the new configuration
-        * RXON unassoc clears the station table in uCode so restoration of
-        * stations is needed after it (the RXON command) completes
-        */
-       if (!new_assoc) {
-               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-                       return ret;
-               }
-               IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-               iwl_legacy_clear_ucode_stations(priv, ctx);
-               iwl_legacy_restore_stations(priv, ctx);
-               ret = iwl4965_restore_default_wep_keys(priv, ctx);
-               if (ret) {
-                       IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
-                       return ret;
-               }
-       }
-       if (new_assoc) {
-               priv->start_calib = 0;
-               /* Apply the new configuration
-                * RXON assoc doesn't clear the station table in uCode,
-                */
-               ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
-                             sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
-               if (ret) {
-                       IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
-                       return ret;
-               }
-               memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
-       }
-       iwl_legacy_print_rx_config_cmd(priv, ctx);
-
-       iwl4965_init_sensitivity(priv);
-
-       /* If we issue a new RXON command which required a tune then we must
-        * send a new TXPOWER command or we won't be able to Tx any frames */
-       ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
-       if (ret) {
-               IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
-                                    struct ieee80211_channel_switch *ch_switch)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       int rc;
-       u8 band = 0;
-       bool is_ht40 = false;
-       u8 ctrl_chan_high = 0;
-       struct iwl4965_channel_switch_cmd cmd;
-       const struct iwl_channel_info *ch_info;
-       u32 switch_time_in_usec, ucode_switch_time;
-       u16 ch;
-       u32 tsf_low;
-       u8 switch_count;
-       u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
-       struct ieee80211_vif *vif = ctx->vif;
-       band = priv->band == IEEE80211_BAND_2GHZ;
-
-       is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
-
-       if (is_ht40 &&
-           (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
-               ctrl_chan_high = 1;
-
-       cmd.band = band;
-       cmd.expect_beacon = 0;
-       ch = ch_switch->channel->hw_value;
-       cmd.channel = cpu_to_le16(ch);
-       cmd.rxon_flags = ctx->staging.flags;
-       cmd.rxon_filter_flags = ctx->staging.filter_flags;
-       switch_count = ch_switch->count;
-       tsf_low = ch_switch->timestamp & 0x0ffffffff;
-       /*
-        * calculate the ucode channel switch time
-        * adding TSF as one of the factor for when to switch
-        */
-       if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
-               if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
-                   beacon_interval)) {
-                       switch_count -= (priv->ucode_beacon_time -
-                               tsf_low) / beacon_interval;
-               } else
-                       switch_count = 0;
-       }
-       if (switch_count <= 1)
-               cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
-       else {
-               switch_time_in_usec =
-                       vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
-               ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
-                                                        switch_time_in_usec,
-                                                        beacon_interval);
-               cmd.switch_time = iwl_legacy_add_beacon_time(priv,
-                                                     priv->ucode_beacon_time,
-                                                     ucode_switch_time,
-                                                     beacon_interval);
-       }
-       IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
-                     cmd.switch_time);
-       ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
-       if (ch_info)
-               cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
-       else {
-               IWL_ERR(priv, "invalid channel switch from %u to %u\n",
-                       ctx->active.channel, ch);
-               return -EFAULT;
-       }
-
-       rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
-                                     ctrl_chan_high, &cmd.tx_power);
-       if (rc) {
-               IWL_DEBUG_11H(priv, "error:%d  fill txpower_tbl\n", rc);
-               return rc;
-       }
-
-       return iwl_legacy_send_cmd_pdu(priv,
-                        REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
-                                           struct iwl_tx_queue *txq,
-                                           u16 byte_cnt)
-{
-       struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
-       int txq_id = txq->q.id;
-       int write_ptr = txq->q.write_ptr;
-       int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
-       __le16 bc_ent;
-
-       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
-       bc_ent = cpu_to_le16(len & 0xFFF);
-       /* Set up byte count within first 256 entries */
-       scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
-       /* If within first 64 entries, duplicate at end */
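-       /* (Duplicating the first TFD_QUEUE_SIZE_BC_DUP entries past the end
-        *  lets the uCode scheduler read a contiguous window across the
-        *  circular buffer boundary without wrapping.) */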
-       if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
-               scd_bc_tbl[txq_id].
-                       tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @priv: driver data; the temperature reading comes from the uCode statistics
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
-       s32 temperature;
-       s32 vt;
-       s32 R1, R2, R3;
-       u32 R4;
-
-       if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
-           (priv->_4965.statistics.flag &
-                       STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
-               IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
-               R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
-               R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
-               R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
-               R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
-       } else {
-               IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
-               R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
-               R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
-               R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
-               R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
-       }
-
-       /*
-        * Temperature is only 23 bits, so sign extend out to 32.
-        *
-        * NOTE If we haven't received a statistics notification yet
-        * with an updated temperature, use R4 provided to us in the
-        * "initialize" ALIVE response.
-        */
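-       /* e.g. sign_extend32(0x00800000, 23) yields -8388608, since bit 23
-        * is treated as the sign bit of the 24-bit reading. */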
-       if (!test_bit(STATUS_TEMPERATURE, &priv->status))
-               vt = sign_extend32(R4, 23);
-       else
-               vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
-                                general.common.temperature), 23);
-
-       IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
-       if (R3 == R1) {
-               IWL_ERR(priv, "Calibration conflict R1 == R3\n");
-               return -1;
-       }
-
-       /* Calculate temperature in degrees Kelvin, adjust by 97%.
-        * Add offset to center the adjustment around 0 degrees Centigrade. */
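-       /* Worked example with hypothetical calibration values: R1 = 100,
-        * R2 = 200, R3 = 300 and vt = 450 give
-        * TEMPERATURE_CALIB_A_VAL * 250 / 200 before the 97% scaling and
-        * Kelvin offset are applied. */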
-       temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
-       temperature /= (R3 - R1);
-       temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
-
-       IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
-                       temperature, KELVIN_TO_CELSIUS(temperature));
-
-       return temperature;
-}
-
-/* Adjust Tx power only if the temperature has changed by at least the threshold. */
-#define IWL_TEMPERATURE_THRESHOLD   3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, then a recalibration
- * is needed.
- *
- * Assumes caller will replace priv->last_temperature once calibration
- * executed.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
-       int temp_diff;
-
-       if (!test_bit(STATUS_STATISTICS, &priv->status)) {
-               IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
-               return 0;
-       }
-
-       temp_diff = priv->temperature - priv->last_temperature;
-
-       /* get absolute value */
-       if (temp_diff < 0) {
-               IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
-               temp_diff = -temp_diff;
-       } else if (temp_diff == 0)
-               IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
-       else
-               IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
-       if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
-               IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
-               return 0;
-       }
-
-       IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
-       return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
-       s32 temp;
-
-       temp = iwl4965_hw_get_temperature(priv);
-       if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
-               return;
-
-       if (priv->temperature != temp) {
-               if (priv->temperature)
-                       IWL_DEBUG_TEMP(priv, "Temperature changed "
-                                      "from %dC to %dC\n",
-                                      KELVIN_TO_CELSIUS(priv->temperature),
-                                      KELVIN_TO_CELSIUS(temp));
-               else
-                       IWL_DEBUG_TEMP(priv, "Temperature "
-                                      "initialized to %dC\n",
-                                      KELVIN_TO_CELSIUS(temp));
-       }
-
-       priv->temperature = temp;
-       set_bit(STATUS_TEMPERATURE, &priv->status);
-
-       if (!priv->disable_tx_power_cal &&
-            unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
-            iwl4965_is_temp_calib_needed(priv))
-               queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
-       switch (cmd_id) {
-       case REPLY_RXON:
-               return (u16) sizeof(struct iwl4965_rxon_cmd);
-       default:
-               return len;
-       }
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
-                                                               u8 *data)
-{
-       struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
-       addsta->mode = cmd->mode;
-       memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
-       memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
-       addsta->station_flags = cmd->station_flags;
-       addsta->station_flags_msk = cmd->station_flags_msk;
-       addsta->tid_disable_tx = cmd->tid_disable_tx;
-       addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
-       addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
-       addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-       addsta->sleep_tx_count = cmd->sleep_tx_count;
-       addsta->reserved1 = cpu_to_le16(0);
-       addsta->reserved2 = cpu_to_le16(0);
-
-       return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
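-       /* The scheduler SSN word follows the frame_count per-frame status
-        * entries, so step that many 32-bit words past u.status. */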
-       return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
-                                     struct iwl_ht_agg *agg,
-                                     struct iwl4965_tx_resp *tx_resp,
-                                     int txq_id, u16 start_idx)
-{
-       u16 status;
-       struct agg_tx_status *frame_status = tx_resp->u.agg_status;
-       struct ieee80211_tx_info *info = NULL;
-       struct ieee80211_hdr *hdr = NULL;
-       u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
-       int i, sh, idx;
-       u16 seq;
-       if (agg->wait_for_ba)
-               IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
-       agg->frame_count = tx_resp->frame_count;
-       agg->start_idx = start_idx;
-       agg->rate_n_flags = rate_n_flags;
-       agg->bitmap = 0;
-
-       /* num frames attempted by Tx command */
-       if (agg->frame_count == 1) {
-               /* Only one frame was attempted; no block-ack will arrive */
-               status = le16_to_cpu(frame_status[0].status);
-               idx = start_idx;
-
-               IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
-                                  agg->frame_count, agg->start_idx, idx);
-
-               info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
-               info->status.rates[0].count = tx_resp->failure_frame + 1;
-               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-               info->flags |= iwl4965_tx_status_to_mac80211(status);
-               iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
-
-               IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
-                                   status & 0xff, tx_resp->failure_frame);
-               IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
-               agg->wait_for_ba = 0;
-       } else {
-               /* Two or more frames were attempted; expect block-ack */
-               u64 bitmap = 0;
-               int start = agg->start_idx;
-
-               /* Construct bit-map of pending frames within Tx window */
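-               /* e.g. with hypothetical indices start = 10 and idx = 13,
-                * bit 3 of the bitmap is set; a frame with idx below start
-                * shifts the whole bitmap and moves start down to idx. */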
-               for (i = 0; i < agg->frame_count; i++) {
-                       u16 sc;
-                       status = le16_to_cpu(frame_status[i].status);
-                       seq  = le16_to_cpu(frame_status[i].sequence);
-                       idx = SEQ_TO_INDEX(seq);
-                       txq_id = SEQ_TO_QUEUE(seq);
-
-                       if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
-                                     AGG_TX_STATE_ABORT_MSK))
-                               continue;
-
-                       IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
-                                          agg->frame_count, txq_id, idx);
-
-                       hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
-                       if (!hdr) {
-                               IWL_ERR(priv,
-                                       "BUG_ON idx doesn't point to valid skb"
-                                       " idx=%d, txq_id=%d\n", idx, txq_id);
-                               return -1;
-                       }
-
-                       sc = le16_to_cpu(hdr->seq_ctrl);
-                       if (idx != (SEQ_TO_SN(sc) & 0xff)) {
-                               IWL_ERR(priv,
-                                       "BUG_ON idx doesn't match seq control"
-                                       " idx=%d, seq_idx=%d, seq=%d\n",
-                                       idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
-                               return -1;
-                       }
-
-                       IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
-                                          i, idx, SEQ_TO_SN(sc));
-
-                       sh = idx - start;
-                       if (sh > 64) {
-                               sh = (start - idx) + 0xff;
-                               bitmap = bitmap << sh;
-                               sh = 0;
-                               start = idx;
-                       } else if (sh < -64)
-                               sh  = 0xff - (start - idx);
-                       else if (sh < 0) {
-                               sh = start - idx;
-                               start = idx;
-                               bitmap = bitmap << sh;
-                               sh = 0;
-                       }
-                       bitmap |= 1ULL << sh;
-                       IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
-                                          start, (unsigned long long)bitmap);
-               }
-
-               agg->bitmap = bitmap;
-               agg->start_idx = start;
-               IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
-                                  agg->frame_count, agg->start_idx,
-                                  (unsigned long long)agg->bitmap);
-
-               if (bitmap)
-                       agg->wait_for_ba = 1;
-       }
-       return 0;
-}
-
-static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
-{
-       int i;
-       int start = 0;
-       int ret = IWL_INVALID_STATION;
-       unsigned long flags;
-
-       if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
-               start = IWL_STA_ID;
-
-       if (is_broadcast_ether_addr(addr))
-               return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       for (i = start; i < priv->hw_params.max_stations; i++)
-               if (priv->stations[i].used &&
-                   (!compare_ether_addr(priv->stations[i].sta.sta.addr,
-                                        addr))) {
-                       ret = i;
-                       goto out;
-               }
-
-       IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
-                             addr, priv->num_stations);
-
- out:
-       /*
-        * It is possible that more commands interacting with stations
-        * arrive before we have completed processing the addition of the
-        * station.
-        */
-       if (ret != IWL_INVALID_STATION &&
-           (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
-            ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
-             (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
-               IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
-                       ret);
-               ret = IWL_INVALID_STATION;
-       }
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-       return ret;
-}
-
-static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
-       if (priv->iw_mode == NL80211_IFTYPE_STATION) {
-               return IWL_AP_ID;
-       } else {
-               u8 *da = ieee80211_get_DA(hdr);
-               return iwl4965_find_station(priv, da);
-       }
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-       int txq_id = SEQ_TO_QUEUE(sequence);
-       int index = SEQ_TO_INDEX(sequence);
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct ieee80211_hdr *hdr;
-       struct ieee80211_tx_info *info;
-       struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
-       u32  status = le32_to_cpu(tx_resp->u.status);
-       int uninitialized_var(tid);
-       int sta_id;
-       int freed;
-       u8 *qc = NULL;
-       unsigned long flags;
-
-       if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
-               IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
-                         "is out of range [0-%d] %d %d\n", txq_id,
-                         index, txq->q.n_bd, txq->q.write_ptr,
-                         txq->q.read_ptr);
-               return;
-       }
-
-       txq->time_stamp = jiffies;
-       info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
-       memset(&info->status, 0, sizeof(info->status));
-
-       hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
-       if (ieee80211_is_data_qos(hdr->frame_control)) {
-               qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & 0xf;
-       }
-
-       sta_id = iwl4965_get_ra_sta_id(priv, hdr);
-       if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
-               IWL_ERR(priv, "Station not known\n");
-               return;
-       }
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       if (txq->sched_retry) {
-               const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
-               struct iwl_ht_agg *agg = NULL;
-               WARN_ON(!qc);
-
-               agg = &priv->stations[sta_id].tid[tid].agg;
-
-               iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
-               /* check if BAR is needed */
-               if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
-                       info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
-               if (txq->q.read_ptr != (scd_ssn & 0xff)) {
-                       index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
-                                                               txq->q.n_bd);
-                       IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
-                                          "%d index %d\n", scd_ssn , index);
-                       freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
-                       if (qc)
-                               iwl4965_free_tfds_in_queue(priv, sta_id,
-                                                      tid, freed);
-
-                       if (priv->mac80211_registered &&
-                           (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
-                                && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-                               iwl_legacy_wake_queue(priv, txq);
-               }
-       } else {
-               info->status.rates[0].count = tx_resp->failure_frame + 1;
-               info->flags |= iwl4965_tx_status_to_mac80211(status);
-               iwl4965_hwrate_to_tx_control(priv,
-                                       le32_to_cpu(tx_resp->rate_n_flags),
-                                       info);
-
-               IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
-                                  "rate_n_flags 0x%x retries %d\n",
-                                  txq_id,
-                                  iwl4965_get_tx_fail_reason(status), status,
-                                  le32_to_cpu(tx_resp->rate_n_flags),
-                                  tx_resp->failure_frame);
-
-               freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
-               if (qc && likely(sta_id != IWL_INVALID_STATION))
-                       iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-               else if (sta_id == IWL_INVALID_STATION)
-                       IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
-               if (priv->mac80211_registered &&
-                   (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
-                       iwl_legacy_wake_queue(priv, txq);
-       }
-       if (qc && likely(sta_id != IWL_INVALID_STATION))
-               iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
-
-       iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
-       u8 rate __maybe_unused =
-               iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
-       IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
-               "tsf:0x%.8x%.8x rate:%d\n",
-               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
-               beacon->beacon_notify_hdr.failure_frame,
-               le32_to_cpu(beacon->ibss_mgr_status),
-               le32_to_cpu(beacon->high_tsf),
-               le32_to_cpu(beacon->low_tsf), rate);
-
-       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
-       /* Legacy Rx frames */
-       priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
-       /* Tx response */
-       priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
-       priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
-       .rxon_assoc = iwl4965_send_rxon_assoc,
-       .commit_rxon = iwl4965_commit_rxon,
-       .set_rxon_chain = iwl4965_set_rxon_chain,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       /*
-        * Since setting the RXON may have been deferred while
-        * performing the scan, fire one off if needed
-        */
-       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-               iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif = ctx->vif;
-       struct ieee80211_conf *conf = NULL;
-       int ret = 0;
-
-       if (!vif || !priv->is_open)
-               return;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       iwl_legacy_scan_cancel_timeout(priv, 200);
-
-       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwl_legacy_commit_rxon(priv, ctx);
-
-       ret = iwl_legacy_send_rxon_timing(priv, ctx);
-       if (ret)
-               IWL_WARN(priv, "RXON timing - "
-                           "Attempting to continue.\n");
-
-       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-       iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-       ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
-       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-                       vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
-       if (vif->bss_conf.use_short_preamble)
-               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-               if (vif->bss_conf.use_short_slot)
-                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-       }
-
-       iwl_legacy_commit_rxon(priv, ctx);
-
-       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-                       vif->bss_conf.aid, ctx->active.bssid_addr);
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               iwl4965_send_beacon_cmd(priv);
-               break;
-       default:
-               IWL_ERR(priv, "%s Should not be called in %d mode\n",
-                         __func__, vif->type);
-               break;
-       }
-
-       /* The chain noise calibration will enable PM upon completion.
-        * If chain noise calibration has already been run, then we need to
-        * enable power management here. */
-       if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
-               iwl_legacy_power_update_mode(priv, false);
-
-       /* Enable Rx differential gain and sensitivity calibrations */
-       iwl4965_chain_noise_reset(priv);
-       priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif = ctx->vif;
-       int ret = 0;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       /* The following should be done only at AP bring up */
-       if (!iwl_legacy_is_associated_ctx(ctx)) {
-
-               /* RXON - unassoc (to set timing command) */
-               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-               iwl_legacy_commit_rxon(priv, ctx);
-
-               /* RXON Timing */
-               ret = iwl_legacy_send_rxon_timing(priv, ctx);
-               if (ret)
-                       IWL_WARN(priv, "RXON timing failed - "
-                                       "Attempting to continue.\n");
-
-               /* AP has all antennas */
-               priv->chain_noise_data.active_chains =
-                       priv->hw_params.valid_rx_ant;
-               iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-               ctx->staging.assoc_id = 0;
-
-               if (vif->bss_conf.use_short_preamble)
-                       ctx->staging.flags |=
-                               RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &=
-                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-                       if (vif->bss_conf.use_short_slot)
-                               ctx->staging.flags |=
-                                       RXON_FLG_SHORT_SLOT_MSK;
-                       else
-                               ctx->staging.flags &=
-                                       ~RXON_FLG_SHORT_SLOT_MSK;
-               }
-               /* need to send beacon cmd before committing assoc RXON! */
-               iwl4965_send_beacon_cmd(priv);
-               /* restore RXON assoc */
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               iwl_legacy_commit_rxon(priv, ctx);
-       }
-       iwl4965_send_beacon_cmd(priv);
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
-       .get_hcmd_size = iwl4965_get_hcmd_size,
-       .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
-       .request_scan = iwl4965_request_scan,
-       .post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
-       .set_hw_params = iwl4965_hw_set_hw_params,
-       .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
-       .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
-       .txq_free_tfd = iwl4965_hw_txq_free_tfd,
-       .txq_init = iwl4965_hw_tx_queue_init,
-       .rx_handler_setup = iwl4965_rx_handler_setup,
-       .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
-       .init_alive_start = iwl4965_init_alive_start,
-       .load_ucode = iwl4965_load_bsm,
-       .dump_nic_error_log = iwl4965_dump_nic_error_log,
-       .dump_fh = iwl4965_dump_fh,
-       .set_channel_switch = iwl4965_hw_channel_switch,
-       .apm_ops = {
-               .init = iwl_legacy_apm_init,
-               .config = iwl4965_nic_config,
-       },
-       .eeprom_ops = {
-               .regulatory_bands = {
-                       EEPROM_REGULATORY_BAND_1_CHANNELS,
-                       EEPROM_REGULATORY_BAND_2_CHANNELS,
-                       EEPROM_REGULATORY_BAND_3_CHANNELS,
-                       EEPROM_REGULATORY_BAND_4_CHANNELS,
-                       EEPROM_REGULATORY_BAND_5_CHANNELS,
-                       EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
-                       EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
-               },
-               .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
-               .release_semaphore = iwl4965_eeprom_release_semaphore,
-       },
-       .send_tx_power  = iwl4965_send_tx_power,
-       .update_chain_flags = iwl4965_update_chain_flags,
-       .temp_ops = {
-               .temperature = iwl4965_temperature_calib,
-       },
-       .debugfs_ops = {
-               .rx_stats_read = iwl4965_ucode_rx_stats_read,
-               .tx_stats_read = iwl4965_ucode_tx_stats_read,
-               .general_stats_read = iwl4965_ucode_general_stats_read,
-       },
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
-       .post_associate = iwl4965_post_associate,
-       .config_ap = iwl4965_config_ap,
-       .manage_ibss_station = iwl4965_manage_ibss_station,
-       .update_bcast_stations = iwl4965_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
-       .tx = iwl4965_mac_tx,
-       .start = iwl4965_mac_start,
-       .stop = iwl4965_mac_stop,
-       .add_interface = iwl_legacy_mac_add_interface,
-       .remove_interface = iwl_legacy_mac_remove_interface,
-       .change_interface = iwl_legacy_mac_change_interface,
-       .config = iwl_legacy_mac_config,
-       .configure_filter = iwl4965_configure_filter,
-       .set_key = iwl4965_mac_set_key,
-       .update_tkip_key = iwl4965_mac_update_tkip_key,
-       .conf_tx = iwl_legacy_mac_conf_tx,
-       .reset_tsf = iwl_legacy_mac_reset_tsf,
-       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
-       .ampdu_action = iwl4965_mac_ampdu_action,
-       .hw_scan = iwl_legacy_mac_hw_scan,
-       .sta_add = iwl4965_mac_sta_add,
-       .sta_remove = iwl_legacy_mac_sta_remove,
-       .channel_switch = iwl4965_mac_channel_switch,
-       .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
-       .lib = &iwl4965_lib,
-       .hcmd = &iwl4965_hcmd,
-       .utils = &iwl4965_hcmd_utils,
-       .led = &iwl4965_led_ops,
-       .legacy = &iwl4965_legacy_ops,
-       .ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
-       .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
-       .num_of_queues = IWL49_NUM_QUEUES,
-       .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
-       .pll_cfg_val = 0,
-       .set_l0s = true,
-       .use_bsm = true,
-       .led_compensation = 61,
-       .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
-       .wd_timeout = IWL_DEF_WD_TIMEOUT,
-       .temperature_kelvin = true,
-       .ucode_tracing = true,
-       .sensitivity_calib_by_driver = true,
-       .chain_noise_calib_by_driver = true,
-};
-
-struct iwl_cfg iwl4965_cfg = {
-       .name = "Intel(R) Wireless WiFi Link 4965AGN",
-       .fw_name_pre = IWL4965_FW_PRE,
-       .ucode_api_max = IWL4965_UCODE_API_MAX,
-       .ucode_api_min = IWL4965_UCODE_API_MIN,
-       .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
-       .valid_tx_ant = ANT_AB,
-       .valid_rx_ant = ANT_ABC,
-       .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
-       .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
-       .ops = &iwl4965_ops,
-       .mod_params = &iwl4965_mod_params,
-       .base_params = &iwl4965_base_params,
-       .led_mode = IWL_LED_BLINK,
-       /*
-        * Force use of chains B and C for scan RX on 5 GHz band
-        * because the device has off-channel reception on chain A.
-        */
-       .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644 (file)
index 01f8163..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_4965_h__
-#define __iwl_4965_h__
-
-#include "iwl-dev.h"
-
-/* configuration for the _4965 devices */
-extern struct iwl_cfg iwl4965_cfg;
-
-extern struct iwl_mod_params iwl4965_mod_params;
-
-extern struct ieee80211_ops iwl4965_hw_ops;
-
-/* tx queue */
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
-                           int sta_id, int tid, int freed);
-
-/* RXON */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv,
-                               struct iwl_rxon_context *ctx);
-
-/* uCode */
-int iwl4965_verify_ucode(struct iwl_priv *priv);
-
-/* lib */
-void iwl4965_check_abort_status(struct iwl_priv *priv,
-                           u8 frame_count, u32 status);
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_hw_nic_init(struct iwl_priv *priv);
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-
-/* rx */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv);
-void iwl4965_rx_replenish(struct iwl_priv *priv);
-void iwl4965_rx_replenish_now(struct iwl_priv *priv);
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rxq_stop(struct iwl_priv *priv);
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
-                    struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
-                        struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_handle(struct iwl_priv *priv);
-
-/* tx */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                struct iwl_tx_queue *txq,
-                                dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
-                        struct iwl_tx_queue *txq);
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
-                             struct ieee80211_tx_info *info);
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
-                      struct ieee80211_sta *sta, u16 tid);
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
-                          int sta_id, u8 tid, int txq_id);
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb);
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
-
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE:  Acquire priv->lock before calling this function !
- */
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
-                                       struct iwl_tx_queue *txq,
-                                       int tx_fifo_id, int scd_retry);
-
-static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
-{
-       status &= TX_STATUS_MSK;
-
-       switch (status) {
-       case TX_STATUS_SUCCESS:
-       case TX_STATUS_DIRECT_DONE:
-               return IEEE80211_TX_STAT_ACK;
-       case TX_STATUS_FAIL_DEST_PS:
-               return IEEE80211_TX_STAT_TX_FILTERED;
-       default:
-               return 0;
-       }
-}
-
-static inline bool iwl4965_is_tx_success(u32 status)
-{
-       status &= TX_STATUS_MSK;
-       return (status == TX_STATUS_SUCCESS) ||
-              (status == TX_STATUS_DIRECT_DONE);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-
-/* rx */
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb);
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
-                         struct iwl_rx_packet *pkt);
-void iwl4965_rx_statistics(struct iwl_priv *priv,
-                      struct iwl_rx_mem_buffer *rxb);
-void iwl4965_reply_statistics(struct iwl_priv *priv,
-                         struct iwl_rx_mem_buffer *rxb);
-
-/* scan */
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-
-/* station mgmt */
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
-                              struct ieee80211_vif *vif, bool add);
-
-/* hcmd */
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status);
-#else
-static inline const char *
-iwl4965_get_tx_fail_reason(u32 status) { return ""; }
-#endif
-
-/* station management */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx);
-int iwl4965_add_bssid_station(struct iwl_priv *priv,
-                               struct iwl_rxon_context *ctx,
-                            const u8 *addr, u8 *sta_id_r);
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx,
-                              struct ieee80211_key_conf *key);
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx,
-                           struct ieee80211_key_conf *key);
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
-                                struct iwl_rxon_context *ctx);
-int iwl4965_set_dynamic_key(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
-                       struct ieee80211_key_conf *key, u8 sta_id);
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
-                       struct ieee80211_key_conf *key, u8 sta_id);
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
-                        struct iwl_rxon_context *ctx,
-                        struct ieee80211_key_conf *keyconf,
-                        struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
-                       int sta_id, int tid);
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
-                        int tid, u16 ssn);
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
-                       int tid);
-void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
-                       int sta_id, int cnt);
-int iwl4965_update_bcast_stations(struct iwl_priv *priv);
-
-/* rate */
-static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
-{
-       return BIT(ant_idx) << RATE_MCS_ANT_POS;
-}
-
-static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
-{
-       return le32_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
-{
-       return cpu_to_le32(flags|(u32)rate);
-}
-
-/* eeprom */
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
-int  iwl4965_eeprom_check_version(struct iwl_priv *priv);
-
-/* mac80211 handlers (for 4965) */
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwl4965_mac_start(struct ieee80211_hw *hw);
-void iwl4965_mac_stop(struct ieee80211_hw *hw);
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
-                            unsigned int changed_flags,
-                            unsigned int *total_flags,
-                            u64 multicast);
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-                      struct ieee80211_key_conf *key);
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
-                               struct ieee80211_vif *vif,
-                               struct ieee80211_key_conf *keyconf,
-                               struct ieee80211_sta *sta,
-                               u32 iv32, u16 *phase1key);
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
-                           struct ieee80211_vif *vif,
-                           enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                           u8 buf_size);
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
-                      struct ieee80211_vif *vif,
-                      struct ieee80211_sta *sta);
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
-                              struct ieee80211_channel_switch *ch_switch);
-
-#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644 (file)
index 2bd5659..0000000
+++ /dev/null
@@ -1,2661 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
-
-
-MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
-/*
- * If bt_coex_active is true, the uCode will kill/defer transmissions
- * every time the priority line is asserted (i.e. BT is sending signals on
- * the priority line in the PCIx).
- * If bt_coex_active is false, the uCode will ignore BT activity and
- * operate normally.
- *
- * Users might experience transmit issues on some platforms due to WiFi/BT
- * co-existence problems. The possible symptoms are:
- *   Able to scan and find all the available APs
- *   Not able to associate with any AP
- * On those platforms, WiFi communication can be restored by setting the
- * "bt_coex_active" module parameter to "false".
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-static bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
-
-u32 iwlegacy_debug_level;
-EXPORT_SYMBOL(iwlegacy_debug_level);
-
-const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwlegacy_bcast_addr);
-
-
-/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
-{
-       struct iwl_priv *priv;
-       /* mac80211 allocates memory for this device instance, including
-        *   space for this driver's private structure */
-       struct ieee80211_hw *hw;
-
-       hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
-                               cfg->ops->ieee80211_ops);
-       if (hw == NULL) {
-               pr_err("%s: Can not allocate network device\n",
-                      cfg->name);
-               goto out;
-       }
-
-       priv = hw->priv;
-       priv->hw = hw;
-
-out:
-       return hw;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_all);
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
-                             struct ieee80211_sta_ht_cap *ht_info,
-                             enum ieee80211_band band)
-{
-       u16 max_bit_rate = 0;
-       u8 rx_chains_num = priv->hw_params.rx_chains_num;
-       u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
-       ht_info->cap = 0;
-       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
-       ht_info->ht_supported = true;
-
-       ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-       max_bit_rate = MAX_BIT_RATE_20_MHZ;
-       if (priv->hw_params.ht40_channel & BIT(band)) {
-               ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-               ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
-               ht_info->mcs.rx_mask[4] = 0x01;
-               max_bit_rate = MAX_BIT_RATE_40_MHZ;
-       }
-
-       if (priv->cfg->mod_params->amsdu_size_8K)
-               ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
-       ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
-       ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
-       ht_info->mcs.rx_mask[0] = 0xFF;
-       if (rx_chains_num >= 2)
-               ht_info->mcs.rx_mask[1] = 0xFF;
-       if (rx_chains_num >= 3)
-               ht_info->mcs.rx_mask[2] = 0xFF;
-
-       /* Highest supported Rx data rate */
-       max_bit_rate *= rx_chains_num;
-       WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
-       ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
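-       /* e.g. two RX chains on an HT40-capable band advertise
-        * 2 * 150 = 300 Mbps as the highest supported RX data rate. */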
-
-       /* Tx MCS capabilities */
-       ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
-       if (tx_chains_num != rx_chains_num) {
-               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-               ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
-                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-       }
-}
-
-/**
- * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
- */
-int iwl_legacy_init_geos(struct iwl_priv *priv)
-{
-       struct iwl_channel_info *ch;
-       struct ieee80211_supported_band *sband;
-       struct ieee80211_channel *channels;
-       struct ieee80211_channel *geo_ch;
-       struct ieee80211_rate *rates;
-       int i = 0;
-       s8 max_tx_power = 0;
-
-       if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
-           priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
-               IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
-               set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-               return 0;
-       }
-
-       channels = kzalloc(sizeof(struct ieee80211_channel) *
-                          priv->channel_count, GFP_KERNEL);
-       if (!channels)
-               return -ENOMEM;
-
-       rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
-                       GFP_KERNEL);
-       if (!rates) {
-               kfree(channels);
-               return -ENOMEM;
-       }
-
-       /* 5.2GHz channels start after the 2.4GHz channels */
-       sband = &priv->bands[IEEE80211_BAND_5GHZ];
-       sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
-       /* just OFDM */
-       sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
-       sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
-       if (priv->cfg->sku & IWL_SKU_N)
-               iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
-                                        IEEE80211_BAND_5GHZ);
-
-       sband = &priv->bands[IEEE80211_BAND_2GHZ];
-       sband->channels = channels;
-       /* OFDM & CCK */
-       sband->bitrates = rates;
-       sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
-       if (priv->cfg->sku & IWL_SKU_N)
-               iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
-                                        IEEE80211_BAND_2GHZ);
-
-       priv->ieee_channels = channels;
-       priv->ieee_rates = rates;
-
-       for (i = 0;  i < priv->channel_count; i++) {
-               ch = &priv->channel_info[i];
-
-               if (!iwl_legacy_is_channel_valid(ch))
-                       continue;
-
-               sband = &priv->bands[ch->band];
-
-               geo_ch = &sband->channels[sband->n_channels++];
-
-               geo_ch->center_freq =
-                       ieee80211_channel_to_frequency(ch->channel, ch->band);
-               geo_ch->max_power = ch->max_power_avg;
-               geo_ch->max_antenna_gain = 0xff;
-               geo_ch->hw_value = ch->channel;
-
-               if (iwl_legacy_is_channel_valid(ch)) {
-                       if (!(ch->flags & EEPROM_CHANNEL_IBSS))
-                               geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
-                       if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
-                               geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
-                       if (ch->flags & EEPROM_CHANNEL_RADAR)
-                               geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
-                       geo_ch->flags |= ch->ht40_extension_channel;
-
-                       if (ch->max_power_avg > max_tx_power)
-                               max_tx_power = ch->max_power_avg;
-               } else {
-                       geo_ch->flags |= IEEE80211_CHAN_DISABLED;
-               }
-
-               IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
-                               ch->channel, geo_ch->center_freq,
-                               iwl_legacy_is_channel_a_band(ch) ?  "5.2" : "2.4",
-                               geo_ch->flags & IEEE80211_CHAN_DISABLED ?
-                               "restricted" : "valid",
-                                geo_ch->flags);
-       }
-
-       priv->tx_power_device_lmt = max_tx_power;
-       priv->tx_power_user_lmt = max_tx_power;
-       priv->tx_power_next = max_tx_power;
-
-       if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
-            priv->cfg->sku & IWL_SKU_A) {
-               IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
-                       "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
-                          priv->pci_dev->device,
-                          priv->pci_dev->subsystem_device);
-               priv->cfg->sku &= ~IWL_SKU_A;
-       }
-
-       IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
-                  priv->bands[IEEE80211_BAND_2GHZ].n_channels,
-                  priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
-       set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_geos);
-
-/*
- * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
- */
-void iwl_legacy_free_geos(struct iwl_priv *priv)
-{
-       kfree(priv->ieee_channels);
-       kfree(priv->ieee_rates);
-       clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_free_geos);
-
-static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
-                                    enum ieee80211_band band,
-                                    u16 channel, u8 extension_chan_offset)
-{
-       const struct iwl_channel_info *ch_info;
-
-       ch_info = iwl_legacy_get_channel_info(priv, band, channel);
-       if (!iwl_legacy_is_channel_valid(ch_info))
-               return false;
-
-       if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
-               return !(ch_info->ht40_extension_channel &
-                                       IEEE80211_CHAN_NO_HT40PLUS);
-       else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
-               return !(ch_info->ht40_extension_channel &
-                                       IEEE80211_CHAN_NO_HT40MINUS);
-
-       return false;
-}
-
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx,
-                           struct ieee80211_sta_ht_cap *ht_cap)
-{
-       if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
-               return false;
-
-       /*
-        * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
-        * the bit will not be set in the pure 40 MHz case.
-        */
-       if (ht_cap && !ht_cap->ht_supported)
-               return false;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-       if (priv->disable_ht40)
-               return false;
-#endif
-
-       return iwl_legacy_is_channel_extension(priv, priv->band,
-                       le16_to_cpu(ctx->staging.channel),
-                       ctx->ht.extension_chan_offset);
-}
-EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
-
-static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
-       u16 new_val;
-       u16 beacon_factor;
-
-       /*
-        * If mac80211 hasn't given us a beacon interval, program
-        * the default into the device.
-        */
-       if (!beacon_val)
-               return DEFAULT_BEACON_INTERVAL;
-
-       /*
-        * If the beacon interval we obtained from the peer
-        * is too large, we'll have to wake up more often
-        * (and in IBSS case, we'll beacon too much)
-        *
-        * For example, if max_beacon_val is 4096, and the
-        * requested beacon interval is 7000, we'll have to
-        * use 3500 to be able to wake up on the beacons.
-        *
-        * This could badly influence beacon detection stats.
-        */
-
-       beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
-       new_val = beacon_val / beacon_factor;
-
-       if (!new_val)
-               new_val = max_beacon_val;
-
-       return new_val;
-}
-
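A minimal standalone sketch of the clamping performed by iwl_legacy_adjust_beacon_interval() above (editor's illustration, not part of this patch; the sketch_* names and the stand-in default value are hypothetical), reproducing the 7000 -> 3500 example from the comment:

#include <stdint.h>

/* Editor's sketch: model of the beacon-interval clamp above. */
static uint16_t sketch_adjust_beacon_interval(uint16_t val, uint16_t max_val)
{
	uint16_t factor;

	if (!val)
		return 100;	/* stand-in for DEFAULT_BEACON_INTERVAL */

	/* divisor chosen so that val / factor always fits under max_val */
	factor = (val + max_val) / max_val;

	return (val / factor) ? (val / factor) : max_val;
}

/* e.g. sketch_adjust_beacon_interval(7000, 4096) == 3500 */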
-int
-iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       u64 tsf;
-       s32 interval_tm, rem;
-       struct ieee80211_conf *conf = NULL;
-       u16 beacon_int;
-       struct ieee80211_vif *vif = ctx->vif;
-
-       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
-       lockdep_assert_held(&priv->mutex);
-
-       memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
-       ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
-       ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
-       beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
-       /*
-        * TODO: For IBSS we need to get atim_window from mac80211,
-        *       for now just always use 0
-        */
-       ctx->timing.atim_window = 0;
-
-       beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
-                       priv->hw_params.max_beacon_itrvl * TIME_UNIT);
-       ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-
-       tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
-       interval_tm = beacon_int * TIME_UNIT;
-       rem = do_div(tsf, interval_tm);
-       ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
-       ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
-       IWL_DEBUG_ASSOC(priv,
-                       "beacon interval %d beacon timer %d beacon tim %d\n",
-                       le16_to_cpu(ctx->timing.beacon_interval),
-                       le32_to_cpu(ctx->timing.beacon_init_val),
-                       le16_to_cpu(ctx->timing.atim_window));
-
-       return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
-                               sizeof(ctx->timing), &ctx->timing);
-}
-EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
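For clarity, a small self-contained sketch (editor's illustration only; the sketch_* name is hypothetical) of how beacon_init_val is derived above: it is the number of usec remaining until the next beacon boundary, computed from the current TSF and the beacon interval converted to usec:

#include <stdint.h>

/* Editor's sketch: time remaining until the next beacon, in usec. */
static uint32_t sketch_time_to_next_beacon(uint64_t tsf_usec,
					   uint32_t interval_usec)
{
	/* usec already elapsed within the current beacon period */
	uint32_t rem = (uint32_t)(tsf_usec % interval_usec);

	/* usec left until the next beacon boundary */
	return interval_usec - rem;
}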
-
-void
-iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
-                               struct iwl_rxon_context *ctx,
-                               int hw_decrypt)
-{
-       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
-       if (hw_decrypt)
-               rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
-       else
-               rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
-
-/* validate RXON structure is valid */
-int
-iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-       bool error = false;
-
-       if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
-               if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
-                       IWL_WARN(priv, "check 2.4G: wrong narrow\n");
-                       error = true;
-               }
-               if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
-                       IWL_WARN(priv, "check 2.4G: wrong radar\n");
-                       error = true;
-               }
-       } else {
-               if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
-                       IWL_WARN(priv, "check 5.2G: not short slot!\n");
-                       error = true;
-               }
-               if (rxon->flags & RXON_FLG_CCK_MSK) {
-                       IWL_WARN(priv, "check 5.2G: CCK!\n");
-                       error = true;
-               }
-       }
-       if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
-               IWL_WARN(priv, "mac/bssid mcast!\n");
-               error = true;
-       }
-
-       /* make sure basic rates 6Mbps and 1Mbps are supported */
-       if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
-           (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
-               IWL_WARN(priv, "neither 1 nor 6 are basic\n");
-               error = true;
-       }
-
-       if (le16_to_cpu(rxon->assoc_id) > 2007) {
-               IWL_WARN(priv, "aid > 2007\n");
-               error = true;
-       }
-
-       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
-                       == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
-               IWL_WARN(priv, "CCK and short slot\n");
-               error = true;
-       }
-
-       if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
-                       == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
-               IWL_WARN(priv, "CCK and auto detect\n");
-               error = true;
-       }
-
-       if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
-                           RXON_FLG_TGG_PROTECT_MSK)) ==
-                           RXON_FLG_TGG_PROTECT_MSK) {
-               IWL_WARN(priv, "TGg but no auto-detect\n");
-               error = true;
-       }
-
-       if (error)
-               IWL_WARN(priv, "Tuning to channel %d\n",
-                           le16_to_cpu(rxon->channel));
-
-       if (error) {
-               IWL_ERR(priv, "Invalid RXON\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
-
-/**
- * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
-                          struct iwl_rxon_context *ctx)
-{
-       const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
-       const struct iwl_legacy_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond)                                                      \
-       if ((cond)) {                                                   \
-               IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");   \
-               return 1;                                               \
-       }
-
-#define CHK_NEQ(c1, c2)                                                \
-       if ((c1) != (c2)) {                                     \
-               IWL_DEBUG_INFO(priv, "need full RXON - "        \
-                              #c1 " != " #c2 " - %d != %d\n",  \
-                              (c1), (c2));                     \
-               return 1;                                       \
-       }
-
-       /* These items are only settable from the full RXON command */
-       CHK(!iwl_legacy_is_associated_ctx(ctx));
-       CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
-       CHK(compare_ether_addr(staging->node_addr, active->node_addr));
-       CHK(compare_ether_addr(staging->wlap_bssid_addr,
-                               active->wlap_bssid_addr));
-       CHK_NEQ(staging->dev_type, active->dev_type);
-       CHK_NEQ(staging->channel, active->channel);
-       CHK_NEQ(staging->air_propagation, active->air_propagation);
-       CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
-               active->ofdm_ht_single_stream_basic_rates);
-       CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
-               active->ofdm_ht_dual_stream_basic_rates);
-       CHK_NEQ(staging->assoc_id, active->assoc_id);
-
-       /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
-        * be updated with the RXON_ASSOC command -- however only some
-        * flag transitions are allowed using RXON_ASSOC */
-
-       /* Check if we are not switching bands */
-       CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
-               active->flags & RXON_FLG_BAND_24G_MSK);
-
-       /* Check if we are switching association toggle */
-       CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
-               active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx)
-{
-       /*
-        * Assign the lowest rate -- should really get this from
-        * the beacon skb from mac80211.
-        */
-       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
-               return IWL_RATE_1M_PLCP;
-       else
-               return IWL_RATE_6M_PLCP;
-}
-EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
-
-static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
-                            struct iwl_ht_config *ht_conf,
-                            struct iwl_rxon_context *ctx)
-{
-       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
-       if (!ctx->ht.enabled) {
-               rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
-                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
-                       RXON_FLG_HT40_PROT_MSK |
-                       RXON_FLG_HT_PROT_MSK);
-               return;
-       }
-
-       rxon->flags |= cpu_to_le32(ctx->ht.protection <<
-                                       RXON_FLG_HT_OPERATING_MODE_POS);
-
-       /* Set up channel bandwidth:
-        * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
-       /* clear the HT channel mode before setting the mode */
-       rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
-                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
-               /* pure ht40 */
-               if (ctx->ht.protection ==
-                               IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
-                       rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
-                       /* Note: control channel is opposite of extension channel */
-                       switch (ctx->ht.extension_chan_offset) {
-                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                               rxon->flags &=
-                                       ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-                               break;
-                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                               rxon->flags |=
-                                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-                               break;
-                       }
-               } else {
-                       /* Note: control channel is opposite of extension channel */
-                       switch (ctx->ht.extension_chan_offset) {
-                       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                               rxon->flags &=
-                                       ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
-                               break;
-                       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                               rxon->flags |=
-                                       RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
-                               rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
-                               break;
-                       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
-                       default:
-                               /* channel location only valid if in Mixed mode */
-                               IWL_ERR(priv,
-                                       "invalid extension channel offset\n");
-                               break;
-                       }
-               }
-       } else {
-               rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
-       }
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-       IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
-                       "extension channel offset 0x%x\n",
-                       le32_to_cpu(rxon->flags), ctx->ht.protection,
-                       ctx->ht.extension_chan_offset);
-}
-
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
-       struct iwl_rxon_context *ctx;
-
-       for_each_context(priv, ctx)
-               _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
-
-/* Return a valid, unused channel for a passive scan to reset the RF */
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
-                                enum ieee80211_band band)
-{
-       const struct iwl_channel_info *ch_info;
-       int i;
-       u8 channel = 0;
-       u8 min, max;
-       struct iwl_rxon_context *ctx;
-
-       if (band == IEEE80211_BAND_5GHZ) {
-               min = 14;
-               max = priv->channel_count;
-       } else {
-               min = 0;
-               max = 14;
-       }
-
-       for (i = min; i < max; i++) {
-               bool busy = false;
-
-               for_each_context(priv, ctx) {
-                       busy = priv->channel_info[i].channel ==
-                               le16_to_cpu(ctx->staging.channel);
-                       if (busy)
-                               break;
-               }
-
-               if (busy)
-                       continue;
-
-               channel = priv->channel_info[i].channel;
-               ch_info = iwl_legacy_get_channel_info(priv, band, channel);
-               if (iwl_legacy_is_channel_valid(ch_info))
-                       break;
-       }
-
-       return channel;
-}
-EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
-
-/**
- * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
- *
- * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-int
-iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
-                        struct iwl_rxon_context *ctx)
-{
-       enum ieee80211_band band = ch->band;
-       u16 channel = ch->hw_value;
-
-       if ((le16_to_cpu(ctx->staging.channel) == channel) &&
-           (priv->band == band))
-               return 0;
-
-       ctx->staging.channel = cpu_to_le16(channel);
-       if (band == IEEE80211_BAND_5GHZ)
-               ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
-       else
-               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
-       priv->band = band;
-
-       IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
-
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx,
-                           enum ieee80211_band band,
-                           struct ieee80211_vif *vif)
-{
-       if (band == IEEE80211_BAND_5GHZ) {
-               ctx->staging.flags &=
-                   ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
-                     | RXON_FLG_CCK_MSK);
-               ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-       } else {
-               /* Copied from iwl_post_associate() */
-               if (vif && vif->bss_conf.use_short_slot)
-                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
-               ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-               ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
-               ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
-                                  struct iwl_rxon_context *ctx)
-{
-       const struct iwl_channel_info *ch_info;
-
-       memset(&ctx->staging, 0, sizeof(ctx->staging));
-
-       if (!ctx->vif) {
-               ctx->staging.dev_type = ctx->unused_devtype;
-       } else
-       switch (ctx->vif->type) {
-
-       case NL80211_IFTYPE_STATION:
-               ctx->staging.dev_type = ctx->station_devtype;
-               ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
-               break;
-
-       case NL80211_IFTYPE_ADHOC:
-               ctx->staging.dev_type = ctx->ibss_devtype;
-               ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
-               ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
-                                                 RXON_FILTER_ACCEPT_GRP_MSK;
-               break;
-
-       default:
-               IWL_ERR(priv, "Unsupported interface type %d\n",
-                       ctx->vif->type);
-               break;
-       }
-
-#if 0
-       /* TODO:  Figure out when short_preamble would be set and cache from
-        * that */
-       if (!hw_to_local(priv->hw)->short_preamble)
-               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-       else
-               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
-       ch_info = iwl_legacy_get_channel_info(priv, priv->band,
-                                      le16_to_cpu(ctx->active.channel));
-
-       if (!ch_info)
-               ch_info = &priv->channel_info[0];
-
-       ctx->staging.channel = cpu_to_le16(ch_info->channel);
-       priv->band = ch_info->band;
-
-       iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
-       ctx->staging.ofdm_basic_rates =
-           (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-       ctx->staging.cck_basic_rates =
-           (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
-       /* clear both MIX and PURE40 mode flag */
-       ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
-                                       RXON_FLG_CHANNEL_MODE_PURE_40);
-       if (ctx->vif)
-               memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
-       ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
-       ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-}
-EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
-
-void iwl_legacy_set_rate(struct iwl_priv *priv)
-{
-       const struct ieee80211_supported_band *hw = NULL;
-       struct ieee80211_rate *rate;
-       struct iwl_rxon_context *ctx;
-       int i;
-
-       hw = iwl_get_hw_mode(priv, priv->band);
-       if (!hw) {
-               IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
-               return;
-       }
-
-       priv->active_rate = 0;
-
-       for (i = 0; i < hw->n_bitrates; i++) {
-               rate = &(hw->bitrates[i]);
-               if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
-                       priv->active_rate |= (1 << rate->hw_value);
-       }
-
-       IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
-       for_each_context(priv, ctx) {
-               ctx->staging.cck_basic_rates =
-                   (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
-               ctx->staging.ofdm_basic_rates =
-                  (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_set_rate);
-
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-               ieee80211_chswitch_done(ctx->vif, is_success);
-}
-EXPORT_SYMBOL(iwl_legacy_chswitch_done);
-
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-
-       if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-               return;
-
-       if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
-               rxon->channel = csa->channel;
-               ctx->staging.channel = csa->channel;
-               IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
-                             le16_to_cpu(csa->channel));
-               iwl_legacy_chswitch_done(priv, true);
-       } else {
-               IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
-                       le16_to_cpu(csa->channel));
-               iwl_legacy_chswitch_done(priv, false);
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_rx_csa);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
-                            struct iwl_rxon_context *ctx)
-{
-       struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
-       IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
-       iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
-       IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
-                               le16_to_cpu(rxon->channel));
-       IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
-       IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
-                               le32_to_cpu(rxon->filter_flags));
-       IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
-       IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
-                       rxon->ofdm_basic_rates);
-       IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
-                               rxon->cck_basic_rates);
-       IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
-       IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
-       IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
-                               le16_to_cpu(rxon->assoc_id));
-}
-EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
-#endif
-/**
- * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
-{
-       /* Set the FW error flag -- cleared on iwl_down */
-       set_bit(STATUS_FW_ERROR, &priv->status);
-
-       /* Cancel currently queued command. */
-       clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
-       IWL_ERR(priv, "Loaded firmware version: %s\n",
-               priv->hw->wiphy->fw_version);
-
-       priv->cfg->ops->lib->dump_nic_error_log(priv);
-       if (priv->cfg->ops->lib->dump_fh)
-               priv->cfg->ops->lib->dump_fh(priv, NULL, false);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
-               iwl_legacy_print_rx_config_cmd(priv,
-                                       &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
-       wake_up(&priv->wait_command_queue);
-
-       /* Keep the restart process from trying to send host
-        * commands by clearing the INIT status bit */
-       clear_bit(STATUS_READY, &priv->status);
-
-       if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-               IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
-                         "Restarting adapter due to uCode error.\n");
-
-               if (priv->cfg->mod_params->restart_fw)
-                       queue_work(priv->workqueue, &priv->restart);
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
-
-static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
-{
-       int ret = 0;
-
-       /* stop device's busmaster DMA activity */
-       iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
-       ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
-                       CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
-       if (ret)
-               IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
-       IWL_DEBUG_INFO(priv, "stop master\n");
-
-       return ret;
-}
-
-void iwl_legacy_apm_stop(struct iwl_priv *priv)
-{
-       IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
-       /* Stop device's DMA activity */
-       iwl_legacy_apm_stop_master(priv);
-
-       /* Reset the entire device */
-       iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
-       udelay(10);
-
-       /*
-        * Clear "initialization complete" bit to move adapter from
-        * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
-        */
-       iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
-                               CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-EXPORT_SYMBOL(iwl_legacy_apm_stop);
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE:  This does not load uCode nor start the embedded processor
- */
-int iwl_legacy_apm_init(struct iwl_priv *priv)
-{
-       int ret = 0;
-       u16 lctl;
-
-       IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
-       /*
-        * Use "set_bit" below rather than "write", to preserve any hardware
-        * bits already set by default after reset.
-        */
-
-       /* Disable L0S exit timer (platform NMI Work/Around) */
-       iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-                         CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
-       /*
-        * Disable L0s without affecting L1;
-        *  don't wait for ICH L0s (ICH bug W/A)
-        */
-       iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
-                         CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
-       /* Set FH wait threshold to maximum (HW error during stress W/A) */
-       iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
-                                       CSR_DBG_HPET_MEM_REG_VAL);
-
-       /*
-        * Enable HAP INTA (interrupt from management bus) to
-        * wake device's PCI Express link L1a -> L0s
-        * NOTE:  This is no-op for 3945 (non-existent bit)
-        */
-       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                                   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
-       /*
-        * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
-        * Check if BIOS (or OS) enabled L1-ASPM on this device.
-        * If so (likely), disable L0S, so device moves directly L0->L1;
-        *    costs negligible amount of power savings.
-        * If not (unlikely), enable L0S, so there is at least some
-        *    power savings, even without L1.
-        */
-       if (priv->cfg->base_params->set_l0s) {
-               lctl = iwl_legacy_pcie_link_ctl(priv);
-               if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
-                                       PCI_CFG_LINK_CTRL_VAL_L1_EN) {
-                       /* L1-ASPM enabled; disable(!) L0S  */
-                       iwl_legacy_set_bit(priv, CSR_GIO_REG,
-                                       CSR_GIO_REG_VAL_L0S_ENABLED);
-                       IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
-               } else {
-                       /* L1-ASPM disabled; enable(!) L0S */
-                       iwl_legacy_clear_bit(priv, CSR_GIO_REG,
-                                       CSR_GIO_REG_VAL_L0S_ENABLED);
-                       IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
-               }
-       }
-
-       /* Configure analog phase-lock-loop before activating to D0A */
-       if (priv->cfg->base_params->pll_cfg_val)
-               iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
-                           priv->cfg->base_params->pll_cfg_val);
-
-       /*
-        * Set "initialization complete" bit to move adapter from
-        * D0U* --> D0A* (powered-up active) state.
-        */
-       iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
-       /*
-        * Wait for clock stabilization; once stabilized, access to
-        * device-internal resources is supported, e.g. iwl_legacy_write_prph()
-        * and accesses to uCode SRAM.
-        */
-       ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
-                       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
-                       CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
-       if (ret < 0) {
-               IWL_DEBUG_INFO(priv, "Failed to init the card\n");
-               goto out;
-       }
-
-       /*
-        * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
-        * BSM (Bootstrap State Machine) is only in 3945 and 4965.
-        *
-        * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
-        * do not disable clocks.  This preserves any hardware bits already
-        * set by default in "CLK_CTRL_REG" after reset.
-        */
-       if (priv->cfg->base_params->use_bsm)
-               iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
-                       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
-       else
-               iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
-                       APMG_CLK_VAL_DMA_CLK_RQT);
-       udelay(20);
-
-       /* Disable L1-Active */
-       iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
-                         APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_apm_init);
-
-
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
-       int ret;
-       s8 prev_tx_power;
-       bool defer;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (priv->tx_power_user_lmt == tx_power && !force)
-               return 0;
-
-       if (!priv->cfg->ops->lib->send_tx_power)
-               return -EOPNOTSUPP;
-
-       /* 0 dBm means 1 milliwatt */
-       if (tx_power < 0) {
-               IWL_WARN(priv,
-                        "Requested user TXPOWER %d below 1 mW.\n",
-                        tx_power);
-               return -EINVAL;
-       }
-
-       if (tx_power > priv->tx_power_device_lmt) {
-               IWL_WARN(priv,
-                       "Requested user TXPOWER %d above upper limit %d.\n",
-                        tx_power, priv->tx_power_device_lmt);
-               return -EINVAL;
-       }
-
-       if (!iwl_legacy_is_ready_rf(priv))
-               return -EIO;
-
-       /* scan complete and commit_rxon use the tx_power_next value;
-        * it always needs to be updated with the newest request */
-       priv->tx_power_next = tx_power;
-
-       /* do not set tx power when scanning or channel changing */
-       defer = test_bit(STATUS_SCANNING, &priv->status) ||
-               memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
-       if (defer && !force) {
-               IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
-               return 0;
-       }
-
-       prev_tx_power = priv->tx_power_user_lmt;
-       priv->tx_power_user_lmt = tx_power;
-
-       ret = priv->cfg->ops->lib->send_tx_power(priv);
-
-       /* if setting tx_power fails, restore the original tx power */
-       if (ret) {
-               priv->tx_power_user_lmt = prev_tx_power;
-               priv->tx_power_next = prev_tx_power;
-       }
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_set_tx_power);
-
-void iwl_legacy_send_bt_config(struct iwl_priv *priv)
-{
-       struct iwl_bt_cmd bt_cmd = {
-               .lead_time = BT_LEAD_TIME_DEF,
-               .max_kill = BT_MAX_KILL_DEF,
-               .kill_ack_mask = 0,
-               .kill_cts_mask = 0,
-       };
-
-       if (!bt_coex_active)
-               bt_cmd.flags = BT_COEX_DISABLE;
-       else
-               bt_cmd.flags = BT_COEX_ENABLE;
-
-       IWL_DEBUG_INFO(priv, "BT coex %s\n",
-               (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
-       if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
-                            sizeof(struct iwl_bt_cmd), &bt_cmd))
-               IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-EXPORT_SYMBOL(iwl_legacy_send_bt_config);
-
-int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
-       struct iwl_statistics_cmd statistics_cmd = {
-               .configuration_flags =
-                       clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
-       };
-
-       if (flags & CMD_ASYNC)
-               return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
-                                       sizeof(struct iwl_statistics_cmd),
-                                       &statistics_cmd, NULL);
-       else
-               return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
-                                       sizeof(struct iwl_statistics_cmd),
-                                       &statistics_cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
-
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
-                          struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
-       IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
-                    sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
-
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
-                       "notification for %s:\n", len,
-                       iwl_legacy_get_cmd_string(pkt->hdr.cmd));
-       iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
-
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
-                       struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
-       IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
-               "seq 0x%04X ser 0x%08X\n",
-               le32_to_cpu(pkt->u.err_resp.error_type),
-               iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
-               pkt->u.err_resp.cmd_id,
-               le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
-               le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
-{
-       memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
-                          struct ieee80211_vif *vif, u16 queue,
-                          const struct ieee80211_tx_queue_params *params)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx;
-       unsigned long flags;
-       int q;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (!iwl_legacy_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-               return -EIO;
-       }
-
-       if (queue >= AC_NUM) {
-               IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
-               return 0;
-       }
-
-       q = AC_NUM - 1 - queue;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       for_each_context(priv, ctx) {
-               ctx->qos_data.def_qos_parm.ac[q].cw_min =
-                       cpu_to_le16(params->cw_min);
-               ctx->qos_data.def_qos_parm.ac[q].cw_max =
-                       cpu_to_le16(params->cw_max);
-               ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
-               ctx->qos_data.def_qos_parm.ac[q].edca_txop =
-                               cpu_to_le16((params->txop * 32));
-
-               ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
-       }
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
-
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
-
-static int
-iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       iwl_legacy_connection_init_rx_config(priv, ctx);
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
-       return iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static int iwl_legacy_setup_interface(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx)
-{
-       struct ieee80211_vif *vif = ctx->vif;
-       int err;
-
-       lockdep_assert_held(&priv->mutex);
-
-       /*
-        * This variable will be correct only when there's just
-        * a single context, but all code using it is for hardware
-        * that supports only one context.
-        */
-       priv->iw_mode = vif->type;
-
-       ctx->is_active = true;
-
-       err = iwl_legacy_set_mode(priv, ctx);
-       if (err) {
-               if (!ctx->always_active)
-                       ctx->is_active = false;
-               return err;
-       }
-
-       return 0;
-}
-
-int
-iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       struct iwl_rxon_context *tmp, *ctx = NULL;
-       int err;
-
-       IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
-                          vif->type, vif->addr);
-
-       mutex_lock(&priv->mutex);
-
-       if (!iwl_legacy_is_ready_rf(priv)) {
-               IWL_WARN(priv, "Try to add interface when device not ready\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       for_each_context(priv, tmp) {
-               u32 possible_modes =
-                       tmp->interface_modes | tmp->exclusive_interface_modes;
-
-               if (tmp->vif) {
-                       /* check if this busy context is exclusive */
-                       if (tmp->exclusive_interface_modes &
-                                               BIT(tmp->vif->type)) {
-                               err = -EINVAL;
-                               goto out;
-                       }
-                       continue;
-               }
-
-               if (!(possible_modes & BIT(vif->type)))
-                       continue;
-
-               /* found a possibly usable context with no interface attached */
-               ctx = tmp;
-               break;
-       }
-
-       if (!ctx) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
-
-       vif_priv->ctx = ctx;
-       ctx->vif = vif;
-
-       err = iwl_legacy_setup_interface(priv, ctx);
-       if (!err)
-               goto out;
-
-       ctx->vif = NULL;
-       priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
-
-static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
-                                  struct ieee80211_vif *vif,
-                                  bool mode_change)
-{
-       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (priv->scan_vif == vif) {
-               iwl_legacy_scan_cancel_timeout(priv, 200);
-               iwl_legacy_force_scan_end(priv);
-       }
-
-       if (!mode_change) {
-               iwl_legacy_set_mode(priv, ctx);
-               if (!ctx->always_active)
-                       ctx->is_active = false;
-       }
-}
-
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       mutex_lock(&priv->mutex);
-
-       WARN_ON(ctx->vif != vif);
-       ctx->vif = NULL;
-
-       iwl_legacy_teardown_interface(priv, vif, false);
-
-       memset(priv->bssid, 0, ETH_ALEN);
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
-
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
-{
-       if (!priv->txq)
-               priv->txq = kzalloc(
-                       sizeof(struct iwl_tx_queue) *
-                               priv->cfg->base_params->num_of_queues,
-                       GFP_KERNEL);
-       if (!priv->txq) {
-               IWL_ERR(priv, "Not enough memory for txq\n");
-               return -ENOMEM;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
-
-void iwl_legacy_txq_mem(struct iwl_priv *priv)
-{
-       kfree(priv->txq);
-       priv->txq = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_mem);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE  (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-       priv->tx_traffic_idx = 0;
-       priv->rx_traffic_idx = 0;
-       if (priv->tx_traffic)
-               memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-       if (priv->rx_traffic)
-               memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
-       u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
-       if (iwlegacy_debug_level & IWL_DL_TX) {
-               if (!priv->tx_traffic) {
-                       priv->tx_traffic =
-                               kzalloc(traffic_size, GFP_KERNEL);
-                       if (!priv->tx_traffic)
-                               return -ENOMEM;
-               }
-       }
-       if (iwlegacy_debug_level & IWL_DL_RX) {
-               if (!priv->rx_traffic) {
-                       priv->rx_traffic =
-                               kzalloc(traffic_size, GFP_KERNEL);
-                       if (!priv->rx_traffic)
-                               return -ENOMEM;
-               }
-       }
-       iwl_legacy_reset_traffic_log(priv);
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
-
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-       kfree(priv->tx_traffic);
-       priv->tx_traffic = NULL;
-
-       kfree(priv->rx_traffic);
-       priv->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
-
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
-                     u16 length, struct ieee80211_hdr *header)
-{
-       __le16 fc;
-       u16 len;
-
-       if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
-               return;
-
-       if (!priv->tx_traffic)
-               return;
-
-       fc = header->frame_control;
-       if (ieee80211_is_data(fc)) {
-               len = (length > IWL_TRAFFIC_ENTRY_SIZE)
-                      ? IWL_TRAFFIC_ENTRY_SIZE : length;
-               memcpy((priv->tx_traffic +
-                      (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
-                      header, len);
-               priv->tx_traffic_idx =
-                       (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
-
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
-                     u16 length, struct ieee80211_hdr *header)
-{
-       __le16 fc;
-       u16 len;
-
-       if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
-               return;
-
-       if (!priv->rx_traffic)
-               return;
-
-       fc = header->frame_control;
-       if (ieee80211_is_data(fc)) {
-               len = (length > IWL_TRAFFIC_ENTRY_SIZE)
-                      ? IWL_TRAFFIC_ENTRY_SIZE : length;
-               memcpy((priv->rx_traffic +
-                      (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
-                      header, len);
-               priv->rx_traffic_idx =
-                       (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
-
-const char *iwl_legacy_get_mgmt_string(int cmd)
-{
-       switch (cmd) {
-               IWL_CMD(MANAGEMENT_ASSOC_REQ);
-               IWL_CMD(MANAGEMENT_ASSOC_RESP);
-               IWL_CMD(MANAGEMENT_REASSOC_REQ);
-               IWL_CMD(MANAGEMENT_REASSOC_RESP);
-               IWL_CMD(MANAGEMENT_PROBE_REQ);
-               IWL_CMD(MANAGEMENT_PROBE_RESP);
-               IWL_CMD(MANAGEMENT_BEACON);
-               IWL_CMD(MANAGEMENT_ATIM);
-               IWL_CMD(MANAGEMENT_DISASSOC);
-               IWL_CMD(MANAGEMENT_AUTH);
-               IWL_CMD(MANAGEMENT_DEAUTH);
-               IWL_CMD(MANAGEMENT_ACTION);
-       default:
-               return "UNKNOWN";
-
-       }
-}
-
-const char *iwl_legacy_get_ctrl_string(int cmd)
-{
-       switch (cmd) {
-               IWL_CMD(CONTROL_BACK_REQ);
-               IWL_CMD(CONTROL_BACK);
-               IWL_CMD(CONTROL_PSPOLL);
-               IWL_CMD(CONTROL_RTS);
-               IWL_CMD(CONTROL_CTS);
-               IWL_CMD(CONTROL_ACK);
-               IWL_CMD(CONTROL_CFEND);
-               IWL_CMD(CONTROL_CFENDACK);
-       default:
-               return "UNKNOWN";
-
-       }
-}
-
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
-{
-       memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
-       memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
- * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
- * use debugfs to display the tx/rx statistics.
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
- * information is recorded, but DATA packets are still counted because
- * iwl_led.c needs to control LED blinking based on the number of tx and
- * rx data frames.
- */
-void
-iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
-       struct traffic_stats    *stats;
-
-       if (is_tx)
-               stats = &priv->tx_stats;
-       else
-               stats = &priv->rx_stats;
-
-       if (ieee80211_is_mgmt(fc)) {
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-                       stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
-                       stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-                       stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
-                       stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
-                       stats->mgmt[MANAGEMENT_PROBE_REQ]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
-                       stats->mgmt[MANAGEMENT_PROBE_RESP]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_BEACON):
-                       stats->mgmt[MANAGEMENT_BEACON]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_ATIM):
-                       stats->mgmt[MANAGEMENT_ATIM]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
-                       stats->mgmt[MANAGEMENT_DISASSOC]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_AUTH):
-                       stats->mgmt[MANAGEMENT_AUTH]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-                       stats->mgmt[MANAGEMENT_DEAUTH]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_ACTION):
-                       stats->mgmt[MANAGEMENT_ACTION]++;
-                       break;
-               }
-       } else if (ieee80211_is_ctl(fc)) {
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
-                       stats->ctrl[CONTROL_BACK_REQ]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_BACK):
-                       stats->ctrl[CONTROL_BACK]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
-                       stats->ctrl[CONTROL_PSPOLL]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_RTS):
-                       stats->ctrl[CONTROL_RTS]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_CTS):
-                       stats->ctrl[CONTROL_CTS]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_ACK):
-                       stats->ctrl[CONTROL_ACK]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_CFEND):
-                       stats->ctrl[CONTROL_CFEND]++;
-                       break;
-               case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
-                       stats->ctrl[CONTROL_CFENDACK]++;
-                       break;
-               }
-       } else {
-               /* data */
-               stats->data_cnt++;
-               stats->data_bytes += len;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_update_stats);
-#endif
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
-{
-       struct iwl_force_reset *force_reset;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return -EINVAL;
-
-       force_reset = &priv->force_reset;
-       force_reset->reset_request_count++;
-       if (!external) {
-               if (force_reset->last_force_reset_jiffies &&
-                   time_after(force_reset->last_force_reset_jiffies +
-                   force_reset->reset_duration, jiffies)) {
-                       IWL_DEBUG_INFO(priv, "force reset rejected\n");
-                       force_reset->reset_reject_count++;
-                       return -EAGAIN;
-               }
-       }
-       force_reset->reset_success_count++;
-       force_reset->last_force_reset_jiffies = jiffies;
-
-       /*
-        * If the request is external (e.g. from debugfs), always perform
-        * it regardless of the module parameter setting.
-        * If the request is internal (uCode error or driver-detected
-        * failure), the fw_restart module parameter needs to be checked
-        * before reloading the firmware.
-        */
-
-       if (!external && !priv->cfg->mod_params->restart_fw) {
-               IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
-                              "module parameter setting\n");
-               return 0;
-       }
-
-       IWL_ERR(priv, "On demand firmware reload\n");
-
-       /* Set the FW error flag -- cleared on iwl_down */
-       set_bit(STATUS_FW_ERROR, &priv->status);
-       wake_up(&priv->wait_command_queue);
-       /*
-        * Keep the restart process from trying to send host
-        * commands by clearing the INIT status bit
-        */
-       clear_bit(STATUS_READY, &priv->status);
-       queue_work(priv->workqueue, &priv->restart);
-
-       return 0;
-}
-
-int
-iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
-                       struct ieee80211_vif *vif,
-                       enum nl80211_iftype newtype, bool newp2p)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-       struct iwl_rxon_context *tmp;
-       u32 interface_modes;
-       int err;
-
-       newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
-       mutex_lock(&priv->mutex);
-
-       if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
-               /*
-                * Huh? But wait ... this can maybe happen when
-                * we're in the middle of a firmware restart!
-                */
-               err = -EBUSY;
-               goto out;
-       }
-
-       interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
-       if (!(interface_modes & BIT(newtype))) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       if (ctx->exclusive_interface_modes & BIT(newtype)) {
-               for_each_context(priv, tmp) {
-                       if (ctx == tmp)
-                               continue;
-
-                       if (!tmp->vif)
-                               continue;
-
-                       /*
-                        * The current mode switch would be exclusive, but
-                        * another context is active ... refuse the switch.
-                        */
-                       err = -EBUSY;
-                       goto out;
-               }
-       }
-
-       /* success */
-       iwl_legacy_teardown_interface(priv, vif, true);
-       vif->type = newtype;
-       vif->p2p = newp2p;
-       err = iwl_legacy_setup_interface(priv, ctx);
-       WARN_ON(err);
-       /*
-        * We've switched internally, but submitting to the
-        * device may have failed for some reason. Mask this
-        * error, because otherwise mac80211 will not switch
-        * (and set the interface type back) and we'll be
-        * out of sync with it.
-        */
-       err = 0;
-
- out:
-       mutex_unlock(&priv->mutex);
-       return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
-
-/*
- * On every watchdog tick we check the (latest) time stamp. If it has not
- * changed during the timeout period and the queue is not empty, we reset
- * the firmware.
- */
-static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
-{
-       struct iwl_tx_queue *txq = &priv->txq[cnt];
-       struct iwl_queue *q = &txq->q;
-       unsigned long timeout;
-       int ret;
-
-       if (q->read_ptr == q->write_ptr) {
-               txq->time_stamp = jiffies;
-               return 0;
-       }
-
-       timeout = txq->time_stamp +
-                 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
-
-       if (time_after(jiffies, timeout)) {
-               IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
-                               q->id, priv->cfg->base_params->wd_timeout);
-               ret = iwl_legacy_force_reset(priv, false);
-               return (ret == -EAGAIN) ? 0 : 1;
-       }
-
-       return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures a hung queue
- * is discovered between timeout and 1.25*timeout: in the worst case a queue
- * stalls just after a tick, so the hang is noticed at most one extra tick
- * (timeout/4) after the timeout has expired.
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
-
-/*
- * Watchdog timer callback: we check each tx queue for a stuck condition;
- * if one is hung we reset the firmware. If everything is fine we just
- * rearm the timer.
- */
-void iwl_legacy_bg_watchdog(unsigned long data)
-{
-       struct iwl_priv *priv = (struct iwl_priv *)data;
-       int cnt;
-       unsigned long timeout;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       timeout = priv->cfg->base_params->wd_timeout;
-       if (timeout == 0)
-               return;
-
-       /* monitor and check for stuck cmd queue */
-       if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
-               return;
-
-       /* monitor and check for other stuck queues */
-       if (iwl_legacy_is_any_associated(priv)) {
-               for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-                       /* skip as we already checked the command queue */
-                       if (cnt == priv->cmd_queue)
-                               continue;
-                       if (iwl_legacy_check_stuck_queue(priv, cnt))
-                               return;
-               }
-       }
-
-       mod_timer(&priv->watchdog, jiffies +
-                 msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
-
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
-{
-       unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
-       if (timeout)
-               mod_timer(&priv->watchdog,
-                         jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
-       else
-               del_timer(&priv->watchdog);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
-
-/*
- * Extended beacon time format:
- * a time in usec is converted into a 32-bit value in extended:internal
- * format, where the extended part is the beacon count and the internal
- * part is the time in usec within one beacon interval.
- */
-u32
-iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
-                                       u32 usec, u32 beacon_interval)
-{
-       u32 quot;
-       u32 rem;
-       u32 interval = beacon_interval * TIME_UNIT;
-
-       if (!interval || !usec)
-               return 0;
-
-       quot = (usec / interval) &
-               (iwl_legacy_beacon_time_mask_high(priv,
-               priv->hw_params.beacon_time_tsf_bits) >>
-               priv->hw_params.beacon_time_tsf_bits);
-       rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
-                                  priv->hw_params.beacon_time_tsf_bits);
-
-       return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
-}
-EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
-                          u32 addon, u32 beacon_interval)
-{
-       u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
-                                       priv->hw_params.beacon_time_tsf_bits);
-       u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
-                                       priv->hw_params.beacon_time_tsf_bits);
-       u32 interval = beacon_interval * TIME_UNIT;
-       u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
-                               priv->hw_params.beacon_time_tsf_bits)) +
-                               (addon & iwl_legacy_beacon_time_mask_high(priv,
-                               priv->hw_params.beacon_time_tsf_bits));
-
-       if (base_low > addon_low)
-               res += base_low - addon_low;
-       else if (base_low < addon_low) {
-               res += interval + base_low - addon_low;
-               res += (1 << priv->hw_params.beacon_time_tsf_bits);
-       } else
-               res += (1 << priv->hw_params.beacon_time_tsf_bits);
-
-       return cpu_to_le32(res);
-}
-EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
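A small worked example of the extended:internal encoding (assuming 22 TSF bits, roughly the 4965-class value of hw_params.beacon_time_tsf_bits, and TIME_UNIT of 1024 usec per TU as defined in iwl-core.h):

        /* 2,048,000 usec with a 100 TU (102,400 usec) beacon interval */
        u32 v = iwl_legacy_usecs_to_beacons(priv, 2048000, 100);
        /* quotient 20 goes into the extended (high) part, remainder 0 */
        /* into the internal part, so v == (20 << 22) | 0              */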
-
-#ifdef CONFIG_PM
-
-int iwl_legacy_pci_suspend(struct device *device)
-{
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct iwl_priv *priv = pci_get_drvdata(pdev);
-
-       /*
-        * This function is called when the system goes into suspend state.
-        * mac80211 will call iwl_mac_stop() from its suspend handler first,
-        * but since iwl_mac_stop() has no knowledge of who the caller is,
-        * it will not call apm_ops.stop() to stop the DMA operation.
-        * Call apm_ops.stop() here to make sure the DMA is stopped.
-        */
-       iwl_legacy_apm_stop(priv);
-
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_suspend);
-
-int iwl_legacy_pci_resume(struct device *device)
-{
-       struct pci_dev *pdev = to_pci_dev(device);
-       struct iwl_priv *priv = pci_get_drvdata(pdev);
-       bool hw_rfkill = false;
-
-       /*
-        * We disable the RETRY_TIMEOUT register (0x41) to keep
-        * PCI Tx retries from interfering with C3 CPU state.
-        */
-       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
-       iwl_legacy_enable_interrupts(priv);
-
-       if (!(iwl_read32(priv, CSR_GP_CNTRL) &
-                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
-               hw_rfkill = true;
-
-       if (hw_rfkill)
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-       wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
-
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_resume);
-
-const struct dev_pm_ops iwl_legacy_pm_ops = {
-       .suspend = iwl_legacy_pci_suspend,
-       .resume = iwl_legacy_pci_resume,
-       .freeze = iwl_legacy_pci_suspend,
-       .thaw = iwl_legacy_pci_resume,
-       .poweroff = iwl_legacy_pci_suspend,
-       .restore = iwl_legacy_pci_resume,
-};
-EXPORT_SYMBOL(iwl_legacy_pm_ops);
-
-#endif /* CONFIG_PM */
-
-static void
-iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (!ctx->is_active)
-               return;
-
-       ctx->qos_data.def_qos_parm.qos_flags = 0;
-
-       if (ctx->qos_data.qos_active)
-               ctx->qos_data.def_qos_parm.qos_flags |=
-                       QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
-       if (ctx->ht.enabled)
-               ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
-       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
-                     ctx->qos_data.qos_active,
-                     ctx->qos_data.def_qos_parm.qos_flags);
-
-       iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
-                              sizeof(struct iwl_qosparam_cmd),
-                              &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct iwl_priv *priv = hw->priv;
-       const struct iwl_channel_info *ch_info;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_channel *channel = conf->channel;
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct iwl_rxon_context *ctx;
-       unsigned long flags = 0;
-       int ret = 0;
-       u16 ch;
-       int scan_active = 0;
-       bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return -EOPNOTSUPP;
-
-       mutex_lock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-                                       channel->hw_value, changed);
-
-       if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
-               scan_active = 1;
-               IWL_DEBUG_MAC80211(priv, "scan active\n");
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_SMPS |
-                      IEEE80211_CONF_CHANGE_CHANNEL)) {
-               /* mac80211 uses static for non-HT which is what we want */
-               priv->current_ht_config.smps = conf->smps_mode;
-
-               /*
-                * Recalculate chain counts.
-                *
-                * If monitor mode is enabled then mac80211 will
-                * set up the SM PS mode to OFF if an HT channel is
-                * configured.
-                */
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       for_each_context(priv, ctx)
-                               priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       /* during scanning mac80211 will delay channel setting until
-        * the scan finishes, with changed = 0
-        */
-       if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-               if (scan_active)
-                       goto set_ch_out;
-
-               ch = channel->hw_value;
-               ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
-               if (!iwl_legacy_is_channel_valid(ch_info)) {
-                       IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
-                       ret = -EINVAL;
-                       goto set_ch_out;
-               }
-
-               if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
-                   !iwl_legacy_is_channel_ibss(ch_info)) {
-                       IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
-                       ret = -EINVAL;
-                       goto set_ch_out;
-               }
-
-               spin_lock_irqsave(&priv->lock, flags);
-
-               for_each_context(priv, ctx) {
-                       /* Configure HT40 channels */
-                       if (ctx->ht.enabled != conf_is_ht(conf)) {
-                               ctx->ht.enabled = conf_is_ht(conf);
-                               ht_changed[ctx->ctxid] = true;
-                       }
-                       if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                       IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
-                       } else
-                               ctx->ht.is_40mhz = false;
-
-                       /*
-                        * Default to no protection. Protection mode will
-                        * later be set from BSS config in iwl_ht_conf
-                        */
-                       ctx->ht.protection =
-                                       IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
-                       /* if we are switching from HT to 2.4 GHz, clear
-                        * flags from any HT-related info since 2.4 GHz
-                        * does not support HT */
-                       if ((le16_to_cpu(ctx->staging.channel) != ch))
-                               ctx->staging.flags = 0;
-
-                       iwl_legacy_set_rxon_channel(priv, channel, ctx);
-                       iwl_legacy_set_rxon_ht(priv, ht_conf);
-
-                       iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
-                                              ctx->vif);
-               }
-
-               spin_unlock_irqrestore(&priv->lock, flags);
-
-               if (priv->cfg->ops->legacy->update_bcast_stations)
-                       ret =
-                       priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
-               /* The list of supported rates and rate mask can be different
-                * for each band; since the band may have changed, reset
-                * the rate mask to what mac80211 lists */
-               iwl_legacy_set_rate(priv);
-       }
-
-       if (changed & (IEEE80211_CONF_CHANGE_PS |
-                       IEEE80211_CONF_CHANGE_IDLE)) {
-               ret = iwl_legacy_power_update_mode(priv, false);
-               if (ret)
-                       IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
-       }
-
-       if (changed & IEEE80211_CONF_CHANGE_POWER) {
-               IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
-                       priv->tx_power_user_lmt, conf->power_level);
-
-               iwl_legacy_set_tx_power(priv, conf->power_level, false);
-       }
-
-       if (!iwl_legacy_is_ready(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               goto out;
-       }
-
-       if (scan_active)
-               goto out;
-
-       for_each_context(priv, ctx) {
-               if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
-                       iwl_legacy_commit_rxon(priv, ctx);
-               else
-                       IWL_DEBUG_INFO(priv,
-                               "Not re-sending same RXON configuration.\n");
-               if (ht_changed[ctx->ctxid])
-                       iwl_legacy_update_qos(priv, ctx);
-       }
-
-out:
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       mutex_unlock(&priv->mutex);
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       /* IBSS can only be the IWL_RXON_CTX_BSS context */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return;
-
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       spin_lock_irqsave(&priv->lock, flags);
-       memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* new association: get rid of the IBSS beacon skb */
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = NULL;
-
-       priv->timestamp = 0;
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl_legacy_scan_cancel_timeout(priv, 100);
-       if (!iwl_legacy_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
-               mutex_unlock(&priv->mutex);
-               return;
-       }
-
-       /* we are restarting the association process;
-        * clear the RXON_FILTER_ASSOC_MSK bit
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwl_legacy_commit_rxon(priv, ctx);
-
-       iwl_legacy_set_rate(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_legacy_ht_conf(struct iwl_priv *priv,
-                       struct ieee80211_vif *vif)
-{
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       struct ieee80211_sta *sta;
-       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_ASSOC(priv, "enter:\n");
-
-       if (!ctx->ht.enabled)
-               return;
-
-       ctx->ht.protection =
-               bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-       ctx->ht.non_gf_sta_present =
-               !!(bss_conf->ht_operation_mode &
-                               IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-       ht_conf->single_chain_sufficient = false;
-
-       switch (vif->type) {
-       case NL80211_IFTYPE_STATION:
-               rcu_read_lock();
-               sta = ieee80211_find_sta(vif, bss_conf->bssid);
-               if (sta) {
-                       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-                       int maxstreams;
-
-                       maxstreams = (ht_cap->mcs.tx_params &
-                             IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
-                               >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
-                       maxstreams += 1;
-
-                       if ((ht_cap->mcs.rx_mask[1] == 0) &&
-                           (ht_cap->mcs.rx_mask[2] == 0))
-                               ht_conf->single_chain_sufficient = true;
-                       if (maxstreams <= 1)
-                               ht_conf->single_chain_sufficient = true;
-               } else {
-                       /*
-                        * If at all, this can only happen through a race
-                        * when the AP disconnects us while we're still
-                        * setting up the connection, in that case mac80211
-                        * will soon tell us about that.
-                        */
-                       ht_conf->single_chain_sufficient = true;
-               }
-               rcu_read_unlock();
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               ht_conf->single_chain_sufficient = true;
-               break;
-       default:
-               break;
-       }
-
-       IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
-                                   struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
-       /*
-        * inform the ucode that there is no longer an
-        * association and that no more packets should be
-        * sent
-        */
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       ctx->staging.assoc_id = 0;
-       iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
-                                 struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       unsigned long flags;
-       __le64 timestamp;
-       struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
-       if (!skb)
-               return;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->beacon_ctx) {
-               IWL_ERR(priv, "update beacon but no beacon context!\n");
-               dev_kfree_skb(skb);
-               return;
-       }
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       priv->beacon_skb = skb;
-
-       timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
-       priv->timestamp = le64_to_cpu(timestamp);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if (!iwl_legacy_is_ready_rf(priv)) {
-               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-               return;
-       }
-
-       priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-       int ret;
-
-       if (WARN_ON(!priv->cfg->ops->legacy))
-               return;
-
-       IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
-       mutex_lock(&priv->mutex);
-
-       if (!iwl_legacy_is_alive(priv)) {
-               mutex_unlock(&priv->mutex);
-               return;
-       }
-
-       if (changes & BSS_CHANGED_QOS) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&priv->lock, flags);
-               ctx->qos_data.qos_active = bss_conf->qos;
-               iwl_legacy_update_qos(priv, ctx);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               /*
-                * the add_interface code must make sure we only ever
-                * have a single interface that could be beaconing at
-                * any time.
-                */
-               if (vif->bss_conf.enable_beacon)
-                       priv->beacon_ctx = ctx;
-               else
-                       priv->beacon_ctx = NULL;
-       }
-
-       if (changes & BSS_CHANGED_BSSID) {
-               IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
-               /*
-                * If there is currently a HW scan going on in the
-                * background then we need to cancel it else the RXON
-                * below/in post_associate will fail.
-                */
-               if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
-                       IWL_WARN(priv,
-                               "Aborted scan still in progress after 100ms\n");
-                       IWL_DEBUG_MAC80211(priv,
-                               "leaving - scan abort failed.\n");
-                       mutex_unlock(&priv->mutex);
-                       return;
-               }
-
-               /* mac80211 only sets assoc when in STATION mode */
-               if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-
-                       /* currently needed in a few places */
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-               } else {
-                       ctx->staging.filter_flags &=
-                               ~RXON_FILTER_ASSOC_MSK;
-               }
-
-       }
-
-       /*
-        * This needs to be after setting the BSSID in case
-        * mac80211 decides to do both changes at once because
-        * it will invoke post_associate.
-        */
-       if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
-               iwl_legacy_beacon_update(hw, vif);
-
-       if (changes & BSS_CHANGED_ERP_PREAMBLE) {
-               IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
-                                  bss_conf->use_short_preamble);
-               if (bss_conf->use_short_preamble)
-                       ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-       }
-
-       if (changes & BSS_CHANGED_ERP_CTS_PROT) {
-               IWL_DEBUG_MAC80211(priv,
-                       "ERP_CTS %d\n", bss_conf->use_cts_prot);
-               if (bss_conf->use_cts_prot &&
-                       (priv->band != IEEE80211_BAND_5GHZ))
-                       ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
-               if (bss_conf->use_cts_prot)
-                       ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-       }
-
-       if (changes & BSS_CHANGED_BASIC_RATES) {
-               /* XXX use this information
-                *
-                * To do that, remove code from iwl_legacy_set_rate() and put something
-                * like this here:
-                *
-               if (A-band)
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates;
-               else
-                       ctx->staging.ofdm_basic_rates =
-                               bss_conf->basic_rates >> 4;
-                       ctx->staging.cck_basic_rates =
-                               bss_conf->basic_rates & 0xF;
-                */
-       }
-
-       if (changes & BSS_CHANGED_HT) {
-               iwl_legacy_ht_conf(priv, vif);
-
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       if (changes & BSS_CHANGED_ASSOC) {
-               IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
-               if (bss_conf->assoc) {
-                       priv->timestamp = bss_conf->timestamp;
-
-                       if (!iwl_legacy_is_rfkill(priv))
-                               priv->cfg->ops->legacy->post_associate(priv);
-               } else
-                       iwl_legacy_set_no_assoc(priv, vif);
-       }
-
-       if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
-               IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
-                                  changes);
-               ret = iwl_legacy_send_rxon_assoc(priv, ctx);
-               if (!ret) {
-                       /* Sync active_rxon with latest change. */
-                       memcpy((void *)&ctx->active,
-                               &ctx->staging,
-                               sizeof(struct iwl_legacy_rxon_cmd));
-               }
-       }
-
-       if (changes & BSS_CHANGED_BEACON_ENABLED) {
-               if (vif->bss_conf.enable_beacon) {
-                       memcpy(ctx->staging.bssid_addr,
-                              bss_conf->bssid, ETH_ALEN);
-                       memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
-                       priv->cfg->ops->legacy->config_ap(priv);
-               } else
-                       iwl_legacy_set_no_assoc(priv, vif);
-       }
-
-       if (changes & BSS_CHANGED_IBSS) {
-               ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
-                                                       bss_conf->ibss_joined);
-               if (ret)
-                       IWL_ERR(priv, "failed to %s IBSS station %pM\n",
-                               bss_conf->ibss_joined ? "add" : "remove",
-                               bss_conf->bssid);
-       }
-
-       mutex_unlock(&priv->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data)
-{
-       struct iwl_priv *priv = data;
-       u32 inta, inta_mask;
-       u32 inta_fh;
-       unsigned long flags;
-       if (!priv)
-               return IRQ_NONE;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Disable (but don't clear!) interrupts here to avoid
-        *    back-to-back ISRs and sporadic interrupts from our NIC.
-        * If we have something to service, the tasklet will re-enable ints.
-        * If we *don't* have something, we'll re-enable before leaving here. */
-       inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
-       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-       /* Discover which interrupts are active/pending */
-       inta = iwl_read32(priv, CSR_INT);
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
-       /* Ignore interrupt if there's nothing in NIC to service.
-        * This may be due to IRQ shared with another device,
-        * or due to sporadic interrupts thrown from our NIC. */
-       if (!inta && !inta_fh) {
-               IWL_DEBUG_ISR(priv,
-                       "Ignore interrupt, inta == 0, inta_fh == 0\n");
-               goto none;
-       }
-
-       if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-               /* Hardware disappeared. It might have already raised
-                * an interrupt */
-               IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-               goto unplugged;
-       }
-
-       IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                     inta, inta_mask, inta_fh);
-
-       inta &= ~CSR_INT_BIT_SCD;
-
-       /* iwl_irq_tasklet() will service interrupts and re-enable them */
-       if (likely(inta || inta_fh))
-               tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_HANDLED;
-
-none:
-       /* re-enable interrupts here since we don't have anything to service,
-        * but only if they were disabled by the irq handler */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_legacy_enable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_legacy_isr);
-
-/*
- * iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share
- * this function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-                              struct ieee80211_tx_info *info,
-                              __le16 fc, __le32 *tx_flags)
-{
-       if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
-               *tx_flags |= TX_CMD_FLG_RTS_MSK;
-               *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
-               if (!ieee80211_is_mgmt(fc))
-                       return;
-
-               switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-               case cpu_to_le16(IEEE80211_STYPE_AUTH):
-               case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-               case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-               case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-                       *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-                       *tx_flags |= TX_CMD_FLG_CTS_MSK;
-                       break;
-               }
-       } else if (info->control.rates[0].flags &
-                  IEEE80211_TX_RC_USE_CTS_PROTECT) {
-               *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-               *tx_flags |= TX_CMD_FLG_CTS_MSK;
-               *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644 (file)
index d1271fe..0000000
+++ /dev/null
@@ -1,636 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_core_h__
-#define __iwl_legacy_core_h__
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR     "<ilw@linux.intel.com>"
-
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
-       .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
-       .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
-       .driver_data = (kernel_ulong_t)&(cfg)
-
-#define TIME_UNIT              1024
-
-#define IWL_SKU_G       0x1
-#define IWL_SKU_A       0x2
-#define IWL_SKU_N       0x8
-
-#define IWL_CMD(x) case x: return #x
-
-struct iwl_hcmd_ops {
-       int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-       int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-       void (*set_rxon_chain)(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx);
-};
-
-struct iwl_hcmd_utils_ops {
-       u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
-       u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
-                                                               u8 *data);
-       int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
-       void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
-       int (*init)(struct iwl_priv *priv);
-       void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_debugfs_ops {
-       ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
-                                size_t count, loff_t *ppos);
-       ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
-                                size_t count, loff_t *ppos);
-       ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
-                                     size_t count, loff_t *ppos);
-};
-
-struct iwl_temp_ops {
-       void (*temperature)(struct iwl_priv *priv);
-};
-
-struct iwl_lib_ops {
-       /* set hw dependent parameters */
-       int (*set_hw_params)(struct iwl_priv *priv);
-       /* Handling TX */
-       void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
-                                       struct iwl_tx_queue *txq,
-                                       u16 byte_cnt);
-       int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
-                                    struct iwl_tx_queue *txq,
-                                    dma_addr_t addr,
-                                    u16 len, u8 reset, u8 pad);
-       void (*txq_free_tfd)(struct iwl_priv *priv,
-                            struct iwl_tx_queue *txq);
-       int (*txq_init)(struct iwl_priv *priv,
-                       struct iwl_tx_queue *txq);
-       /* setup Rx handler */
-       void (*rx_handler_setup)(struct iwl_priv *priv);
-       /* alive notification after init uCode load */
-       void (*init_alive_start)(struct iwl_priv *priv);
-       /* check validity of rtc data address */
-       int (*is_valid_rtc_data_addr)(u32 addr);
-       /* 1st ucode load */
-       int (*load_ucode)(struct iwl_priv *priv);
-
-       void (*dump_nic_error_log)(struct iwl_priv *priv);
-       int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
-       int (*set_channel_switch)(struct iwl_priv *priv,
-                                 struct ieee80211_channel_switch *ch_switch);
-       /* power management */
-       struct iwl_apm_ops apm_ops;
-
-       /* power */
-       int (*send_tx_power) (struct iwl_priv *priv);
-       void (*update_chain_flags)(struct iwl_priv *priv);
-
-       /* eeprom operations (as defined in iwl-eeprom.h) */
-       struct iwl_eeprom_ops eeprom_ops;
-
-       /* temperature */
-       struct iwl_temp_ops temp_ops;
-
-       struct iwl_debugfs_ops debugfs_ops;
-
-};
-
-struct iwl_led_ops {
-       int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-};
-
-struct iwl_legacy_ops {
-       void (*post_associate)(struct iwl_priv *priv);
-       void (*config_ap)(struct iwl_priv *priv);
-       /* station management */
-       int (*update_bcast_stations)(struct iwl_priv *priv);
-       int (*manage_ibss_station)(struct iwl_priv *priv,
-                                  struct ieee80211_vif *vif, bool add);
-};
-
-struct iwl_ops {
-       const struct iwl_lib_ops *lib;
-       const struct iwl_hcmd_ops *hcmd;
-       const struct iwl_hcmd_utils_ops *utils;
-       const struct iwl_led_ops *led;
-       const struct iwl_nic_ops *nic;
-       const struct iwl_legacy_ops *legacy;
-       const struct ieee80211_ops *ieee80211_ops;
-};
-
-struct iwl_mod_params {
-       int sw_crypto;          /* def: 0 = using hardware encryption */
-       int disable_hw_scan;    /* def: 0 = use h/w scan */
-       int num_of_queues;      /* def: HW dependent */
-       int disable_11n;        /* def: 0 = 11n capabilities enabled */
-       int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
-       int antenna;            /* def: 0 = both antennas (use diversity) */
-       int restart_fw;         /* def: 1 = restart firmware */
-};
-
-/*
- * @led_compensation: compensate the led on/off time per HW according
- *     to the deviation to achieve the desired led frequency.
- *     The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature reported by uCode in kelvin
- * @ucode_tracing: support ucode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- *     sensitivity calibration operation
- * @chain_noise_calib_by_driver: driver has the capability to perform
- *     chain noise calibration operation
- */
-struct iwl_base_params {
-       int eeprom_size;
-       int num_of_queues;      /* def: HW dependent */
-       int num_of_ampdu_queues;/* def: HW dependent */
-       /* for iwl_legacy_apm_init() */
-       u32 pll_cfg_val;
-       bool set_l0s;
-       bool use_bsm;
-
-       u16 led_compensation;
-       int chain_noise_num_beacons;
-       unsigned int wd_timeout;
-       bool temperature_kelvin;
-       const bool ucode_tracing;
-       const bool sensitivity_calib_by_driver;
-       const bool chain_noise_calib_by_driver;
-};
-
-/**
- * struct iwl_cfg
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- *     (.ucode) will be added to the filename before loading from disk. The
- *     filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- *     Driver interacts with Firmware API version >= 2.
- * } else {
- *     Driver interacts with Firmware API version 1.
- * }
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, through utilizing the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions as well as their API
- * versions.
- *
- */
-struct iwl_cfg {
-       /* params specific to an individual device within a device family */
-       const char *name;
-       const char *fw_name_pre;
-       const unsigned int ucode_api_max;
-       const unsigned int ucode_api_min;
-       u8   valid_tx_ant;
-       u8   valid_rx_ant;
-       unsigned int sku;
-       u16  eeprom_ver;
-       u16  eeprom_calib_ver;
-       const struct iwl_ops *ops;
-       /* module based parameters which can be set from modprobe cmd */
-       const struct iwl_mod_params *mod_params;
-       /* params not likely to change within a device family */
-       struct iwl_base_params *base_params;
-       /* params likely to change within a device family */
-       u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
-       enum iwl_led_mode led_mode;
-};
-
-/***************************
- *   L i b                 *
- ***************************/
-
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif, u16 queue,
-                   const struct ieee80211_tx_queue_params *params);
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
-                       int hw_decrypt);
-int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx);
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx);
-int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
-                       struct ieee80211_channel *ch,
-                       struct iwl_rxon_context *ctx);
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx,
-                           enum ieee80211_band band,
-                           struct ieee80211_vif *vif);
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
-                                 enum ieee80211_band band);
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
-                       struct iwl_ht_config *ht_conf);
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx,
-                           struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
-                                  struct iwl_rxon_context *ctx);
-void iwl_legacy_set_rate(struct iwl_priv *priv);
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
-                          struct ieee80211_hdr *hdr,
-                          u32 decrypt_res,
-                          struct ieee80211_rx_status *stats);
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
-int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
-                         struct ieee80211_vif *vif);
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif);
-int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
-                            struct ieee80211_vif *vif,
-                            enum nl80211_iftype newtype, bool newp2p);
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_legacy_txq_mem(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
-                               u16 length, struct ieee80211_hdr *header);
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
-                               u16 length, struct ieee80211_hdr *header);
-const char *iwl_legacy_get_mgmt_string(int cmd);
-const char *iwl_legacy_get_ctrl_string(int cmd);
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
-                     u16 len);
-#else
-static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
-       return 0;
-}
-static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
-                     u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
-                     u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
-                                   __le16 fc, u16 len)
-{
-}
-#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
-                          struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
-                       struct iwl_rx_mem_buffer *rxb);
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
-                                 struct iwl_rx_queue *q);
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                                         struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
-                               struct iwl_rx_packet *pkt);
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
-                                       struct iwl_tx_queue *txq);
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-                     int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
-                       struct iwl_tx_queue *txq,
-                       int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
-                           struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_legacy_init_scan_params(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_legacy_force_scan_end(struct iwl_priv *priv);
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif,
-                   struct cfg80211_scan_request *req);
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
-u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
-                       struct ieee80211_mgmt *frame,
-                      const u8 *ta, const u8 *ie, int ie_len, int left);
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
-                             enum ieee80211_band band,
-                             u8 n_probes);
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
-                              enum ieee80211_band band,
-                              struct ieee80211_vif *vif);
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
-
-/* For faster active scanning, scan will move to the next channel if fewer than
- * PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
- * time if it's a quiet channel (nothing responded to our probe, and there's
- * no other traffic).
- * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
-#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
-
-#define IWL_SCAN_CHECK_WATCHDOG                (HZ * 7)
-
-/*****************************************************
- *   S e n d i n g     H o s t     C o m m a n d s   *
- *****************************************************/
-
-const char *iwl_legacy_get_cmd_string(u8 cmd);
-int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
-                                  struct iwl_host_cmd *cmd);
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
-                                 u16 len, const void *data);
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
-                          const void *data,
-                          void (*callback)(struct iwl_priv *priv,
-                                           struct iwl_device_cmd *cmd,
-                                           struct iwl_rx_packet *pkt));
-
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI                                              *
- *****************************************************/
-
-static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
-{
-       int pos;
-       u16 pci_lnk_ctl;
-       pos = pci_pcie_cap(priv->pci_dev);
-       pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
-       return pci_lnk_ctl;
-}
-
-void iwl_legacy_bg_watchdog(unsigned long data);
-u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
-                                       u32 usec, u32 beacon_interval);
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
-                          u32 addon, u32 beacon_interval);
-
-#ifdef CONFIG_PM
-int iwl_legacy_pci_suspend(struct device *device);
-int iwl_legacy_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_legacy_pm_ops;
-
-#define IWL_LEGACY_PM_OPS      (&iwl_legacy_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_LEGACY_PM_OPS      NULL
-
-#endif /* !CONFIG_PM */
-
-/*****************************************************
-*  Error Handling Debugging
-******************************************************/
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
-                            struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
-                                          struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
-
-/*****************************************************
-*  GEOS
-******************************************************/
-int iwl_legacy_init_geos(struct iwl_priv *priv);
-void iwl_legacy_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS   *****/
-
-#define STATUS_HCMD_ACTIVE     0       /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED     2
-#define STATUS_RF_KILL_HW      3
-#define STATUS_CT_KILL         4
-#define STATUS_INIT            5
-#define STATUS_ALIVE           6
-#define STATUS_READY           7
-#define STATUS_TEMPERATURE     8
-#define STATUS_GEO_CONFIGURED  9
-#define STATUS_EXIT_PENDING    10
-#define STATUS_STATISTICS      12
-#define STATUS_SCANNING                13
-#define STATUS_SCAN_ABORTING   14
-#define STATUS_SCAN_HW         15
-#define STATUS_POWER_PMI       16
-#define STATUS_FW_ERROR                17
-#define STATUS_CHANNEL_SWITCH_PENDING 18
-
-static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
-{
-       /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
-        * set but EXIT_PENDING is not */
-       return test_bit(STATUS_READY, &priv->status) &&
-              test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
-              !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
-{
-       return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_legacy_is_init(struct iwl_priv *priv)
-{
-       return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
-{
-       return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
-{
-       return iwl_legacy_is_rfkill_hw(priv);
-}
-
-static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
-{
-       return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
-{
-
-       if (iwl_legacy_is_rfkill(priv))
-               return 0;
-
-       return iwl_legacy_is_ready(priv);
-}
-
-extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
-extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
-                                      u8 flags, bool clear);
-void iwl_legacy_apm_stop(struct iwl_priv *priv);
-int iwl_legacy_apm_init(struct iwl_priv *priv);
-
-int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
-                               struct iwl_rxon_context *ctx);
-static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
-                                     struct iwl_rxon_context *ctx)
-{
-       return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
-}
-static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
-                                     struct iwl_rxon_context *ctx)
-{
-       return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
-                       struct iwl_priv *priv, enum ieee80211_band band)
-{
-       return priv->hw->wiphy->bands[band];
-}
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif,
-                                    struct ieee80211_bss_conf *bss_conf,
-                                    u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
-                               struct ieee80211_tx_info *info,
-                               __le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data);
-
-#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644 (file)
index ae13112..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_debug_h__
-#define __iwl_legacy_debug_h__
-
-struct iwl_priv;
-extern u32 iwlegacy_debug_level;
-
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
-
-#define iwl_print_hex_error(priv, p, len)                               \
-do {                                                                   \
-       print_hex_dump(KERN_ERR, "iwl data: ",                          \
-                      DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);           \
-} while (0)
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...)                         \
-do {                                                                   \
-       if (iwl_legacy_get_debug_level(__priv) & (level))                       \
-               dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),         \
-                        "%c %s " fmt, in_interrupt() ? 'I' : 'U',      \
-                       __func__ , ## args);                            \
-} while (0)
-
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)                   \
-do {                                                                   \
-       if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit())  \
-               dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev),         \
-                       "%c %s " fmt, in_interrupt() ? 'I' : 'U',       \
-                        __func__ , ## args);                           \
-} while (0)
-
-#define iwl_print_hex_dump(priv, level, p, len)                        \
-do {                                                                   \
-       if (iwl_legacy_get_debug_level(priv) & level)                           \
-               print_hex_dump(KERN_DEBUG, "iwl data: ",                \
-                              DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);   \
-} while (0)
-
-#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
-                                     const void *p, u32 len)
-{}
-#endif                         /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int
-iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
-       return 0;
-}
-static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif                         /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-/*
- * To use the debug system:
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of
- *
- * #define IWL_DL_xxxx VALUE
- *
- * where xxxx should be the name of the classification (for example, WEP).
- *
- * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
- * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * The active debug levels can be accessed via files
- *
- *     /sys/module/iwl4965/parameters/debug{50}
- *     /sys/module/iwl3945/parameters/debug
- *     /sys/class/net/wlan0/device/debug_level
- *
- * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
- */
-
-/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO            (1 << 0)
-#define IWL_DL_MAC80211                (1 << 1)
-#define IWL_DL_HCMD            (1 << 2)
-#define IWL_DL_STATE           (1 << 3)
-/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP         (1 << 4)
-#define IWL_DL_HCMD_DUMP       (1 << 5)
-#define IWL_DL_EEPROM          (1 << 6)
-#define IWL_DL_RADIO           (1 << 7)
-/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER           (1 << 8)
-#define IWL_DL_TEMP            (1 << 9)
-#define IWL_DL_NOTIF           (1 << 10)
-#define IWL_DL_SCAN            (1 << 11)
-/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC           (1 << 12)
-#define IWL_DL_DROP            (1 << 13)
-#define IWL_DL_TXPOWER         (1 << 14)
-#define IWL_DL_AP              (1 << 15)
-/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW              (1 << 16)
-#define IWL_DL_RF_KILL         (1 << 17)
-#define IWL_DL_FW_ERRORS       (1 << 18)
-#define IWL_DL_LED             (1 << 19)
-/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE            (1 << 20)
-#define IWL_DL_CALIB           (1 << 21)
-#define IWL_DL_WEP             (1 << 22)
-#define IWL_DL_TX              (1 << 23)
-/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX              (1 << 24)
-#define IWL_DL_ISR             (1 << 25)
-#define IWL_DL_HT              (1 << 26)
-#define IWL_DL_IO              (1 << 27)
-/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H             (1 << 28)
-#define IWL_DL_STATS           (1 << 29)
-#define IWL_DL_TX_REPLY                (1 << 30)
-#define IWL_DL_QOS             (1 << 31)
-
-#define IWL_DEBUG_INFO(p, f, a...)     IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
-#define IWL_DEBUG_TEMP(p, f, a...)     IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
-#define IWL_DEBUG_SCAN(p, f, a...)     IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
-#define IWL_DEBUG_RX(p, f, a...)       IWL_DEBUG(p, IWL_DL_RX, f, ## a)
-#define IWL_DEBUG_TX(p, f, a...)       IWL_DEBUG(p, IWL_DL_TX, f, ## a)
-#define IWL_DEBUG_ISR(p, f, a...)      IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
-#define IWL_DEBUG_LED(p, f, a...)      IWL_DEBUG(p, IWL_DL_LED, f, ## a)
-#define IWL_DEBUG_WEP(p, f, a...)      IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(p, f, a...)       IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...)   IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
-#define IWL_DEBUG_CALIB(p, f, a...)    IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
-#define IWL_DEBUG_FW(p, f, a...)       IWL_DEBUG(p, IWL_DL_FW, f, ## a)
-#define IWL_DEBUG_RF_KILL(p, f, a...)  IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_DROP(p, f, a...)     IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_DROP_LIMIT(p, f, a...)       \
-               IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...)       IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...)  IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
-#define IWL_DEBUG_IO(p, f, a...)       IWL_DEBUG(p, IWL_DL_IO, f, ## a)
-#define IWL_DEBUG_RATE(p, f, a...)     IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_RATE_LIMIT(p, f, a...)       \
-               IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...)    IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
-#define IWL_DEBUG_ASSOC(p, f, a...)    \
-               IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...)      \
-               IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_HT(p, f, a...)       IWL_DEBUG(p, IWL_DL_HT, f, ## a)
-#define IWL_DEBUG_STATS(p, f, a...)    IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...)      \
-               IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
-               IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...)      IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
-#define IWL_DEBUG_RADIO(p, f, a...)    IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
-#define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
-#define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
-
-#endif
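[Editor's note, not part of the diff: the removed header documents a bit-per-classification debug scheme in which each IWL_DL_* class is one bit of a 32-bit level and a message is printed only when its class bit is enabled at run time. The stand-alone sketch below illustrates that scheme; the DL_* values and DBG() macro are illustrative stand-ins, while the real macros route through dev_printk() and read the level from a module parameter or debugfs.]

#include <stdio.h>
#include <stdint.h>

/* One bit per debug classification, mirroring the IWL_DL_* layout above. */
#define DL_INFO  (1u << 0)
#define DL_SCAN  (1u << 11)
#define DL_RX    (1u << 24)

static uint32_t debug_level = DL_INFO | DL_RX;  /* normally a module param */

/* Emit only when the message's class bit is set in the runtime level. */
#define DBG(level, fmt, args...)                                     \
        do {                                                         \
                if (debug_level & (level))                           \
                        fprintf(stderr, "%s: " fmt, __func__, ## args); \
        } while (0)

int main(void)
{
        DBG(DL_RX,   "rx handled, len=%d\n", 42);   /* printed      */
        DBG(DL_SCAN, "scan started\n");             /* filtered out */
        return 0;
}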
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644 (file)
index 1407dca..0000000
+++ /dev/null
@@ -1,1314 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/ieee80211.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do {                      \
-       if (!debugfs_create_file(#name, mode, parent, priv,             \
-                        &iwl_legacy_dbgfs_##name##_ops))               \
-               goto err;                                               \
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {                       \
-       struct dentry *__tmp;                                           \
-       __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,           \
-                                   parent, ptr);                       \
-       if (IS_ERR(__tmp) || !__tmp)                                    \
-               goto err;                                               \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do {                                \
-       struct dentry *__tmp;                                           \
-       __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,            \
-                                  parent, ptr);                        \
-       if (IS_ERR(__tmp) || !__tmp)                                    \
-               goto err;                                               \
-} while (0)
-
-/* file operation */
-#define DEBUGFS_READ_FUNC(name)                                         \
-static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file,               \
-                                       char __user *user_buf,          \
-                                       size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name)                                        \
-static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file,              \
-                                       const char __user *user_buf,    \
-                                       size_t count, loff_t *ppos);
-
-
-static int
-iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
-       file->private_data = inode->i_private;
-       return 0;
-}
-
-#define DEBUGFS_READ_FILE_OPS(name)                            \
-       DEBUGFS_READ_FUNC(name);                                        \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
-       .read = iwl_legacy_dbgfs_##name##_read,                         \
-       .open = iwl_legacy_dbgfs_open_file_generic,                     \
-       .llseek = generic_file_llseek,                                  \
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name)                           \
-       DEBUGFS_WRITE_FUNC(name);                                       \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
-       .write = iwl_legacy_dbgfs_##name##_write,                       \
-       .open = iwl_legacy_dbgfs_open_file_generic,                     \
-       .llseek = generic_file_llseek,                                  \
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name)                           \
-       DEBUGFS_READ_FUNC(name);                                        \
-       DEBUGFS_WRITE_FUNC(name);                                       \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {  \
-       .write = iwl_legacy_dbgfs_##name##_write,                       \
-       .read = iwl_legacy_dbgfs_##name##_read,                         \
-       .open = iwl_legacy_dbgfs_open_file_generic,                     \
-       .llseek = generic_file_llseek,                                  \
-};
-
-static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       char *buf;
-       int pos = 0;
-
-       int cnt;
-       ssize_t ret;
-       const size_t bufsz = 100 +
-               sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
-       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "\t%25s\t\t: %u\n",
-                                iwl_legacy_get_mgmt_string(cnt),
-                                priv->tx_stats.mgmt[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
-       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "\t%25s\t\t: %u\n",
-                                iwl_legacy_get_ctrl_string(cnt),
-                                priv->tx_stats.ctrl[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
-       pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
-                        priv->tx_stats.data_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
-                        priv->tx_stats.data_bytes);
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       u32 clear_flag;
-       char buf[8];
-       int buf_size;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%x", &clear_flag) != 1)
-               return -EFAULT;
-       iwl_legacy_clear_traffic_stats(priv);
-
-       return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       char *buf;
-       int pos = 0;
-       int cnt;
-       ssize_t ret;
-       const size_t bufsz = 100 +
-               sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
-       for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "\t%25s\t\t: %u\n",
-                                iwl_legacy_get_mgmt_string(cnt),
-                                priv->rx_stats.mgmt[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
-       for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "\t%25s\t\t: %u\n",
-                                iwl_legacy_get_ctrl_string(cnt),
-                                priv->rx_stats.ctrl[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
-       pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
-                        priv->rx_stats.data_cnt);
-       pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
-                        priv->rx_stats.data_bytes);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
-static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       u32 val;
-       char *buf;
-       ssize_t ret;
-       int i;
-       int pos = 0;
-       struct iwl_priv *priv = file->private_data;
-       size_t bufsz;
-
-       /* default is to dump the entire data segment */
-       if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
-               priv->dbgfs_sram_offset = 0x800000;
-               if (priv->ucode_type == UCODE_INIT)
-                       priv->dbgfs_sram_len = priv->ucode_init_data.len;
-               else
-                       priv->dbgfs_sram_len = priv->ucode_data.len;
-       }
-       bufsz =  30 + priv->dbgfs_sram_len * sizeof(char) * 10;
-       buf = kmalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
-                       priv->dbgfs_sram_len);
-       pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
-                       priv->dbgfs_sram_offset);
-       for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
-               val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
-                                       priv->dbgfs_sram_len - i);
-               if (i < 4) {
-                       switch (i) {
-                       case 1:
-                               val &= BYTE1_MASK;
-                               break;
-                       case 2:
-                               val &= BYTE2_MASK;
-                               break;
-                       case 3:
-                               val &= BYTE3_MASK;
-                               break;
-                       }
-               }
-               if (!(i % 16))
-                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-               pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[64];
-       int buf_size;
-       u32 offset, len;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-
-       if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
-               priv->dbgfs_sram_offset = offset;
-               priv->dbgfs_sram_len = len;
-       } else {
-               priv->dbgfs_sram_offset = 0;
-               priv->dbgfs_sram_len = 0;
-       }
-
-       return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       struct iwl_station_entry *station;
-       int max_sta = priv->hw_params.max_stations;
-       char *buf;
-       int i, j, pos = 0;
-       ssize_t ret;
-       /* Add 30 for initial string */
-       const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
-
-       buf = kmalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
-                       priv->num_stations);
-
-       for (i = 0; i < max_sta; i++) {
-               station = &priv->stations[i];
-               if (!station->used)
-                       continue;
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                "station %d - addr: %pM, flags: %#x\n",
-                                i, station->sta.sta.addr,
-                                station->sta.station_flags_msk);
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "TID\tseq_num\ttxq_id\tframes\ttfds\t");
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "start_idx\tbitmap\t\t\trate_n_flags\n");
-
-               for (j = 0; j < MAX_TID_COUNT; j++) {
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                               "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
-                               j, station->tid[j].seq_number,
-                               station->tid[j].agg.txq_id,
-                               station->tid[j].agg.frame_count,
-                               station->tid[j].tfds_in_queue,
-                               station->tid[j].agg.start_idx,
-                               station->tid[j].agg.bitmap,
-                               station->tid[j].agg.rate_n_flags);
-
-                       if (station->tid[j].agg.wait_for_ba)
-                               pos += scnprintf(buf + pos, bufsz - pos,
-                                                " - waitforba");
-                       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-               }
-
-               pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       }
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
-                                      char __user *user_buf,
-                                      size_t count,
-                                      loff_t *ppos)
-{
-       ssize_t ret;
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0, ofs = 0, buf_size = 0;
-       const u8 *ptr;
-       char *buf;
-       u16 eeprom_ver;
-       size_t eeprom_len = priv->cfg->base_params->eeprom_size;
-       buf_size = 4 * eeprom_len + 256;
-
-       if (eeprom_len % 16) {
-               IWL_ERR(priv, "NVM size is not multiple of 16.\n");
-               return -ENODATA;
-       }
-
-       ptr = priv->eeprom;
-       if (!ptr) {
-               IWL_ERR(priv, "Invalid EEPROM memory\n");
-               return -ENOMEM;
-       }
-
-       /* 4 characters for byte 0xYY */
-       buf = kzalloc(buf_size, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-       eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
-       pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
-                       "version: 0x%x\n", eeprom_ver);
-       for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-               hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-                                  buf_size - pos, 0);
-               pos += strlen(buf + pos);
-               if (buf_size - pos > 0)
-                       buf[pos++] = '\n';
-       }
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       struct ieee80211_channel *channels = NULL;
-       const struct ieee80211_supported_band *supp_band = NULL;
-       int pos = 0, i, bufsz = PAGE_SIZE;
-       char *buf;
-       ssize_t ret;
-
-       if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
-               return -EAGAIN;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
-       if (supp_band) {
-               channels = supp_band->channels;
-
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "Displaying %d channels in 2.4GHz band 802.11bg):\n",
-                               supp_band->n_channels);
-
-               for (i = 0; i < supp_band->n_channels; i++)
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                               "%d: %ddBm: BSS%s%s, %s.\n",
-                               channels[i].hw_value,
-                               channels[i].max_power,
-                               channels[i].flags & IEEE80211_CHAN_RADAR ?
-                               " (IEEE 802.11h required)" : "",
-                               ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
-                               || (channels[i].flags &
-                               IEEE80211_CHAN_RADAR)) ? "" :
-                               ", IBSS",
-                               channels[i].flags &
-                               IEEE80211_CHAN_PASSIVE_SCAN ?
-                               "passive only" : "active/passive");
-       }
-       supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
-       if (supp_band) {
-               channels = supp_band->channels;
-
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "Displaying %d channels in 5.2GHz band (802.11a)\n",
-                               supp_band->n_channels);
-
-               for (i = 0; i < supp_band->n_channels; i++)
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                               "%d: %ddBm: BSS%s%s, %s.\n",
-                               channels[i].hw_value,
-                               channels[i].max_power,
-                               channels[i].flags & IEEE80211_CHAN_RADAR ?
-                               " (IEEE 802.11h required)" : "",
-                               ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
-                               || (channels[i].flags &
-                               IEEE80211_CHAN_RADAR)) ? "" :
-                               ", IBSS",
-                               channels[i].flags &
-                               IEEE80211_CHAN_PASSIVE_SCAN ?
-                               "passive only" : "active/passive");
-       }
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       char buf[512];
-       int pos = 0;
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
-               test_bit(STATUS_HCMD_ACTIVE, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
-               test_bit(STATUS_INT_ENABLED, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
-               test_bit(STATUS_RF_KILL_HW, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
-               test_bit(STATUS_CT_KILL, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
-               test_bit(STATUS_INIT, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
-               test_bit(STATUS_ALIVE, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
-               test_bit(STATUS_READY, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
-               test_bit(STATUS_TEMPERATURE, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
-               test_bit(STATUS_GEO_CONFIGURED, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
-               test_bit(STATUS_EXIT_PENDING, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
-               test_bit(STATUS_STATISTICS, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
-               test_bit(STATUS_SCANNING, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
-               test_bit(STATUS_SCAN_ABORTING, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
-               test_bit(STATUS_SCAN_HW, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
-               test_bit(STATUS_POWER_PMI, &priv->status));
-       pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
-               test_bit(STATUS_FW_ERROR, &priv->status));
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       int cnt = 0;
-       char *buf;
-       int bufsz = 24 * 64; /* 24 items * 64 char per item */
-       ssize_t ret;
-
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "Interrupt Statistics Report:\n");
-
-       pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
-               priv->isr_stats.hw);
-       pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
-               priv->isr_stats.sw);
-       if (priv->isr_stats.sw || priv->isr_stats.hw) {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                       "\tLast Restarting Code:  0x%X\n",
-                       priv->isr_stats.err_code);
-       }
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
-               priv->isr_stats.sch);
-       pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
-               priv->isr_stats.alive);
-#endif
-       pos += scnprintf(buf + pos, bufsz - pos,
-               "HW RF KILL switch toggled:\t %u\n",
-               priv->isr_stats.rfkill);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
-               priv->isr_stats.ctkill);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
-               priv->isr_stats.wakeup);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-               "Rx command responses:\t\t %u\n",
-               priv->isr_stats.rx);
-       for (cnt = 0; cnt < REPLY_MAX; cnt++) {
-               if (priv->isr_stats.rx_handlers[cnt] > 0)
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                               "\tRx handler[%36s]:\t\t %u\n",
-                               iwl_legacy_get_cmd_string(cnt),
-                               priv->isr_stats.rx_handlers[cnt]);
-       }
-
-       pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
-               priv->isr_stats.tx);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
-               priv->isr_stats.unhandled);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[8];
-       int buf_size;
-       u32 reset_flag;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%x", &reset_flag) != 1)
-               return -EFAULT;
-       if (reset_flag == 0)
-               iwl_legacy_clear_isr_stats(priv);
-
-       return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       struct iwl_rxon_context *ctx;
-       int pos = 0, i;
-       char buf[256 * NUM_IWL_RXON_CTX];
-       const size_t bufsz = sizeof(buf);
-
-       for_each_context(priv, ctx) {
-               pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
-                                ctx->ctxid);
-               for (i = 0; i < AC_NUM; i++) {
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                               "\tcw_min\tcw_max\taifsn\ttxop\n");
-                       pos += scnprintf(buf + pos, bufsz - pos,
-                               "AC[%d]\t%u\t%u\t%u\t%u\n", i,
-                               ctx->qos_data.def_qos_parm.ac[i].cw_min,
-                               ctx->qos_data.def_qos_parm.ac[i].cw_max,
-                               ctx->qos_data.def_qos_parm.ac[i].aifsn,
-                               ctx->qos_data.def_qos_parm.ac[i].edca_txop);
-               }
-               pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       }
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[8];
-       int buf_size;
-       int ht40;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%d", &ht40) != 1)
-               return -EFAULT;
-       if (!iwl_legacy_is_any_associated(priv))
-               priv->disable_ht40 = ht40 ? true : false;
-       else {
-               IWL_ERR(priv, "Sta associated with AP - "
-                       "Change to 40MHz channel support is not allowed\n");
-               return -EINVAL;
-       }
-
-       return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[100];
-       int pos = 0;
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "11n 40MHz Mode: %s\n",
-                       priv->disable_ht40 ? "Disabled" : "Enabled");
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_FILE_OPS(nvm);
-DEBUGFS_READ_FILE_OPS(stations);
-DEBUGFS_READ_FILE_OPS(channels);
-DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0, ofs = 0;
-       int cnt = 0, entry;
-       struct iwl_tx_queue *txq;
-       struct iwl_queue *q;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       char *buf;
-       int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
-               (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
-       const u8 *ptr;
-       ssize_t ret;
-
-       if (!priv->txq) {
-               IWL_ERR(priv, "txq not ready\n");
-               return -EAGAIN;
-       }
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate buffer\n");
-               return -ENOMEM;
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
-       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-               txq = &priv->txq[cnt];
-               q = &txq->q;
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "q[%d]: read_ptr: %u, write_ptr: %u\n",
-                               cnt, q->read_ptr, q->write_ptr);
-       }
-       if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
-               ptr = priv->tx_traffic;
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
-               for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
-                       for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
-                            entry++,  ofs += 16) {
-                               pos += scnprintf(buf + pos, bufsz - pos,
-                                               "0x%.4x ", ofs);
-                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
-                                                  buf + pos, bufsz - pos, 0);
-                               pos += strlen(buf + pos);
-                               if (bufsz - pos > 0)
-                                       buf[pos++] = '\n';
-                       }
-               }
-       }
-
-       pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "read: %u, write: %u\n",
-                        rxq->read, rxq->write);
-
-       if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
-               ptr = priv->rx_traffic;
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
-               for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
-                       for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
-                            entry++,  ofs += 16) {
-                               pos += scnprintf(buf + pos, bufsz - pos,
-                                               "0x%.4x ", ofs);
-                               hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
-                                                  buf + pos, bufsz - pos, 0);
-                               pos += strlen(buf + pos);
-                               if (bufsz - pos > 0)
-                                       buf[pos++] = '\n';
-                       }
-               }
-       }
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[8];
-       int buf_size;
-       int traffic_log;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%d", &traffic_log) != 1)
-               return -EFAULT;
-       if (traffic_log == 0)
-               iwl_legacy_reset_traffic_log(priv);
-
-       return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       struct iwl_tx_queue *txq;
-       struct iwl_queue *q;
-       char *buf;
-       int pos = 0;
-       int cnt;
-       int ret;
-       const size_t bufsz = sizeof(char) * 64 *
-                               priv->cfg->base_params->num_of_queues;
-
-       if (!priv->txq) {
-               IWL_ERR(priv, "txq not ready\n");
-               return -EAGAIN;
-       }
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-               txq = &priv->txq[cnt];
-               q = &txq->q;
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "hwq %.2d: read=%u write=%u stop=%d"
-                               " swq_id=%#.2x (ac %d/hwq %d)\n",
-                               cnt, q->read_ptr, q->write_ptr,
-                               !!test_bit(cnt, priv->queue_stopped),
-                               txq->swq_id, txq->swq_id & 3,
-                               (txq->swq_id >> 2) & 0x1f);
-               if (cnt >= 4)
-                       continue;
-               /* for the ACs, display the stop count too */
-               pos += scnprintf(buf + pos, bufsz - pos,
-                               "        stop-count: %d\n",
-                               atomic_read(&priv->queue_stop_count[cnt]));
-       }
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       char buf[256];
-       int pos = 0;
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
-                                               rxq->read);
-       pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
-                                               rxq->write);
-       pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
-                                               rxq->free_count);
-       if (rxq->rb_stts) {
-               pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
-                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
-       } else {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                       "closed_rb_num: Not Allocated\n");
-       }
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
-                       user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
-                       user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
-                       user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       int cnt = 0;
-       char *buf;
-       int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
-       ssize_t ret;
-       struct iwl_sensitivity_data *data;
-
-       data = &priv->sensitivity_data;
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
-                       data->auto_corr_ofdm);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "auto_corr_ofdm_mrc:\t\t %u\n",
-                       data->auto_corr_ofdm_mrc);
-       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
-                       data->auto_corr_ofdm_x1);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "auto_corr_ofdm_mrc_x1:\t\t %u\n",
-                       data->auto_corr_ofdm_mrc_x1);
-       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
-                       data->auto_corr_cck);
-       pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
-                       data->auto_corr_cck_mrc);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "last_bad_plcp_cnt_ofdm:\t\t %u\n",
-                       data->last_bad_plcp_cnt_ofdm);
-       pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
-                       data->last_fa_cnt_ofdm);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "last_bad_plcp_cnt_cck:\t\t %u\n",
-                       data->last_bad_plcp_cnt_cck);
-       pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
-                       data->last_fa_cnt_cck);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
-                       data->nrg_curr_state);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
-                       data->nrg_prev_state);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
-       for (cnt = 0; cnt < 10; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos, " %u",
-                               data->nrg_value[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
-       for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos, " %u",
-                               data->nrg_silence_rssi[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
-                       data->nrg_silence_ref);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
-                       data->nrg_energy_idx);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
-                       data->nrg_silence_idx);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
-                       data->nrg_th_cck);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "nrg_auto_corr_silence_diff:\t %u\n",
-                       data->nrg_auto_corr_silence_diff);
-       pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
-                       data->num_in_cck_no_fa);
-       pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
-                       data->nrg_th_ofdm);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-
-static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       int cnt = 0;
-       char *buf;
-       int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
-       ssize_t ret;
-       struct iwl_chain_noise_data *data;
-
-       data = &priv->chain_noise_data;
-       buf = kzalloc(bufsz, GFP_KERNEL);
-       if (!buf) {
-               IWL_ERR(priv, "Can not allocate Buffer\n");
-               return -ENOMEM;
-       }
-
-       pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
-                       data->active_chains);
-       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
-                       data->chain_noise_a);
-       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
-                       data->chain_noise_b);
-       pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
-                       data->chain_noise_c);
-       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
-                       data->chain_signal_a);
-       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
-                       data->chain_signal_b);
-       pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
-                       data->chain_signal_c);
-       pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
-                       data->beacon_count);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
-       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos, " %u",
-                               data->disconn_array[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
-       for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
-               pos += scnprintf(buf + pos, bufsz - pos, " %u",
-                               data->delta_gain_code[cnt]);
-       }
-       pos += scnprintf(buf + pos, bufsz - pos, "\n");
-       pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
-                       data->radio_write);
-       pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
-                       data->state);
-
-       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-       kfree(buf);
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
-                                                   char __user *user_buf,
-                                                   size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[60];
-       int pos = 0;
-       const size_t bufsz = sizeof(buf);
-       u32 pwrsave_status;
-
-       pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
-                       CSR_GP_REG_POWER_SAVE_STATUS_MSK;
-
-       pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
-       pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
-               (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
-               (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
-               (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
-               "error");
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[8];
-       int buf_size;
-       int clear;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%d", &clear) != 1)
-               return -EFAULT;
-
-       /* make request to uCode to retrieve statistics information */
-       mutex_lock(&priv->mutex);
-       iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
-       mutex_unlock(&priv->mutex);
-
-       return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int len = 0;
-       char buf[20];
-
-       len = sprintf(buf, "0x%04X\n",
-               le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
-                                               char __user *user_buf,
-                                               size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int len = 0;
-       char buf[20];
-
-       len = sprintf(buf, "0x%04X\n",
-       le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
-                                        char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char *buf;
-       int pos = 0;
-       ssize_t ret = -EFAULT;
-
-       if (priv->cfg->ops->lib->dump_fh) {
-               ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
-               if (buf) {
-                       ret = simple_read_from_buffer(user_buf,
-                                                     count, ppos, buf, pos);
-                       kfree(buf);
-               }
-       }
-
-       return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char buf[12];
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
-                       priv->missed_beacon_threshold);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
-                                        const char __user *user_buf,
-                                        size_t count, loff_t *ppos)
-{
-       struct iwl_priv *priv = file->private_data;
-       char buf[8];
-       int buf_size;
-       int missed;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%d", &missed) != 1)
-               return -EINVAL;
-
-       if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
-           missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
-               priv->missed_beacon_threshold =
-                       IWL_MISSED_BEACON_THRESHOLD_DEF;
-       else
-               priv->missed_beacon_threshold = missed;
-
-       return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
-                                       char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       int pos = 0;
-       char buf[300];
-       const size_t bufsz = sizeof(buf);
-       struct iwl_force_reset *force_reset;
-
-       force_reset = &priv->force_reset;
-
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "\tnumber of reset request: %d\n",
-                       force_reset->reset_request_count);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "\tnumber of reset request success: %d\n",
-                       force_reset->reset_success_count);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "\tnumber of reset request reject: %d\n",
-                       force_reset->reset_reject_count);
-       pos += scnprintf(buf + pos, bufsz - pos,
-                       "\treset duration: %lu\n",
-                       force_reset->reset_duration);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       int ret;
-       struct iwl_priv *priv = file->private_data;
-
-       ret = iwl_legacy_force_reset(priv, true);
-
-       return ret ? ret : count;
-}
-
-static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
-                                       const char __user *user_buf,
-                                       size_t count, loff_t *ppos) {
-
-       struct iwl_priv *priv = file->private_data;
-       char buf[8];
-       int buf_size;
-       int timeout;
-
-       memset(buf, 0, sizeof(buf));
-       buf_size = min(count, sizeof(buf) -  1);
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       if (sscanf(buf, "%d", &timeout) != 1)
-               return -EINVAL;
-       if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
-               timeout = IWL_DEF_WD_TIMEOUT;
-
-       priv->cfg->base_params->wd_timeout = timeout;
-       iwl_legacy_setup_watchdog(priv);
-       return count;
-}
-
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
-DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_general_stats);
-DEBUGFS_READ_FILE_OPS(sensitivity);
-DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(power_save_status);
-DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
-DEBUGFS_READ_FILE_OPS(rxon_flags);
-DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
-
-/*
- * Create the debugfs files and directories
- *
- */
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
-       struct dentry *phyd = priv->hw->wiphy->debugfsdir;
-       struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
-       dir_drv = debugfs_create_dir(name, phyd);
-       if (!dir_drv)
-               return -ENOMEM;
-
-       priv->debugfs_dir = dir_drv;
-
-       dir_data = debugfs_create_dir("data", dir_drv);
-       if (!dir_data)
-               goto err;
-       dir_rf = debugfs_create_dir("rf", dir_drv);
-       if (!dir_rf)
-               goto err;
-       dir_debug = debugfs_create_dir("debug", dir_drv);
-       if (!dir_debug)
-               goto err;
-
-       DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
-       DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
-       DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
-       DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
-       DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
-       DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
-       DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
-       DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
-       DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
-       DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
-
-       if (priv->cfg->base_params->sensitivity_calib_by_driver)
-               DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
-       if (priv->cfg->base_params->chain_noise_calib_by_driver)
-               DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
-       DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
-       DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
-       DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
-       if (priv->cfg->base_params->sensitivity_calib_by_driver)
-               DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
-                                &priv->disable_sens_cal);
-       if (priv->cfg->base_params->chain_noise_calib_by_driver)
-               DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
-                                &priv->disable_chain_noise_cal);
-       DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
-                               &priv->disable_tx_power_cal);
-       return 0;
-
-err:
-       IWL_ERR(priv, "Can't create the debugfs directory\n");
-       iwl_legacy_dbgfs_unregister(priv);
-       return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-       if (!priv->debugfs_dir)
-               return;
-
-       debugfs_remove_recursive(priv->debugfs_dir);
-       priv->debugfs_dir = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
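All of the handlers removed above follow two recurring patterns: read handlers format a value into a small stack (or kmalloc'd) buffer and hand it to simple_read_from_buffer(), while write handlers copy at most sizeof(buf) - 1 bytes from user space, parse them with sscanf(), and clamp the result before storing it. A minimal self-contained sketch of that pattern follows; the demo_* names and the threshold field are invented for illustration and are not part of the driver.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>

struct demo_state {
        int threshold;
};

/* Open helper: stash the per-device object behind the file. */
static int demo_open(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;
        return 0;
}

static ssize_t demo_read(struct file *file, char __user *user_buf,
                         size_t count, loff_t *ppos)
{
        struct demo_state *st = file->private_data;
        char buf[16];
        int len = scnprintf(buf, sizeof(buf), "%d\n", st->threshold);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t demo_write(struct file *file, const char __user *user_buf,
                          size_t count, loff_t *ppos)
{
        struct demo_state *st = file->private_data;
        char buf[16] = {0};
        int val;

        if (copy_from_user(buf, user_buf, min(count, sizeof(buf) - 1)))
                return -EFAULT;
        if (sscanf(buf, "%d", &val) != 1)
                return -EINVAL;

        st->threshold = val;    /* the real handlers clamp to a valid range */
        return count;
}

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .open   = demo_open,
        .read   = demo_read,
        .write  = demo_write,
        .llseek = default_llseek,
};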
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644 (file)
index 9c786ed..0000000
+++ /dev/null
@@ -1,1364 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-4965-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_legacy_dev_h__
-#define __iwl_legacy_dev_h__
-
-#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/wait.h>
-#include <net/ieee80211_radiotap.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-legacy-rs.h"
-
-struct iwl_tx_queue;
-
-/* CT-KILL constants */
-#define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
-
-/* Default noise level to report when noise measurement is not available.
- *   This may be because we're:
- *   1)  Not associated (4965, no beacon statistics being sent to driver)
- *   2)  Scanning (noise measurement does not apply to associated channel)
- *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- *   Also, -127 works better than 0 when averaging frames with/without
- *   noise info (e.g. averaging might be done in app); measured dBm values are
- *   always negative ... using a negative value as the default keeps all
- *   averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- *   a value of 0 means RTS on all data/management packets
- *   a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- *   than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD     2347U
-#define MIN_RTS_THRESHOLD         0U
-#define MAX_RTS_THRESHOLD         2347U
-#define MAX_MSDU_SIZE            2304U
-#define MAX_MPDU_SIZE            2346U
-#define DEFAULT_BEACON_INTERVAL   100U
-#define        DEFAULT_SHORT_RETRY_LIMIT 7U
-#define        DEFAULT_LONG_RETRY_LIMIT  4U
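A hedged reading of the RTS rule described in the comment above, expressed as code (the helper name is invented for illustration and is not driver API):

/* Illustrative only: does a frame of 'mpdu_len' bytes need an RTS/CTS
 * exchange under threshold 'rts'?  Per the rule above, 0 means RTS for
 * every data/management frame, and a value above the maximum MSDU size
 * disables RTS entirely. */
static inline bool example_needs_rts(unsigned int mpdu_len, unsigned int rts)
{
        if (rts == 0)
                return true;
        if (rts > MAX_MSDU_SIZE)
                return false;
        return mpdu_len > rts;
}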
-
-struct iwl_rx_mem_buffer {
-       dma_addr_t page_dma;
-       struct page *page;
-       struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
-       /* only for SYNC commands, iff the reply skb is wanted */
-       struct iwl_host_cmd *source;
-       /*
-        * only for ASYNC commands
-        * (which is somewhat stupid -- look at iwl-sta.c for instance
-        * which duplicates a bunch of code because the callback isn't
-        * invoked for SYNC commands, if it were and its result passed
-        * through it would be simpler...)
-        */
-       void (*callback)(struct iwl_priv *priv,
-                        struct iwl_device_cmd *cmd,
-                        struct iwl_rx_packet *pkt);
-
-       /* The CMD_SIZE_HUGE flag bit indicates that the command
-        * structure is stored at the end of the shared queue memory. */
-       u32 flags;
-
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues
- */
-struct iwl_queue {
-       int n_bd;              /* number of BDs in this queue */
-       int write_ptr;       /* 1st empty entry (index), host write (host_w) */
-       int read_ptr;         /* last used entry (index), host read (host_r) */

-       /* use for monitoring and recovering the stuck queue */
-       dma_addr_t dma_addr;   /* physical addr for BD's */
-       int n_window;          /* safe queue window */
-       u32 id;
-       int low_mark;          /* low watermark, resume queue if free
-                               * space more than this */
-       int high_mark;         /* high watermark, stop queue if free
-                               * space less than this */
-};
-
-/* One for each TFD */
-struct iwl_tx_info {
-       struct sk_buff *skb;
-       struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates the queue is used for HT aggregation (HT AGG)
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
-       struct iwl_queue q;
-       void *tfds;
-       struct iwl_device_cmd **cmd;
-       struct iwl_cmd_meta *meta;
-       struct iwl_tx_info *txb;
-       unsigned long time_stamp;
-       u8 need_update;
-       u8 sched_retry;
-       u8 active;
-       u8 swq_id;
-};
-
-#define IWL_NUM_SCAN_RATES         (2)
-
-struct iwl4965_channel_tgd_info {
-       u8 type;
-       s8 max_power;
-};
-
-struct iwl4965_channel_tgh_info {
-       s64 last_radar_time;
-};
-
-#define IWL4965_MAX_RATE (33)
-
-struct iwl3945_clip_group {
-       /* maximum power level to prevent clipping for each rate, derived by
-        *   us from this band's saturation power in EEPROM */
-       const s8 clip_powers[IWL_MAX_RATES];
-};
-
-/* current Tx power values to use, one for each rate for each channel.
- * requested power is limited by:
- * -- regulatory EEPROM limits for this channel
- * -- hardware capabilities (clip-powers)
- * -- spectrum management
- * -- user preference (e.g. iwconfig)
- * when requested power is set, base power index must also be set. */
-struct iwl3945_channel_power_info {
-       struct iwl3945_tx_power tpc;    /* actual radio and DSP gain settings */
-       s8 power_table_index;   /* actual (compensated) index into gain table */
-       s8 base_power_index;    /* gain index for power at factory temp. */
-       s8 requested_power;     /* power (dBm) requested for this chnl/rate */
-};
-
-/* current scan Tx power values to use, one for each scan rate for each
- * channel. */
-struct iwl3945_scan_power_info {
-       struct iwl3945_tx_power tpc;    /* actual radio and DSP gain settings */
-       s8 power_table_index;   /* actual (compensated) index into gain table */
-       s8 requested_power;     /* scan pwr (dBm) requested for chnl/rate */
-};
-
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- *     with one another!
- */
-struct iwl_channel_info {
-       struct iwl4965_channel_tgd_info tgd;
-       struct iwl4965_channel_tgh_info tgh;
-       struct iwl_eeprom_channel eeprom;       /* EEPROM regulatory limit */
-       struct iwl_eeprom_channel ht40_eeprom;  /* EEPROM regulatory limit for
-                                                * HT40 channel */
-
-       u8 channel;       /* channel number */
-       u8 flags;         /* flags copied from EEPROM */
-       s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
-       s8 curr_txpow;    /* (dBm) regulatory/spectrum/user (not h/w) limit */
-       s8 min_power;     /* always 0 */
-       s8 scan_power;    /* (dBm) regul. eeprom, direct scans, any rate */
-
-       u8 group_index;   /* 0-4, maps channel to group1/2/3/4/5 */
-       u8 band_index;    /* 0-4, maps channel to band1/2/3/4/5 */
-       enum ieee80211_band band;
-
-       /* HT40 channel info */
-       s8 ht40_max_power_avg;  /* (dBm) regul. eeprom, normal Tx, any rate */
-       u8 ht40_flags;          /* flags copied from EEPROM */
-       u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-
-       /* Radio/DSP gain settings for each "normal" data Tx rate.
-        * These include, in addition to RF and DSP gain, a few fields for
-        *   remembering/modifying gain settings (indexes). */
-       struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
-
-       /* Radio/DSP gain settings for each scan rate, for directed scans. */
-       struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
-};
-
-#define IWL_TX_FIFO_BK         0       /* shared */
-#define IWL_TX_FIFO_BE         1
-#define IWL_TX_FIFO_VI         2       /* shared */
-#define IWL_TX_FIFO_VO         3
-#define IWL_TX_FIFO_UNUSED     -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES     10
-
-#define IWL_DEFAULT_CMD_QUEUE_NUM      4
-
-#define IEEE80211_DATA_LEN              2304
-#define IEEE80211_4ADDR_LEN             30
-#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl_frame {
-       union {
-               struct ieee80211_hdr frame;
-               struct iwl_tx_beacon_cmd beacon;
-               u8 raw[IEEE80211_FRAME_LEN];
-               u8 cmd[360];
-       } u;
-       struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
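A short worked example of these conversions (IEEE80211_SCTL_SEQ is 0xFFF0 in mac80211, so the 12-bit sequence number occupies bits 4-15 of the sequence-control field):

/* Worked example: sequence control 0x1235 carries sequence number 0x123
 * and fragment number 5, so SEQ_TO_SN(0x1235) == 0x123 and
 * SN_TO_SEQ(0x123) == 0x1230; MAX_SN evaluates to 0xFFF. */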
-
-enum {
-       CMD_SYNC = 0,
-       CMD_SIZE_NORMAL = 0,
-       CMD_NO_SKB = 0,
-       CMD_SIZE_HUGE = (1 << 0),
-       CMD_ASYNC = (1 << 1),
-       CMD_WANT_SKB = (1 << 2),
-       CMD_MAPPED = (1 << 3),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
- */
-struct iwl_device_cmd {
-       struct iwl_cmd_header hdr;      /* uCode API */
-       union {
-               u32 flags;
-               u8 val8;
-               u16 val16;
-               u32 val32;
-               struct iwl_tx_cmd tx;
-               u8 payload[DEF_CMD_PAYLOAD_SIZE];
-       } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-
-struct iwl_host_cmd {
-       const void *data;
-       unsigned long reply_page;
-       void (*callback)(struct iwl_priv *priv,
-                        struct iwl_device_cmd *cmd,
-                        struct iwl_rx_packet *pkt);
-       u32 flags;
-       u16 len;
-       u8 id;
-};
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
-
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
-       __le32 *bd;
-       dma_addr_t bd_dma;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-       u32 read;
-       u32 write;
-       u32 free_count;
-       u32 write_actual;
-       struct list_head rx_free;
-       struct list_head rx_used;
-       int need_update;
-       struct iwl_rb_status *rb_stts;
-       dma_addr_t rb_stts_dma;
-       spinlock_t lock;
-};
-
-#define IWL_SUPPORTED_RATES_IE_LEN         8
-
-#define MAX_TID_COUNT        9
-
-#define IWL_INVALID_RATE     0xFF
-#define IWL_INVALID_VALUE    -1
-
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap: bitmap of frames pending ACK in the Tx window (one bit per frame)
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA).  This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
-       u16 txq_id;
-       u16 frame_count;
-       u16 wait_for_ba;
-       u16 start_idx;
-       u64 bitmap;
-       u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
-       u8 state;
-};
-
-
-struct iwl_tid_data {
-       u16 seq_number; /* 4965 only */
-       u16 tfds_in_queue;
-       struct iwl_ht_agg agg;
-};
-
-struct iwl_hw_key {
-       u32 cipher;
-       int keylen;
-       u8 keyidx;
-       u8 key[32];
-};
-
-union iwl_ht_rate_supp {
-       u16 rates;
-       struct {
-               u8 siso_rate;
-               u8 mimo_rate;
-       };
-};
-
-#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
-
-struct iwl_ht_config {
-       bool single_chain_sufficient;
-       enum ieee80211_smps_mode smps; /* current smps mode */
-};
-
-/* QoS structures */
-struct iwl_qos_info {
-       int qos_active;
-       struct iwl_qosparam_cmd def_qos_parm;
-};
-
-/*
- * Structure should be accessed with sta_lock held. When station addition
- * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
- * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
- * sta_lock held.
- */
-struct iwl_station_entry {
-       struct iwl_legacy_addsta_cmd sta;
-       struct iwl_tid_data tid[MAX_TID_COUNT];
-       u8 used, ctxid;
-       struct iwl_hw_key keyinfo;
-       struct iwl_link_quality_cmd *lq;
-};
-
-struct iwl_station_priv_common {
-       struct iwl_rxon_context *ctx;
-       u8 sta_id;
-};
-
-/*
- * iwl_station_priv: Driver's private station information
- *
- * When mac80211 creates a station it reserves some space (hw->sta_data_size)
- * in the structure for use by the driver. This structure is placed in that
- * space.
- *
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl_station_priv {
-       struct iwl_station_priv_common common;
-       struct iwl_lq_sta lq_sta;
-       atomic_t pending_frames;
-       bool client;
-       bool asleep;
-};
-
-/**
- * struct iwl_vif_priv - driver's private per-interface information
- *
- * When mac80211 allocates a virtual interface, it can allocate
- * space for us to put data into.
- */
-struct iwl_vif_priv {
-       struct iwl_rxon_context *ctx;
-       u8 ibss_bssid_sta_id;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
-       void *v_addr;           /* access by driver */
-       dma_addr_t p_addr;      /* access by card's busmaster DMA */
-       u32 len;                /* bytes */
-};
-
-/* uCode file layout */
-struct iwl_ucode_header {
-       __le32 ver;     /* major/minor/API/serial */
-       struct {
-               __le32 inst_size;       /* bytes of runtime code */
-               __le32 data_size;       /* bytes of runtime data */
-               __le32 init_size;       /* bytes of init code */
-               __le32 init_data_size;  /* bytes of init data */
-               __le32 boot_size;       /* bytes of bootstrap code */
-               u8 data[0];             /* in same order as sizes */
-       } v1;
-};
-
-struct iwl4965_ibss_seq {
-       u8 mac[ETH_ALEN];
-       u16 seq_num;
-       u16 frag_num;
-       unsigned long packet_time;
-       struct list_head list;
-};
-
-struct iwl_sensitivity_ranges {
-       u16 min_nrg_cck;
-       u16 max_nrg_cck;
-
-       u16 nrg_th_cck;
-       u16 nrg_th_ofdm;
-
-       u16 auto_corr_min_ofdm;
-       u16 auto_corr_min_ofdm_mrc;
-       u16 auto_corr_min_ofdm_x1;
-       u16 auto_corr_min_ofdm_mrc_x1;
-
-       u16 auto_corr_max_ofdm;
-       u16 auto_corr_max_ofdm_mrc;
-       u16 auto_corr_max_ofdm_x1;
-       u16 auto_corr_max_ofdm_mrc_x1;
-
-       u16 auto_corr_max_cck;
-       u16 auto_corr_max_cck_mrc;
-       u16 auto_corr_min_cck;
-       u16 auto_corr_min_cck_mrc;
-
-       u16 barker_corr_th_min;
-       u16 barker_corr_th_min_mrc;
-       u16 nrg_th_cca;
-};
-
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations: maximum number of stations supported by the device
- * @ht40_channel: bitmap of bands where 40 MHz (HT40) channels are possible:
- *     BIT(IEEE80211_BAND_2GHZ), BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_xxx_size: for ucode uses
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @sens: pointer to struct iwl_sensitivity_ranges (range of sensitivity values)
- */
-struct iwl_hw_params {
-       u8 max_txq_num;
-       u8 dma_chnl_num;
-       u16 scd_bc_tbls_size;
-       u32 tfd_size;
-       u8  tx_chains_num;
-       u8  rx_chains_num;
-       u8  valid_tx_ant;
-       u8  valid_rx_ant;
-       u16 max_rxq_size;
-       u16 max_rxq_log;
-       u32 rx_page_order;
-       u32 rx_wrt_ptr_reg;
-       u8  max_stations;
-       u8  ht40_channel;
-       u8  max_beacon_itrvl;   /* in 1024 ms */
-       u32 max_inst_size;
-       u32 max_data_size;
-       u32 max_bsm_size;
-       u32 ct_kill_threshold; /* value in hw-dependent units */
-       u16 beacon_time_tsf_bits;
-       const struct iwl_sensitivity_ranges *sens;
-};
-
-
-/******************************************************************************
- *
- * Functions implemented in core module which are forward declared here
- * for use by iwl-[4-5].c
- *
- * NOTE:  The implementation of these functions is not hardware specific,
- * which is why they are in the core module files.
- *
- * Naming convention --
- * iwl_         <-- Is part of iwlwifi
- * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl4965_bg_      <-- Called from work queue context
- * iwl4965_mac_     <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
-extern int iwl_legacy_queue_space(const struct iwl_queue *q);
-static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
-{
-       return q->write_ptr >= q->read_ptr ?
-               (i >= q->read_ptr && i < q->write_ptr) :
-               !(i < q->read_ptr && i >= q->write_ptr);
-}
-
-
-static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
-                                                               int is_huge)
-{
-       /*
-        * The init calibration result and scan commands require a buffer
-        * larger than TFD_MAX_PAYLOAD_SIZE, so they use the big buffer at
-        * the end of the command array
-        */
-       if (is_huge)
-               return q->n_window;     /* must be power of 2 */
-
-       /* Otherwise, use normal size buffers */
-       return index & (q->n_window - 1);
-}
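A quick worked example of the wrap-around above, under the assumption that the queue window is 32 entries:

/* Worked example (illustrative values): with q->n_window == 32, a normal
 * command at index 35 maps to slot 35 & (32 - 1) == 3, while a "huge"
 * command always occupies the single extra slot at index 32. */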
-
-
-struct iwl_dma_ptr {
-       dma_addr_t dma;
-       void *addr;
-       size_t size;
-};
-
-#define IWL_OPERATION_MODE_AUTO     0
-#define IWL_OPERATION_MODE_HT_ONLY  1
-#define IWL_OPERATION_MODE_MIXED    2
-#define IWL_OPERATION_MODE_20MHZ    3
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
-
-/* Sensitivity and chain noise calibration */
-#define INITIALIZATION_VALUE           0xFFFF
-#define IWL4965_CAL_NUM_BEACONS                20
-#define IWL_CAL_NUM_BEACONS            16
-#define MAXIMUM_ALLOWED_PATHLOSS       15
-
-#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
-
-#define MAX_FA_OFDM  50
-#define MIN_FA_OFDM  5
-#define MAX_FA_CCK   50
-#define MIN_FA_CCK   5
-
-#define AUTO_CORR_STEP_OFDM       1
-
-#define AUTO_CORR_STEP_CCK     3
-#define AUTO_CORR_MAX_TH_CCK   160
-
-#define NRG_DIFF               2
-#define NRG_STEP_CCK           2
-#define NRG_MARGIN             8
-#define MAX_NUMBER_CCK_NO_FA 100
-
-#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
-
-#define CHAIN_A             0
-#define CHAIN_B             1
-#define CHAIN_C             2
-#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
-#define ALL_BAND_FILTER                        0xFF00
-#define IN_BAND_FILTER                 0xFF
-#define MIN_AVERAGE_NOISE_MAX_VALUE    0xFFFFFFFF
-
-#define NRG_NUM_PREV_STAT_L     20
-#define NUM_RX_CHAINS           3
-
-enum iwl4965_false_alarm_state {
-       IWL_FA_TOO_MANY = 0,
-       IWL_FA_TOO_FEW = 1,
-       IWL_FA_GOOD_RANGE = 2,
-};
-
-enum iwl4965_chain_noise_state {
-       IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
-       IWL_CHAIN_NOISE_ACCUMULATE,
-       IWL_CHAIN_NOISE_CALIBRATED,
-       IWL_CHAIN_NOISE_DONE,
-};
-
-enum iwl4965_calib_enabled_state {
-       IWL_CALIB_DISABLED = 0,  /* must be 0 */
-       IWL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
-       IWL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
-       void *buf;
-       size_t buf_len;
-};
-
-enum ucode_type {
-       UCODE_NONE = 0,
-       UCODE_INIT,
-       UCODE_RT
-};
-
-/* Sensitivity calib data */
-struct iwl_sensitivity_data {
-       u32 auto_corr_ofdm;
-       u32 auto_corr_ofdm_mrc;
-       u32 auto_corr_ofdm_x1;
-       u32 auto_corr_ofdm_mrc_x1;
-       u32 auto_corr_cck;
-       u32 auto_corr_cck_mrc;
-
-       u32 last_bad_plcp_cnt_ofdm;
-       u32 last_fa_cnt_ofdm;
-       u32 last_bad_plcp_cnt_cck;
-       u32 last_fa_cnt_cck;
-
-       u32 nrg_curr_state;
-       u32 nrg_prev_state;
-       u32 nrg_value[10];
-       u8  nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
-       u32 nrg_silence_ref;
-       u32 nrg_energy_idx;
-       u32 nrg_silence_idx;
-       u32 nrg_th_cck;
-       s32 nrg_auto_corr_silence_diff;
-       u32 num_in_cck_no_fa;
-       u32 nrg_th_ofdm;
-
-       u16 barker_corr_th_min;
-       u16 barker_corr_th_min_mrc;
-       u16 nrg_th_cca;
-};
-
-/* Chain noise (differential Rx gain) calib data */
-struct iwl_chain_noise_data {
-       u32 active_chains;
-       u32 chain_noise_a;
-       u32 chain_noise_b;
-       u32 chain_noise_c;
-       u32 chain_signal_a;
-       u32 chain_signal_b;
-       u32 chain_signal_c;
-       u16 beacon_count;
-       u8 disconn_array[NUM_RX_CHAINS];
-       u8 delta_gain_code[NUM_RX_CHAINS];
-       u8 radio_write;
-       u8 state;
-};
-
-#define        EEPROM_SEM_TIMEOUT 10           /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000    /* number of attempts (not time) */
-
-#define IWL_TRAFFIC_ENTRIES    (256)
-#define IWL_TRAFFIC_ENTRY_SIZE  (64)
-
-enum {
-       MEASUREMENT_READY = (1 << 0),
-       MEASUREMENT_ACTIVE = (1 << 1),
-};
-
-/* interrupt statistics */
-struct isr_statistics {
-       u32 hw;
-       u32 sw;
-       u32 err_code;
-       u32 sch;
-       u32 alive;
-       u32 rfkill;
-       u32 ctkill;
-       u32 wakeup;
-       u32 rx;
-       u32 rx_handlers[REPLY_MAX];
-       u32 tx;
-       u32 unhandled;
-};
-
-/* management statistics */
-enum iwl_mgmt_stats {
-       MANAGEMENT_ASSOC_REQ = 0,
-       MANAGEMENT_ASSOC_RESP,
-       MANAGEMENT_REASSOC_REQ,
-       MANAGEMENT_REASSOC_RESP,
-       MANAGEMENT_PROBE_REQ,
-       MANAGEMENT_PROBE_RESP,
-       MANAGEMENT_BEACON,
-       MANAGEMENT_ATIM,
-       MANAGEMENT_DISASSOC,
-       MANAGEMENT_AUTH,
-       MANAGEMENT_DEAUTH,
-       MANAGEMENT_ACTION,
-       MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
-       CONTROL_BACK_REQ =  0,
-       CONTROL_BACK,
-       CONTROL_PSPOLL,
-       CONTROL_RTS,
-       CONTROL_CTS,
-       CONTROL_ACK,
-       CONTROL_CFEND,
-       CONTROL_CFENDACK,
-       CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-       u32 mgmt[MANAGEMENT_MAX];
-       u32 ctrl[CONTROL_MAX];
-       u32 data_cnt;
-       u64 data_bytes;
-#endif
-};
-
-/*
- * host interrupt timeout value
- * used when setting the interrupt coalescing timer;
- * CSR_INT_COALESCING is an 8-bit register in units of 32 usec
- *
- * default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
- */
-#define IWL_HOST_INT_TIMEOUT_MAX       (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF       (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN       (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
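The defaults follow directly from the 32-usec granularity noted above: 0x40 * 32 = 2048 usec for normal operation and 0x10 * 32 = 512 usec during calibration. A trivial conversion helper (name invented for illustration) would be:

/* Illustrative only: CSR_INT_COALESCING holds an 8-bit value counted in
 * 32-usec units, so 0x40 -> 2048 usec and 0x10 -> 512 usec. */
static inline u32 example_coalescing_to_usec(u8 reg_val)
{
        return (u32)reg_val * 32;
}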
-
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT     (2000)
-#define IWL_LONG_WD_TIMEOUT    (10000)
-#define IWL_MAX_WD_TIMEOUT     (120000)
-
-struct iwl_force_reset {
-       int reset_request_count;
-       int reset_success_count;
-       int reset_reject_count;
-       unsigned long reset_duration;
-       unsigned long last_force_reset_jiffies;
-};
-
-/* extend beacon time format bit shifting  */
-/*
- * for _3945 devices
- * bits 31:24 - extended
- * bits 23:0  - interval
- */
-#define IWL3945_EXT_BEACON_TIME_POS    24
-/*
- * for _4965 devices
- * bits 31:22 - extended
- * bits 21:0  - interval
- */
-#define IWL4965_EXT_BEACON_TIME_POS    22
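In other words, the 32-bit extended beacon time is split at bit 24 on 3945 and at bit 22 on 4965. A hedged sketch of the unpacking (helper name invented for illustration, not driver API):

/* Illustrative only: split an extended beacon time value into its
 * "extended" and "interval" parts at the device-specific bit position
 * (IWL3945_EXT_BEACON_TIME_POS or IWL4965_EXT_BEACON_TIME_POS). */
static inline void example_split_beacon_time(u32 value, int pos,
                                             u32 *extended, u32 *interval)
{
        *extended = value >> pos;
        *interval = value & ((1U << pos) - 1);
}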
-
-enum iwl_rxon_context_id {
-       IWL_RXON_CTX_BSS,
-
-       NUM_IWL_RXON_CTX
-};
-
-struct iwl_rxon_context {
-       struct ieee80211_vif *vif;
-
-       const u8 *ac_to_fifo;
-       const u8 *ac_to_queue;
-       u8 mcast_queue;
-
-       /*
-        * We could use the vif to indicate active, but we
-        * also need it to be active during disabling when
-        * we already removed the vif for type setting.
-        */
-       bool always_active, is_active;
-
-       bool ht_need_multiple_chains;
-
-       enum iwl_rxon_context_id ctxid;
-
-       u32 interface_modes, exclusive_interface_modes;
-       u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
-       /*
-        * We declare this const so it can only be
-        * changed via explicit cast within the
-        * routines that actually update the physical
-        * hardware.
-        */
-       const struct iwl_legacy_rxon_cmd active;
-       struct iwl_legacy_rxon_cmd staging;
-
-       struct iwl_rxon_time_cmd timing;
-
-       struct iwl_qos_info qos_data;
-
-       u8 bcast_sta_id, ap_sta_id;
-
-       u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
-       u8 qos_cmd;
-       u8 wep_key_cmd;
-
-       struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
-       u8 key_mapping_keys;
-
-       __le32 station_flags;
-
-       struct {
-               bool non_gf_sta_present;
-               u8 protection;
-               bool enabled, is_40mhz;
-               u8 extension_chan_offset;
-       } ht;
-};
-
-struct iwl_priv {
-
-       /* ieee device used by generic ieee processing code */
-       struct ieee80211_hw *hw;
-       struct ieee80211_channel *ieee_channels;
-       struct ieee80211_rate *ieee_rates;
-       struct iwl_cfg *cfg;
-
-       /* temporary frame storage list */
-       struct list_head free_frames;
-       int frames_count;
-
-       enum ieee80211_band band;
-       int alloc_rxb_page;
-
-       void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-                                      struct iwl_rx_mem_buffer *rxb);
-
-       struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
-       /* spectrum measurement report caching */
-       struct iwl_spectrum_notification measure_report;
-       u8 measurement_status;
-
-       /* ucode beacon time */
-       u32 ucode_beacon_time;
-       int missed_beacon_threshold;
-
-       /* track IBSS manager (last beacon) status */
-       u32 ibss_manager;
-
-       /* force reset */
-       struct iwl_force_reset force_reset;
-
-       /* we allocate array of iwl_channel_info for NIC's valid channels.
-        *    Access via channel # using indirect index array */
-       struct iwl_channel_info *channel_info;  /* channel info array */
-       u8 channel_count;       /* # of channels */
-
-       /* thermal calibration */
-       s32 temperature;        /* degrees Kelvin */
-       s32 last_temperature;
-
-       /* init calibration results */
-       struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
-       /* Scan related variables */
-       unsigned long scan_start;
-       unsigned long scan_start_tsf;
-       void *scan_cmd;
-       enum ieee80211_band scan_band;
-       struct cfg80211_scan_request *scan_request;
-       struct ieee80211_vif *scan_vif;
-       u8 scan_tx_ant[IEEE80211_NUM_BANDS];
-       u8 mgmt_tx_ant;
-
-       /* spinlock */
-       spinlock_t lock;        /* protect general shared data */
-       spinlock_t hcmd_lock;   /* protect hcmd */
-       spinlock_t reg_lock;    /* protect hw register access */
-       struct mutex mutex;
-
-       /* basic pci-network driver stuff */
-       struct pci_dev *pci_dev;
-
-       /* pci hardware address support */
-       void __iomem *hw_base;
-       u32  hw_rev;
-       u32  hw_wa_rev;
-       u8   rev_id;
-
-       /* microcode/device supports multiple contexts */
-       u8 valid_contexts;
-
-       /* command queue number */
-       u8 cmd_queue;
-
-       /* max number of station keys */
-       u8 sta_key_max_num;
-
-       /* EEPROM MAC addresses */
-       struct mac_address addresses[1];
-
-       /* uCode images, save to reload in case of failure */
-       int fw_index;                   /* firmware we're trying to load */
-       u32 ucode_ver;                  /* version of ucode, copy of
-                                          iwl_ucode.ver */
-       struct fw_desc ucode_code;      /* runtime inst */
-       struct fw_desc ucode_data;      /* runtime data original */
-       struct fw_desc ucode_data_backup;       /* runtime data save/restore */
-       struct fw_desc ucode_init;      /* initialization inst */
-       struct fw_desc ucode_init_data; /* initialization data */
-       struct fw_desc ucode_boot;      /* bootstrap inst */
-       enum ucode_type ucode_type;
-       u8 ucode_write_complete;        /* the image write is complete */
-       char firmware_name[25];
-
-       struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-
-       __le16 switch_channel;
-
-       /* 1st responses from initialize and runtime uCode images.
-        * _4965's initialize alive response contains some calibration data. */
-       struct iwl_init_alive_resp card_alive_init;
-       struct iwl_alive_resp card_alive;
-
-       u16 active_rate;
-
-       u8 start_calib;
-       struct iwl_sensitivity_data sensitivity_data;
-       struct iwl_chain_noise_data chain_noise_data;
-       __le16 sensitivity_tbl[HD_TABLE_SIZE];
-
-       struct iwl_ht_config current_ht_config;
-
-       /* Rate scaling data */
-       u8 retry_rate;
-
-       wait_queue_head_t wait_command_queue;
-
-       int activity_timer_active;
-
-       /* Rx and Tx DMA processing queues */
-       struct iwl_rx_queue rxq;
-       struct iwl_tx_queue *txq;
-       unsigned long txq_ctx_active_msk;
-       struct iwl_dma_ptr  kw; /* keep warm address */
-       struct iwl_dma_ptr  scd_bc_tbls;
-
-       u32 scd_base_addr;      /* scheduler sram base address */
-
-       unsigned long status;
-
-       /* counts mgmt, ctl, and data packets */
-       struct traffic_stats tx_stats;
-       struct traffic_stats rx_stats;
-
-       /* counts interrupts */
-       struct isr_statistics isr_stats;
-
-       struct iwl_power_mgr power_data;
-
-       /* context information */
-       u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-
-       /* station table variables */
-
-       /* Note: if lock and sta_lock are needed, lock must be acquired first */
-       spinlock_t sta_lock;
-       int num_stations;
-       struct iwl_station_entry stations[IWL_STATION_COUNT];
-       unsigned long ucode_key_table;
-
-       /* queue refcounts */
-#define IWL_MAX_HW_QUEUES      32
-       unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
-       /* for each AC */
-       atomic_t queue_stop_count[4];
-
-       /* Indication if ieee80211_ops->open has been called */
-       u8 is_open;
-
-       u8 mac80211_registered;
-
-       /* eeprom -- this is in the card's little endian byte order */
-       u8 *eeprom;
-       struct iwl_eeprom_calib_info *calib_info;
-
-       enum nl80211_iftype iw_mode;
-
-       /* Last Rx'd beacon timestamp */
-       u64 timestamp;
-
-       union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
-               struct {
-                       void *shared_virt;
-                       dma_addr_t shared_phys;
-
-                       struct delayed_work thermal_periodic;
-                       struct delayed_work rfkill_poll;
-
-                       struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-                       struct iwl3945_notif_statistics accum_statistics;
-                       struct iwl3945_notif_statistics delta_statistics;
-                       struct iwl3945_notif_statistics max_delta;
-#endif
-
-                       u32 sta_supp_rates;
-                       int last_rx_rssi;       /* From Rx packet statistics */
-
-                       /* Rx'd packet timing information */
-                       u32 last_beacon_time;
-                       u64 last_tsf;
-
-                       /*
-                        * each calibration channel group in the
-                        * EEPROM has a derived clip setting for
-                        * each rate.
-                        */
-                       const struct iwl3945_clip_group clip_groups[5];
-
-               } _3945;
-#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
-               struct {
-                       struct iwl_rx_phy_res last_phy_res;
-                       bool last_phy_res_valid;
-
-                       struct completion firmware_loading_complete;
-
-                       /*
-                        * The chain noise reset and gain commands are the
-                        * two extra calibration commands that follow the
-                        * standard phy calibration commands
-                        */
-                       u8 phy_calib_chain_noise_reset_cmd;
-                       u8 phy_calib_chain_noise_gain_cmd;
-
-                       struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-                       struct iwl_notif_statistics accum_statistics;
-                       struct iwl_notif_statistics delta_statistics;
-                       struct iwl_notif_statistics max_delta;
-#endif
-
-               } _4965;
-#endif
-       };
-
-       struct iwl_hw_params hw_params;
-
-       u32 inta_mask;
-
-       struct workqueue_struct *workqueue;
-
-       struct work_struct restart;
-       struct work_struct scan_completed;
-       struct work_struct rx_replenish;
-       struct work_struct abort_scan;
-
-       struct iwl_rxon_context *beacon_ctx;
-       struct sk_buff *beacon_skb;
-
-       struct work_struct tx_flush;
-
-       struct tasklet_struct irq_tasklet;
-
-       struct delayed_work init_alive_start;
-       struct delayed_work alive_start;
-       struct delayed_work scan_check;
-
-       /* TX Power */
-       s8 tx_power_user_lmt;
-       s8 tx_power_device_lmt;
-       s8 tx_power_next;
-
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       /* debugging info */
-       u32 debug_level; /* per device debugging will override global
-                           iwlegacy_debug_level if set */
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-       /* debugfs */
-       u16 tx_traffic_idx;
-       u16 rx_traffic_idx;
-       u8 *tx_traffic;
-       u8 *rx_traffic;
-       struct dentry *debugfs_dir;
-       u32 dbgfs_sram_offset, dbgfs_sram_len;
-       bool disable_ht40;
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-       struct work_struct txpower_work;
-       u32 disable_sens_cal;
-       u32 disable_chain_noise_cal;
-       u32 disable_tx_power_cal;
-       struct work_struct run_time_calib_work;
-       struct timer_list statistics_periodic;
-       struct timer_list watchdog;
-       bool hw_ready;
-
-       struct led_classdev led;
-       unsigned long blink_on, blink_off;
-       bool led_registered;
-}; /*iwl_priv */
-
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
-       set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
-       clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-/*
- * iwl_legacy_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set per device debug level. This debug
- * level will be used if set, otherwise the global debug level which can be
- * set via module parameter is used.
- */
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
-       if (priv->debug_level)
-               return priv->debug_level;
-       else
-               return iwlegacy_debug_level;
-}
-#else
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
-       return iwlegacy_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *
-iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
-                                                int txq_id, int idx)
-{
-       if (priv->txq[txq_id].txb[idx].skb)
-               return (struct ieee80211_hdr *)priv->txq[txq_id].
-                               txb[idx].skb->data;
-       return NULL;
-}
-
-static inline struct iwl_rxon_context *
-iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-       return vif_priv->ctx;
-}
-
-#define for_each_context(priv, ctx)                            \
-       for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];           \
-            ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)    \
-               if (priv->valid_contexts & BIT(ctx->ctxid))
-
-static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
-                                   enum iwl_rxon_context_id ctxid)
-{
-       return (priv->contexts[ctxid].active.filter_flags &
-                       RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
-{
-       return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-}
-
-static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
-{
-       return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
-{
-       if (ch_info == NULL)
-               return 0;
-       return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
-{
-       return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
-       return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline int
-iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
-{
-       return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int
-iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
-{
-       return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
-}
-
-static inline void
-__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
-{
-       __free_pages(page, priv->hw_params.rx_page_order);
-       priv->alloc_rxb_page--;
-}
-
-static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
-{
-       free_pages(page, priv->hw_params.rx_page_order);
-       priv->alloc_rxb_page--;
-}
-#endif                         /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644 (file)
index acec991..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-
-/* sparse doesn't like tracepoint macros */
-#ifndef __CHECKER__
-#include "iwl-dev.h"
-
-#define CREATE_TRACE_POINTS
-#include "iwl-devtrace.h"
-
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644 (file)
index a443725..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWLWIFI_LEGACY_DEVICE_TRACE
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-
-#define PRIV_ENTRY     __field(struct iwl_priv *, priv)
-#define PRIV_ASSIGN    (__entry->priv = priv)
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_io
-
-TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
-       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
-       TP_ARGS(priv, offs, val),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-               __field(u32, offs)
-               __field(u32, val)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               __entry->offs = offs;
-               __entry->val = val;
-       ),
-       TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
-                                       __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
-       TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
-       TP_ARGS(priv, offs, val),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-               __field(u32, offs)
-               __field(u8, val)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               __entry->offs = offs;
-               __entry->val = val;
-       ),
-       TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
-                                       __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
-       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
-       TP_ARGS(priv, offs, val),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-               __field(u32, offs)
-               __field(u32, val)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               __entry->offs = offs;
-               __entry->val = val;
-       ),
-       TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
-                                       __entry->offs, __entry->val)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_ucode
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
-       TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
-       TP_ARGS(priv, hcmd, len, flags),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-               __dynamic_array(u8, hcmd, len)
-               __field(u32, flags)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               memcpy(__get_dynamic_array(hcmd), hcmd, len);
-               __entry->flags = flags;
-       ),
-       TP_printk("[%p] hcmd %#.2x (%ssync)",
-                 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
-                 __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_rx,
-       TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
-       TP_ARGS(priv, rxbuf, len),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-               __dynamic_array(u8, rxbuf, len)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
-       ),
-       TP_printk("[%p] RX cmd %#.2x",
-                 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_tx,
-       TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
-                void *buf0, size_t buf0_len,
-                void *buf1, size_t buf1_len),
-       TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-
-               __field(size_t, framelen)
-               __dynamic_array(u8, tfd, tfdlen)
-
-               /*
-                * Do not insert between or below these items,
-                * we want to keep the frame together (except
-                * for the possible padding).
-                */
-               __dynamic_array(u8, buf0, buf0_len)
-               __dynamic_array(u8, buf1, buf1_len)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               __entry->framelen = buf0_len + buf1_len;
-               memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
-               memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
-               memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
-       ),
-       TP_printk("[%p] TX %.2x (%zu bytes)",
-                 __entry->priv,
-                 ((u8 *)__get_dynamic_array(buf0))[0],
-                 __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
-       TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
-                u32 data1, u32 data2, u32 line, u32 blink1,
-                u32 blink2, u32 ilink1, u32 ilink2),
-       TP_ARGS(priv, desc, time, data1, data2, line,
-               blink1, blink2, ilink1, ilink2),
-       TP_STRUCT__entry(
-               PRIV_ENTRY
-               __field(u32, desc)
-               __field(u32, time)
-               __field(u32, data1)
-               __field(u32, data2)
-               __field(u32, line)
-               __field(u32, blink1)
-               __field(u32, blink2)
-               __field(u32, ilink1)
-               __field(u32, ilink2)
-       ),
-       TP_fast_assign(
-               PRIV_ASSIGN;
-               __entry->desc = desc;
-               __entry->time = time;
-               __entry->data1 = data1;
-               __entry->data2 = data2;
-               __entry->line = line;
-               __entry->blink1 = blink1;
-               __entry->blink2 = blink2;
-               __entry->ilink1 = ilink1;
-               __entry->ilink2 = ilink2;
-       ),
-       TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
-                 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
-                 __entry->priv, __entry->desc, __entry->time, __entry->data1,
-                 __entry->data2, __entry->line, __entry->blink1,
-                 __entry->blink2, __entry->ilink1, __entry->ilink2)
-);
-
-#endif /* __IWLWIFI_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644 (file)
index 5bf3f49..0000000
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwlegacy_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel.  We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware.  This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel.  There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwlegacy_eeprom_band_1[14] = {
-       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwlegacy_eeprom_band_2[] = {   /* 4915-5080MHz */
-       183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwlegacy_eeprom_band_3[] = {   /* 5170-5320MHz */
-       34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwlegacy_eeprom_band_4[] = {   /* 5500-5700MHz */
-       100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwlegacy_eeprom_band_5[] = {   /* 5725-5825MHz */
-       145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwlegacy_eeprom_band_6[] = {       /* 2.4 ht40 channel */
-       1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwlegacy_eeprom_band_7[] = {       /* 5.2 ht40 channel */
-       36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
-{
-       u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
-       int ret = 0;
-
-       IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
-       switch (gp) {
-       case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
-       case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
-               break;
-       default:
-               IWL_ERR(priv, "bad EEPROM signature, "
-                       "EEPROM_GP=0x%08x\n", gp);
-               ret = -ENOENT;
-               break;
-       }
-       return ret;
-}
-
-const u8
-*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
-       BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
-       return &priv->eeprom[offset];
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
-
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
-{
-       if (!priv->eeprom)
-               return 0;
-       return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
-
-/**
- * iwl_legacy_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE:  This routine uses the non-debug IO access functions.
- */
-int iwl_legacy_eeprom_init(struct iwl_priv *priv)
-{
-       __le16 *e;
-       u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
-       int sz;
-       int ret;
-       u16 addr;
-
-       /* allocate eeprom */
-       sz = priv->cfg->base_params->eeprom_size;
-       IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
-       priv->eeprom = kzalloc(sz, GFP_KERNEL);
-       if (!priv->eeprom) {
-               ret = -ENOMEM;
-               goto alloc_err;
-       }
-       e = (__le16 *)priv->eeprom;
-
-       priv->cfg->ops->lib->apm_ops.init(priv);
-
-       ret = iwl_legacy_eeprom_verify_signature(priv);
-       if (ret < 0) {
-               IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
-               ret = -ENOENT;
-               goto err;
-       }
-
-       /* Make sure driver (instead of uCode) is allowed to read EEPROM */
-       ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
-       if (ret < 0) {
-               IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
-               ret = -ENOENT;
-               goto err;
-       }
-
-       /* eeprom is an array of 16bit values */
-       for (addr = 0; addr < sz; addr += sizeof(u16)) {
-               u32 r;
-
-               _iwl_legacy_write32(priv, CSR_EEPROM_REG,
-                            CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
-               ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
-                                         CSR_EEPROM_REG_READ_VALID_MSK,
-                                         CSR_EEPROM_REG_READ_VALID_MSK,
-                                         IWL_EEPROM_ACCESS_TIMEOUT);
-               if (ret < 0) {
-                       IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
-                                                       addr);
-                       goto done;
-               }
-               r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
-               e[addr / 2] = cpu_to_le16(r >> 16);
-       }
-
-       IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
-                      "EEPROM",
-                      iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
-
-       ret = 0;
-done:
-       priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
-
-err:
-       if (ret)
-               iwl_legacy_eeprom_free(priv);
-       /* Reset chip to save power until we load uCode during "up". */
-       iwl_legacy_apm_stop(priv);
-alloc_err:
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_init);
-
-void iwl_legacy_eeprom_free(struct iwl_priv *priv)
-{
-       kfree(priv->eeprom);
-       priv->eeprom = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_free);
-
-static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
-                       int eep_band, int *eeprom_ch_count,
-                       const struct iwl_eeprom_channel **eeprom_ch_info,
-                       const u8 **eeprom_ch_index)
-{
-       u32 offset = priv->cfg->ops->lib->
-                       eeprom_ops.regulatory_bands[eep_band - 1];
-       switch (eep_band) {
-       case 1:         /* 2.4GHz band */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_1;
-               break;
-       case 2:         /* 4.9GHz band */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_2;
-               break;
-       case 3:         /* 5.2GHz band */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_3;
-               break;
-       case 4:         /* 5.5GHz band */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_4;
-               break;
-       case 5:         /* 5.7GHz band */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_5;
-               break;
-       case 6:         /* 2.4GHz ht40 channels */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_6;
-               break;
-       case 7:         /* 5 GHz ht40 channels */
-               *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
-               *eeprom_ch_info = (struct iwl_eeprom_channel *)
-                               iwl_legacy_eeprom_query_addr(priv, offset);
-               *eeprom_ch_index = iwlegacy_eeprom_band_7;
-               break;
-       default:
-               BUG();
-       }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
-                           ? # x " " : "")
-/**
- * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
-                             enum ieee80211_band band, u16 channel,
-                             const struct iwl_eeprom_channel *eeprom_ch,
-                             u8 clear_ht40_extension_channel)
-{
-       struct iwl_channel_info *ch_info;
-
-       ch_info = (struct iwl_channel_info *)
-                       iwl_legacy_get_channel_info(priv, band, channel);
-
-       if (!iwl_legacy_is_channel_valid(ch_info))
-               return -1;
-
-       IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
-                       " Ad-Hoc %ssupported\n",
-                       ch_info->channel,
-                       iwl_legacy_is_channel_a_band(ch_info) ?
-                       "5.2" : "2.4",
-                       CHECK_AND_PRINT(IBSS),
-                       CHECK_AND_PRINT(ACTIVE),
-                       CHECK_AND_PRINT(RADAR),
-                       CHECK_AND_PRINT(WIDE),
-                       CHECK_AND_PRINT(DFS),
-                       eeprom_ch->flags,
-                       eeprom_ch->max_power_avg,
-                       ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
-                        && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
-                       "" : "not ");
-
-       ch_info->ht40_eeprom = *eeprom_ch;
-       ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
-       ch_info->ht40_flags = eeprom_ch->flags;
-       if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
-               ch_info->ht40_extension_channel &=
-                                       ~clear_ht40_extension_channel;
-
-       return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
-                           ? # x " " : "")
-
-/**
- * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_legacy_init_channel_map(struct iwl_priv *priv)
-{
-       int eeprom_ch_count = 0;
-       const u8 *eeprom_ch_index = NULL;
-       const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
-       int band, ch;
-       struct iwl_channel_info *ch_info;
-
-       if (priv->channel_count) {
-               IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
-               return 0;
-       }
-
-       IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
-       priv->channel_count =
-           ARRAY_SIZE(iwlegacy_eeprom_band_1) +
-           ARRAY_SIZE(iwlegacy_eeprom_band_2) +
-           ARRAY_SIZE(iwlegacy_eeprom_band_3) +
-           ARRAY_SIZE(iwlegacy_eeprom_band_4) +
-           ARRAY_SIZE(iwlegacy_eeprom_band_5);
-
-       IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
-                       priv->channel_count);
-
-       priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
-                                    priv->channel_count, GFP_KERNEL);
-       if (!priv->channel_info) {
-               IWL_ERR(priv, "Could not allocate channel_info\n");
-               priv->channel_count = 0;
-               return -ENOMEM;
-       }
-
-       ch_info = priv->channel_info;
-
-       /* Loop through the 5 EEPROM bands, adding them in order to the
-        * channel map we maintain (which contains more information than
-        * just what is in the EEPROM) */
-       for (band = 1; band <= 5; band++) {
-
-               iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
-                                       &eeprom_ch_info, &eeprom_ch_index);
-
-               /* Loop through each band adding each of the channels */
-               for (ch = 0; ch < eeprom_ch_count; ch++) {
-                       ch_info->channel = eeprom_ch_index[ch];
-                       ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
-                           IEEE80211_BAND_5GHZ;
-
-                       /* permanently store EEPROM's channel regulatory flags
-                        *   and max power in channel info database. */
-                       ch_info->eeprom = eeprom_ch_info[ch];
-
-                       /* Copy the run-time flags so they are there even on
-                        * invalid channels */
-                       ch_info->flags = eeprom_ch_info[ch].flags;
-                       /* First write that ht40 is not enabled, and then enable
-                        * one by one */
-                       ch_info->ht40_extension_channel =
-                                       IEEE80211_CHAN_NO_HT40;
-
-                       if (!(iwl_legacy_is_channel_valid(ch_info))) {
-                               IWL_DEBUG_EEPROM(priv,
-                                              "Ch. %d Flags %x [%sGHz] - "
-                                              "No traffic\n",
-                                              ch_info->channel,
-                                              ch_info->flags,
-                                              iwl_legacy_is_channel_a_band(ch_info) ?
-                                              "5.2" : "2.4");
-                               ch_info++;
-                               continue;
-                       }
-
-                       /* Initialize regulatory-based run-time data */
-                       ch_info->max_power_avg = ch_info->curr_txpow =
-                           eeprom_ch_info[ch].max_power_avg;
-                       ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
-                       ch_info->min_power = 0;
-
-                       IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
-                                      "%s%s%s%s%s%s(0x%02x %ddBm):"
-                                      " Ad-Hoc %ssupported\n",
-                                      ch_info->channel,
-                                      iwl_legacy_is_channel_a_band(ch_info) ?
-                                      "5.2" : "2.4",
-                                      CHECK_AND_PRINT_I(VALID),
-                                      CHECK_AND_PRINT_I(IBSS),
-                                      CHECK_AND_PRINT_I(ACTIVE),
-                                      CHECK_AND_PRINT_I(RADAR),
-                                      CHECK_AND_PRINT_I(WIDE),
-                                      CHECK_AND_PRINT_I(DFS),
-                                      eeprom_ch_info[ch].flags,
-                                      eeprom_ch_info[ch].max_power_avg,
-                                      ((eeprom_ch_info[ch].
-                                        flags & EEPROM_CHANNEL_IBSS)
-                                       && !(eeprom_ch_info[ch].
-                                            flags & EEPROM_CHANNEL_RADAR))
-                                      ? "" : "not ");
-
-                       ch_info++;
-               }
-       }
-
-       /* Check if we do have HT40 channels */
-       if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
-           EEPROM_REGULATORY_BAND_NO_HT40 &&
-           priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
-           EEPROM_REGULATORY_BAND_NO_HT40)
-               return 0;
-
-       /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
-       for (band = 6; band <= 7; band++) {
-               enum ieee80211_band ieeeband;
-
-               iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
-                                       &eeprom_ch_info, &eeprom_ch_index);
-
-               /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
-               ieeeband =
-                       (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
-               /* Loop through each band adding each of the channels */
-               for (ch = 0; ch < eeprom_ch_count; ch++) {
-                       /* Set up driver's info for lower half */
-                       iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
-                                               eeprom_ch_index[ch],
-                                               &eeprom_ch_info[ch],
-                                               IEEE80211_CHAN_NO_HT40PLUS);
-
-                       /* Set up driver's info for upper half */
-                       iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
-                                               eeprom_ch_index[ch] + 4,
-                                               &eeprom_ch_info[ch],
-                                               IEEE80211_CHAN_NO_HT40MINUS);
-               }
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_channel_map);
-
-/*
- * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
- */
-void iwl_legacy_free_channel_map(struct iwl_priv *priv)
-{
-       kfree(priv->channel_info);
-       priv->channel_count = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_free_channel_map);
-
-/**
- * iwl_legacy_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct
-iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
-                                       enum ieee80211_band band, u16 channel)
-{
-       int i;
-
-       switch (band) {
-       case IEEE80211_BAND_5GHZ:
-               for (i = 14; i < priv->channel_count; i++) {
-                       if (priv->channel_info[i].channel == channel)
-                               return &priv->channel_info[i];
-               }
-               break;
-       case IEEE80211_BAND_2GHZ:
-               if (channel >= 1 && channel <= 14)
-                       return &priv->channel_info[channel - 1];
-               break;
-       default:
-               BUG();
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644 (file)
index c59c810..0000000
+++ /dev/null
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_eeprom_h__
-#define __iwl_legacy_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT      5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT         10   /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT     1000 /* number of attempts (not time) */
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- *        It only indicates that 20 MHz channel use is supported; HT40 channel
- *        usage is indicated by a separate set of regulatory flags for each
- *        HT40 channel pair.
- *
- * NOTE:  Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
-       EEPROM_CHANNEL_VALID = (1 << 0),        /* usable for this SKU/geo */
-       EEPROM_CHANNEL_IBSS = (1 << 1),         /* usable as an IBSS channel */
-       /* Bit 2 Reserved */
-       EEPROM_CHANNEL_ACTIVE = (1 << 3),       /* active scanning allowed */
-       EEPROM_CHANNEL_RADAR = (1 << 4),        /* radar detection required */
-       EEPROM_CHANNEL_WIDE = (1 << 5),         /* 20 MHz channel okay */
-       /* Bit 6 Reserved (was Narrow Channel) */
-       EEPROM_CHANNEL_DFS = (1 << 7),  /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-/* 3945 only */
-#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
-#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
-       u8 flags;               /* EEPROM_CHANNEL_* flags copied from EEPROM */
-       s8 max_power_avg;       /* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-/* 3945 Specific */
-#define EEPROM_3945_EEPROM_VERSION     (0x2f)
-
-/* 4965 has two radio transmitters (and 3 radio receivers) */
-#define EEPROM_TX_POWER_TX_CHAINS      (2)
-
-/* 4965 has room for up to 8 sets of txpower calibration data */
-#define EEPROM_TX_POWER_BANDS          (8)
-
-/* 4965 factory calibration measures txpower gain settings for
- * each of 3 target output levels */
-#define EEPROM_TX_POWER_MEASUREMENTS   (3)
-
-/* 4965 Specific */
-/* 4965 driver does not work with txpower calibration version < 5 */
-#define EEPROM_4965_TX_POWER_VERSION    (5)
-#define EEPROM_4965_EEPROM_VERSION     (0x2f)
-#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6) /* 2 bytes */
-#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8) /* 48  bytes */
-#define EEPROM_4965_BOARD_REVISION             (2*0x4F) /* 2 bytes */
-#define EEPROM_4965_BOARD_PBA                  (2*0x56+1) /* 9 bytes */
-
-/* 2.4 GHz */
-extern const u8 iwlegacy_eeprom_band_1[14];
-
-/*
- * factory calibration data for one txpower level, on one channel,
- * measured on one of the 2 tx chains (radio transmitter and associated
- * antenna).  EEPROM contains:
- *
- * 1)  Temperature (degrees Celsius) of device when measurement was made.
- *
- * 2)  Gain table index used to achieve the target measurement power.
- *     This refers to the "well-known" gain tables (see iwl-4965-hw.h).
- *
- * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
- *
- * 4)  RF power amplifier detector level measurement (not used).
- */
-struct iwl_eeprom_calib_measure {
-       u8 temperature;         /* Device temperature (Celsius) */
-       u8 gain_idx;            /* Index into gain table */
-       u8 actual_pow;          /* Measured RF output power, half-dBm */
-       s8 pa_det;              /* Power amp detector level (not used) */
-} __packed;
-
-
-/*
- * measurement set for one channel.  EEPROM contains:
- *
- * 1)  Channel number measured
- *
- * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
- *     (a.k.a. "tx chains") (6 measurements altogether)
- */
-struct iwl_eeprom_calib_ch_info {
-       u8 ch_num;
-       struct iwl_eeprom_calib_measure
-               measurements[EEPROM_TX_POWER_TX_CHAINS]
-                       [EEPROM_TX_POWER_MEASUREMENTS];
-} __packed;
-
-/*
- * txpower subband info.
- *
- * For each frequency subband, EEPROM contains the following:
- *
- * 1)  First and last channels within range of the subband.  "0" values
- *     indicate that this sample set is not being used.
- *
- * 2)  Sample measurement sets for 2 channels close to the range endpoints.
- */
-struct iwl_eeprom_calib_subband_info {
-       u8 ch_from;     /* channel number of lowest channel in subband */
-       u8 ch_to;       /* channel number of highest channel in subband */
-       struct iwl_eeprom_calib_ch_info ch1;
-       struct iwl_eeprom_calib_ch_info ch2;
-} __packed;
-
-
-/*
- * txpower calibration info.  EEPROM contains:
- *
- * 1)  Factory-measured saturation power levels (maximum levels at which
- *     tx power amplifier can output a signal without too much distortion).
- *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
- *     values apply to all channels within each of the bands.
- *
- * 2)  Factory-measured power supply voltage level.  This is assumed to be
- *     constant (i.e. same value applies to all channels/bands) while the
- *     factory measurements are being made.
- *
- * 3)  Up to 8 sets of factory-measured txpower calibration values.
- *     These are for different frequency ranges, since txpower gain
- *     characteristics of the analog radio circuitry vary with frequency.
- *
- *     Not all sets need to be filled with data;
- *     struct iwl_eeprom_calib_subband_info contains range of channels
- *     (0 if unused) for each set of data.
- */
-struct iwl_eeprom_calib_info {
-       u8 saturation_power24;  /* half-dBm (e.g. "34" = 17 dBm) */
-       u8 saturation_power52;  /* half-dBm */
-       __le16 voltage;         /* signed */
-       struct iwl_eeprom_calib_subband_info
-               band_info[EEPROM_TX_POWER_BANDS];
-} __packed;
-
-
-/* General */
-#define EEPROM_DEVICE_ID                    (2*0x08)   /* 2 bytes */
-#define EEPROM_MAC_ADDRESS                  (2*0x15)   /* 6  bytes */
-#define EEPROM_BOARD_REVISION               (2*0x35)   /* 2  bytes */
-#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1) /* 9  bytes */
-#define EEPROM_VERSION                      (2*0x44)   /* 2  bytes */
-#define EEPROM_SKU_CAP                      (2*0x45)   /* 2  bytes */
-#define EEPROM_OEM_MODE                     (2*0x46)   /* 2  bytes */
-#define EEPROM_WOWLAN_MODE                  (2*0x47)   /* 2  bytes */
-#define EEPROM_RADIO_CONFIG                 (2*0x48)   /* 2  bytes */
-#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)   /* 2  bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
-#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
-#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
-#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
-#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by iwl has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-#define EEPROM_REGULATORY_SKU_ID            (2*0x60)    /* 4  bytes */
-#define EEPROM_REGULATORY_BAND_1            (2*0x62)   /* 2  bytes */
-#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)   /* 28 bytes */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-#define EEPROM_REGULATORY_BAND_2            (2*0x71)   /* 2  bytes */
-#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)   /* 26 bytes */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-#define EEPROM_REGULATORY_BAND_3            (2*0x7F)   /* 2  bytes */
-#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)   /* 24 bytes */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-#define EEPROM_REGULATORY_BAND_4            (2*0x8C)   /* 2  bytes */
-#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)   /* 22 bytes */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-#define EEPROM_REGULATORY_BAND_5            (2*0x98)   /* 2  bytes */
-#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)   /* 12 bytes */
-
-/*
- * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
- *
- * The channel listed is the center of the lower 20 MHz half of the channel.
- * The overall center frequency is actually 2 channels (10 MHz) above that,
- * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
- * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
- * and the overall HT40 channel width centers on channel 3.
- *
- * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
- *        control channel to which to tune.  RXON also specifies whether the
- *        control channel is the upper or lower half of a HT40 channel.
- *
- * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
- */
-#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)  /* 14 bytes */
-
-/*
- * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
- * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
- */
-#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)  /* 22 bytes */
-
-#define EEPROM_REGULATORY_BAND_NO_HT40                 (0)
-
-struct iwl_eeprom_ops {
-       const u32 regulatory_bands[7];
-       int (*acquire_semaphore) (struct iwl_priv *priv);
-       void (*release_semaphore) (struct iwl_priv *priv);
-};
-
-
-int iwl_legacy_eeprom_init(struct iwl_priv *priv);
-void iwl_legacy_eeprom_free(struct iwl_priv *priv);
-const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
-                                       size_t offset);
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-int iwl_legacy_init_channel_map(struct iwl_priv *priv);
-void iwl_legacy_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_legacy_get_channel_info(
-               const struct iwl_priv *priv,
-               enum ieee80211_band band, u16 channel);
-
-#endif  /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644 (file)
index 6e60918..0000000
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_fh_h__
-#define __iwl_legacy_fh_h__
-
-/****************************/
-/* Flow Handler Definitions */
-/****************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH_MEM_LOWER_BOUND                   (0x1000)
-#define FH_MEM_UPPER_BOUND                   (0x2000)
-
-/**
- * Keep-Warm (KW) buffer base address.
- *
- * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
- * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
- * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
- * from going into a power-savings mode that would cause higher DRAM latency,
- * and possible data over/under-runs, before all Tx/Rx is complete.
- *
- * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
- * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
- * automatically invokes keep-warm accesses when normal accesses might not
- * be sufficient to maintain fast DRAM response.
- *
- * Bit fields:
- *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
- */
-#define FH_KW_MEM_ADDR_REG                  (FH_MEM_LOWER_BOUND + 0x97C)
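-
-/*
- * Editor's note -- illustrative example, not from the original driver:
- * since this register holds physical address bits [35:4], a 4K-aligned
- * keep-warm buffer at DMA address kw_dma would be programmed with a
- * single direct register write of the form (helper name assumed):
- *
- *     iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, kw_dma >> 4);
- */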
-
-
-/**
- * TFD Circular Buffers Base (CBBC) addresses
- *
- * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
- * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
- * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
- * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
- * aligned (address bits 0-7 must be 0).
- *
- * Bit fields in each pointer register:
- *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
- */
-#define FH_MEM_CBBC_LOWER_BOUND          (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND          (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x)  (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
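-
-/*
- * Editor's note -- illustrative example, not from the original driver:
- * each base pointer register keeps address bits [35:8] of a 256-byte
- * aligned TFD ring, so pointing Tx queue txq_id at its DRAM ring is a
- * single write of the ring's DMA address shifted right by 8
- * (helper name assumed):
- *
- *     iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- *                               tfd_ring_dma >> 8);
- */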
-
-
-/**
- * Rx SRAM Control and Status Registers (RSCSR)
- *
- * These registers provide handshake between driver and 4965 for the Rx queue
- * (this queue handles *all* command responses, notifications, Rx data, etc.
- * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
- * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
- * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
- * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
- * mapping between RBDs and RBs.
- *
- * Driver must allocate host DRAM memory for the following, and set the
- * physical address of each into 4965 registers:
- *
- * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
- *     entries (although any power of 2, up to 4096, is selectable by driver).
- *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
- *     (typically 4K, although 8K or 16K are also selectable by driver).
- *     Driver sets up RB size and number of RBDs in the CB via Rx config
- *     register FH_MEM_RCSR_CHNL0_CONFIG_REG.
- *
- *     Bit fields within one RBD:
- *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
- *
- *     Driver sets physical address [35:8] of base of RBD circular buffer
- *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
- *
- * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
- *     (RBs) have been filled, via a "write pointer", actually the index of
- *     the RB's corresponding RBD within the circular buffer.  Driver sets
- *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
- *
- *     Bit fields in lower dword of Rx status buffer (upper dword not used
- *     by driver; see struct iwl4965_shared, val0):
- *     31-12:  Not used by driver
- *     11- 0:  Index of last filled Rx buffer descriptor
- *             (4965 writes, driver reads this value)
- *
- * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
- * enter pointers to these RBs into contiguous RBD circular buffer entries,
- * and update the 4965's "write" index register,
- * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
- *
- * This "write" index corresponds to the *next* RBD that the driver will make
- * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
- * the circular buffer.  This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example), and must
- * wrap back to 0 at the end of the circular buffer (but don't wrap before
- * "read" index has advanced past 1!  See below).
- * NOTE:  4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
- *
- * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
- * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
- * to tell the driver the index of the latest filled RBD.  The driver must
- * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
- *
- * The driver must also internally keep track of a third index, which is the
- * next RBD to process.  When receiving an Rx interrupt, driver should process
- * all filled but unprocessed RBs up to, but not including, the RB
- * corresponding to the "read" index.  For example, if "read" index becomes "1",
- * driver may process the RB pointed to by RBD 0.  Depending on volume of
- * traffic, there may be many RBs to process.
- *
- * If read index == write index, 4965 thinks there is no room to put new data.
- * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
- * be safe, make sure that there is a gap of at least 2 RBDs between "write"
- * and "read" indexes; that is, make sure that there are no more than 254
- * buffers waiting to be filled.
- */
-#define FH_MEM_RSCSR_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0xBC0)
-#define FH_MEM_RSCSR_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RSCSR_CHNL0             (FH_MEM_RSCSR_LOWER_BOUND)
-
-/**
- * Physical base address of 8-byte Rx Status buffer.
- * Bit fields:
- *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
- */
-#define FH_RSCSR_CHNL0_STTS_WPTR_REG   (FH_MEM_RSCSR_CHNL0)
-
-/**
- * Physical base address of Rx Buffer Descriptor Circular Buffer.
- * Bit fields:
- *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
- */
-#define FH_RSCSR_CHNL0_RBDCB_BASE_REG  (FH_MEM_RSCSR_CHNL0 + 0x004)
-
-/**
- * Rx write pointer (index, really!).
- * Bit fields:
- *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
- *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
- */
-#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG  (FH_MEM_RSCSR_CHNL0 + 0x008)
-#define FH_RSCSR_CHNL0_WPTR        (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
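-
-/*
- * Editor's note -- illustrative example, not from the original driver:
- * per the rules above, the driver only ever exposes the write index in
- * multiples of 8, so an update of this register typically looks like
- * (helper name assumed):
- *
- *     iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
- *                               rxq_write_index & ~0x7);
- */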
-
-
-/**
- * Rx Config/Status Registers (RCSR)
- * Rx Config Reg for channel 0 (only channel used)
- *
- * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
- * normal operation (see bit fields).
- *
- * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG        for
- * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
- *
- * Bit fields:
- * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- *        '10' operate normally
- * 29-24: reserved
- * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
- *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
- * 19-18: reserved
- * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
- *        '10' 12K, '11' 16K.
- * 15-14: reserved
- * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
- * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
- *        typical value 0x10 (about 1/2 msec)
- *  3- 0: reserved
- */
-#define FH_MEM_RCSR_LOWER_BOUND      (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RCSR_UPPER_BOUND      (FH_MEM_LOWER_BOUND + 0xCC0)
-#define FH_MEM_RCSR_CHNL0            (FH_MEM_RCSR_LOWER_BOUND)
-
-#define FH_MEM_RCSR_CHNL0_CONFIG_REG   (FH_MEM_RCSR_CHNL0)
-
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bits 12 */
-#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000) /* bits 16-17 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
-#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
-
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS       (20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS     (4)
-#define RX_RB_TIMEOUT  (0x10)
-
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
-
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
-
-#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
-
-#define FH_RSCSR_FRAME_SIZE_MSK        (0x00003FFF)    /* bits 0-13 */
-
-/**
- * Rx Shared Status Registers (RSSR)
- *
- * After stopping Rx DMA channel (writing 0 to
- * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
- * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
- *
- * Bit fields:
- *  24:  1 = Channel 0 is idle
- *
- * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
- * contain default values that should not be altered by the driver.
- */
-#define FH_MEM_RSSR_LOWER_BOUND           (FH_MEM_LOWER_BOUND + 0xC40)
-#define FH_MEM_RSSR_UPPER_BOUND           (FH_MEM_LOWER_BOUND + 0xD00)
-
-#define FH_MEM_RSSR_SHARED_CTRL_REG       (FH_MEM_RSSR_LOWER_BOUND)
-#define FH_MEM_RSSR_RX_STATUS_REG      (FH_MEM_RSSR_LOWER_BOUND + 0x004)
-#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
-                                       (FH_MEM_RSSR_LOWER_BOUND + 0x008)
-
-#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE      (0x01000000)
-
-#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT        28
-
-/* TFDB  Area - TFDs buffer table */
-#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
-#define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
-#define FH_TFDIB_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0x958)
-#define FH_TFDIB_CTRL0_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
-#define FH_TFDIB_CTRL1_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-
-/**
- * Transmit DMA Channel Control/Status Registers (TCSR)
- *
- * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
- * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
- * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
- *
- * To use a Tx DMA channel, driver must initialize its
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
- *
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
- *
- * All other bits should be 0.
- *
- * Bit fields:
- * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- *        '10' operate normally
- * 29- 4: Reserved, set to "0"
- *     3: Enable internal DMA requests (1, normal operation), disable (0)
- *  2- 0: Reserved, set to "0"
- */
-#define FH_TCSR_LOWER_BOUND  (FH_MEM_LOWER_BOUND + 0xD00)
-#define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)
-
-/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH49_TCSR_CHNL_NUM                            (7)
-#define FH50_TCSR_CHNL_NUM                            (8)
-
-/* TCSR: tx_config register values */
-#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)      \
-               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
-#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl)      \
-               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)     \
-               (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF         (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV         (0x00000001)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE   (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE    (0x00000008)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT      (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD     (0x00100000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD      (0x00200000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT       (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD      (0x00400000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD       (0x00800000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE       (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF   (0x40000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE      (0x80000000)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY     (0x00000000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT      (0x00002000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID     (0x00000003)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM         (20)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX         (12)
-
-/**
- * Tx Shared Status Registers (TSSR)
- *
- * After stopping Tx DMA channel (writing 0 to
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
- * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
- * (channel's buffers empty | no pending requests).
- *
- * Bit fields:
- * 31-24:  1 = Channel buffers empty (channel 7:0)
- * 23-16:  1 = No pending requests (channel 7:0)
- */
-#define FH_TSSR_LOWER_BOUND            (FH_MEM_LOWER_BOUND + 0xEA0)
-#define FH_TSSR_UPPER_BOUND            (FH_MEM_LOWER_BOUND + 0xEC0)
-
-#define FH_TSSR_TX_STATUS_REG          (FH_TSSR_LOWER_BOUND + 0x010)
-
-/**
- * Bit fields for TSSR (Tx Shared Status & Control) error status register:
- * 31:  Indicates an address error when accessing internal memory
- *     uCode/driver must write "1" in order to clear this flag
- * 30:  Indicates that Host did not send the expected number of dwords to FH
- *     uCode/driver must write "1" in order to clear this flag
- * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
- *     command was received from the scheduler while the TRB was already full
- *     with a previous command
- *     uCode/driver must write "1" in order to clear this flag
- * 7-0: Each status bit indicates a channel's TxCredit error. When an error
- *     bit is set, it indicates that the FH has received a full indication
- *     from the RTC TxFIFO and the current value of the TxCredit counter was
- *     not equal to zero. This means that the credit mechanism was not
- *     synchronized to the TxFIFO status
- *     uCode/driver must write "1" in order to clear this flag
- */
-#define FH_TSSR_TX_ERROR_REG           (FH_TSSR_LOWER_BOUND + 0x018)
-
-#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
-
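A sketch of the stop-and-poll sequence the TSSR comment describes, using the iwl_poll_direct_bit() helper from iwl-io.h; the 1000 microsecond timeout is an illustrative value, not taken from the driver:

	/* Pause the channel, then wait until its buffers drain */
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
	if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000) < 0)
		IWL_ERR(priv, "Timeout stopping DMA channel %d\n", chnl);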
-/* Tx service channels */
-#define FH_SRVC_CHNL           (9)
-#define FH_SRVC_LOWER_BOUND    (FH_MEM_LOWER_BOUND + 0x9C8)
-#define FH_SRVC_UPPER_BOUND    (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
-               (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
-
-#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
-/* Instruct FH to increment the retry count of a packet when
- * it is brought from the memory to TX-FIFO
- */
-#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN   (0x00000002)
-
-#define RX_QUEUE_SIZE                         256
-#define RX_QUEUE_MASK                         255
-#define RX_QUEUE_SIZE_LOG                     8
-
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-/**
- * struct iwl_rb_status - receive buffer status
- *     host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
- *     into which the last frame was written
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
- *     which was transferred
- */
-struct iwl_rb_status {
-       __le16 closed_rb_num;
-       __le16 closed_fr_num;
-       __le16 finished_rb_num;
-       __le16 finished_fr_nam;
-       __le32 __unused; /* 3945 only */
-} __packed;
-
-
-#define TFD_QUEUE_SIZE_MAX      (256)
-#define TFD_QUEUE_SIZE_BC_DUP  (64)
-#define TFD_QUEUE_BC_SIZE      (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
-#define IWL_NUM_OF_TBS         20
-
-static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
-{
-       return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
-}
-/**
- * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
- *
- * This structure contains the DMA address and length of one Tx buffer
- *
- * @lo: low [31:0] portion of the DMA address of the TX buffer;
- *     every even-numbered entry is unaligned on a 16-bit boundary
- * @hi_n_len: 0-3 [35:32] portion of the DMA address
- *          4-15 length of the tx buffer
- */
-struct iwl_tfd_tb {
-       __le32 lo;
-       __le16 hi_n_len;
-} __packed;
-
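A sketch of how a driver might fill one such TB entry from a DMA mapping, using iwl_legacy_get_dma_hi_addr() above; the helper name is hypothetical, and put_unaligned_le32() (from asm/unaligned.h) is used because even-numbered entries are not 32-bit aligned:

static inline void iwl_tfd_tb_set(struct iwl_tfd_tb *tb, dma_addr_t addr, u16 len)
{
	u16 hi_n_len = len << 4;		/* length lives in bits 4-15 */

	put_unaligned_le32(addr, &tb->lo);	/* low 32 bits of the DMA address */
	hi_n_len |= iwl_legacy_get_dma_hi_addr(addr); /* bits 35:32 go in bits 0-3 */
	tb->hi_n_len = cpu_to_le16(hi_n_len);
}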
-/**
- * struct iwl_tfd
- *
- * Transmit Frame Descriptor (TFD)
- *
- * @__reserved1: reserved
- * @num_tbs: 0-4 number of active tbs
- *          5   reserved
- *          6-7 padding (not used)
- * @tbs: transmit frame buffer descriptors (up to 20)
- * @__pad: padding
- *
- * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
- * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
- *
- * Driver must indicate the physical address of the base of each
- * circular buffer via the FH_MEM_CBBC_QUEUE registers.
- *
- * Each TFD contains pointer/size information for up to 20 data buffers
- * in host DRAM.  These buffers collectively contain the (one) frame described
- * by the TFD.  Each buffer must be a single contiguous block of memory within
- * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
- * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
- * Tx frame, up to 8 KBytes in size.
- *
- * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
- */
-struct iwl_tfd {
-       u8 __reserved1[3];
-       u8 num_tbs;
-       struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
-       __le32 __pad;
-} __packed;
-
-/* Keep Warm Size */
-#define IWL_KW_SIZE 0x1000     /* 4k */
-
-#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644 (file)
index ce1fc9f..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *iwl_legacy_get_cmd_string(u8 cmd)
-{
-       switch (cmd) {
-               IWL_CMD(REPLY_ALIVE);
-               IWL_CMD(REPLY_ERROR);
-               IWL_CMD(REPLY_RXON);
-               IWL_CMD(REPLY_RXON_ASSOC);
-               IWL_CMD(REPLY_QOS_PARAM);
-               IWL_CMD(REPLY_RXON_TIMING);
-               IWL_CMD(REPLY_ADD_STA);
-               IWL_CMD(REPLY_REMOVE_STA);
-               IWL_CMD(REPLY_WEPKEY);
-               IWL_CMD(REPLY_3945_RX);
-               IWL_CMD(REPLY_TX);
-               IWL_CMD(REPLY_RATE_SCALE);
-               IWL_CMD(REPLY_LEDS_CMD);
-               IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
-               IWL_CMD(REPLY_CHANNEL_SWITCH);
-               IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
-               IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
-               IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
-               IWL_CMD(POWER_TABLE_CMD);
-               IWL_CMD(PM_SLEEP_NOTIFICATION);
-               IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
-               IWL_CMD(REPLY_SCAN_CMD);
-               IWL_CMD(REPLY_SCAN_ABORT_CMD);
-               IWL_CMD(SCAN_START_NOTIFICATION);
-               IWL_CMD(SCAN_RESULTS_NOTIFICATION);
-               IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
-               IWL_CMD(BEACON_NOTIFICATION);
-               IWL_CMD(REPLY_TX_BEACON);
-               IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
-               IWL_CMD(REPLY_BT_CONFIG);
-               IWL_CMD(REPLY_STATISTICS_CMD);
-               IWL_CMD(STATISTICS_NOTIFICATION);
-               IWL_CMD(CARD_STATE_NOTIFICATION);
-               IWL_CMD(MISSED_BEACONS_NOTIFICATION);
-               IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
-               IWL_CMD(SENSITIVITY_CMD);
-               IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
-               IWL_CMD(REPLY_RX_PHY_CMD);
-               IWL_CMD(REPLY_RX_MPDU_CMD);
-               IWL_CMD(REPLY_RX);
-               IWL_CMD(REPLY_COMPRESSED_BA);
-       default:
-               return "UNKNOWN";
-
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
-
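The IWL_CMD() macro used in the switch above is presumably the usual stringify-in-a-case helper, something along the lines of:

#define IWL_CMD(x) case x: return #x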
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
-                                    struct iwl_device_cmd *cmd,
-                                    struct iwl_rx_packet *pkt)
-{
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
-               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
-               return;
-       }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       switch (cmd->hdr.cmd) {
-       case REPLY_TX_LINK_QUALITY_CMD:
-       case SENSITIVITY_CMD:
-               IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
-               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
-               break;
-       default:
-               IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
-               iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
-       }
-#endif
-}
-
-static int
-iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-       int ret;
-
-       BUG_ON(!(cmd->flags & CMD_ASYNC));
-
-       /* An asynchronous command can not expect an SKB to be set. */
-       BUG_ON(cmd->flags & CMD_WANT_SKB);
-
-       /* Assign a generic callback if one is not provided */
-       if (!cmd->callback)
-               cmd->callback = iwl_legacy_generic_cmd_callback;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return -EBUSY;
-
-       ret = iwl_legacy_enqueue_hcmd(priv, cmd);
-       if (ret < 0) {
-               IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
-                         iwl_legacy_get_cmd_string(cmd->id), ret);
-               return ret;
-       }
-       return 0;
-}
-
-int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-       int cmd_idx;
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       BUG_ON(cmd->flags & CMD_ASYNC);
-
-        /* A synchronous command can not have a callback set. */
-       BUG_ON(cmd->callback);
-
-       IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
-                       iwl_legacy_get_cmd_string(cmd->id));
-
-       set_bit(STATUS_HCMD_ACTIVE, &priv->status);
-       IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
-                       iwl_legacy_get_cmd_string(cmd->id));
-
-       cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
-       if (cmd_idx < 0) {
-               ret = cmd_idx;
-               IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
-                         iwl_legacy_get_cmd_string(cmd->id), ret);
-               goto out;
-       }
-
-       ret = wait_event_timeout(priv->wait_command_queue,
-                       !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
-                       HOST_COMPLETE_TIMEOUT);
-       if (!ret) {
-               if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
-                       IWL_ERR(priv,
-                               "Error sending %s: time out after %dms.\n",
-                               iwl_legacy_get_cmd_string(cmd->id),
-                               jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
-                       clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-                       IWL_DEBUG_INFO(priv,
-                               "Clearing HCMD_ACTIVE for command %s\n",
-                                      iwl_legacy_get_cmd_string(cmd->id));
-                       ret = -ETIMEDOUT;
-                       goto cancel;
-               }
-       }
-
-       if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
-               IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
-                              iwl_legacy_get_cmd_string(cmd->id));
-               ret = -ECANCELED;
-               goto fail;
-       }
-       if (test_bit(STATUS_FW_ERROR, &priv->status)) {
-               IWL_ERR(priv, "Command %s failed: FW Error\n",
-                              iwl_legacy_get_cmd_string(cmd->id));
-               ret = -EIO;
-               goto fail;
-       }
-       if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
-               IWL_ERR(priv, "Error: Response NULL in '%s'\n",
-                         iwl_legacy_get_cmd_string(cmd->id));
-               ret = -EIO;
-               goto cancel;
-       }
-
-       ret = 0;
-       goto out;
-
-cancel:
-       if (cmd->flags & CMD_WANT_SKB) {
-               /*
-                * Cancel the CMD_WANT_SKB flag for the cmd in the
-                * TX cmd queue. Otherwise in case the cmd comes
-                * in later, it will possibly set an invalid
-                * address (cmd->meta.source).
-                */
-               priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
-                                                       ~CMD_WANT_SKB;
-       }
-fail:
-       if (cmd->reply_page) {
-               iwl_legacy_free_pages(priv, cmd->reply_page);
-               cmd->reply_page = 0;
-       }
-out:
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
-
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-       if (cmd->flags & CMD_ASYNC)
-               return iwl_legacy_send_cmd_async(priv, cmd);
-
-       return iwl_legacy_send_cmd_sync(priv, cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd);
-
-int
-iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
-       struct iwl_host_cmd cmd = {
-               .id = id,
-               .len = len,
-               .data = data,
-       };
-
-       return iwl_legacy_send_cmd_sync(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
-
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
-                          u8 id, u16 len, const void *data,
-                          void (*callback)(struct iwl_priv *priv,
-                                           struct iwl_device_cmd *cmd,
-                                           struct iwl_rx_packet *pkt))
-{
-       struct iwl_host_cmd cmd = {
-               .id = id,
-               .len = len,
-               .data = data,
-       };
-
-       cmd.flags |= CMD_ASYNC;
-       cmd.callback = callback;
-
-       return iwl_legacy_send_cmd_async(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
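Typical usage of the synchronous PDU helper, sketched with a REPLY_LEDS_CMD payload; the field values are placeholders and the real LED path goes through per-device ops, so this is only illustrative:

	/* Caller must hold priv->mutex (the sync path asserts it) */
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL,
	};
	int ret;

	ret = iwl_legacy_send_cmd_pdu(priv, REPLY_LEDS_CMD,
				      sizeof(led_cmd), &led_cmd);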
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644 (file)
index 5cf23ea..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_helpers_h__
-#define __iwl_legacy_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
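IWL_MASK(lo, hi) builds a contiguous bit mask covering bits lo..hi inclusive; for example:

	/* bits 2..5 set: (1 << 5) | ((1 << 5) - (1 << 2)) = 0x20 | 0x1c = 0x3c */
	u32 mask = IWL_MASK(2, 5);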
-static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
-       struct ieee80211_hw *hw)
-{
-       return &hw->conf;
-}
-
-/**
- * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
-{
-       return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
-{
-       return --index & (n_bd - 1);
-}
-
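Because n_bd is a power of two, the wrap helpers reduce to a simple mask; for example with the 256-entry TFD queue:

	/* with n_bd = TFD_QUEUE_SIZE_MAX (256) */
	iwl_legacy_queue_inc_wrap(255, 256);	/* -> 0   (256 & 255) */
	iwl_legacy_queue_dec_wrap(0, 256);	/* -> 255 (-1 & 255)  */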
-/* TODO: Move fw_desc functions to iwl-pci.ko */
-static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
-                                   struct fw_desc *desc)
-{
-       if (desc->v_addr)
-               dma_free_coherent(&pci_dev->dev, desc->len,
-                                 desc->v_addr, desc->p_addr);
-       desc->v_addr = NULL;
-       desc->len = 0;
-}
-
-static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
-                                   struct fw_desc *desc)
-{
-       if (!desc->len) {
-               desc->v_addr = NULL;
-               return -EINVAL;
-       }
-
-       desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
-                                         &desc->p_addr, GFP_KERNEL);
-       return (desc->v_addr != NULL) ? 0 : -ENOMEM;
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void
-iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
-       BUG_ON(ac > 3);   /* only have 2 bits */
-       BUG_ON(hwq > 31); /* only use 5 bits */
-
-       txq->swq_id = (hwq << 2) | ac;
-}
-
-static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
-                                 struct iwl_tx_queue *txq)
-{
-       u8 queue = txq->swq_id;
-       u8 ac = queue & 3;
-       u8 hwq = (queue >> 2) & 0x1f;
-
-       if (test_and_clear_bit(hwq, priv->queue_stopped))
-               if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
-                       ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
-                                 struct iwl_tx_queue *txq)
-{
-       u8 queue = txq->swq_id;
-       u8 ac = queue & 3;
-       u8 hwq = (queue >> 2) & 0x1f;
-
-       if (!test_and_set_bit(hwq, priv->queue_stopped))
-               if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
-                       ieee80211_stop_queue(priv->hw, ac);
-}
-
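A worked example of the swq_id encoding above, with hypothetical queue numbers:

	/* hypothetical mapping: AC 2 -> HW queue 5 */
	iwl_legacy_set_swq_id(txq, 2, 5);	/* txq->swq_id = (5 << 2) | 2 = 0x16 */
	/* wake/stop then decode: ac = 0x16 & 3 = 2, hwq = (0x16 >> 2) & 0x1f = 5 */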
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
-{
-       clear_bit(STATUS_INT_ENABLED, &priv->status);
-
-       /* disable interrupts from uCode/NIC to host */
-       iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-       /* acknowledge/clear/reset any interrupts still pending
-        * from uCode or flow handler (Rx/Tx DMA) */
-       iwl_write32(priv, CSR_INT, 0xffffffff);
-       iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
-       IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
-{
-       IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
-       iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
-{
-       IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
-       set_bit(STATUS_INT_ENABLED, &priv->status);
-       iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
-                                          u16 tsf_bits)
-{
-       return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
-                                           u16 tsf_bits)
-{
-       return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-#endif                         /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644 (file)
index 5cc5d34..0000000
+++ /dev/null
@@ -1,545 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_io_h__
-#define __iwl_legacy_io_h__
-
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-devtrace.h"
-
-/*
- * IO, register, and NIC memory access functions
- *
- * NOTE on naming convention and macro usage for these
- *
- * A single _ prefix before an access function means that no state
- * check or debug information is printed when that function is called.
- *
- * A double __ prefix before an access function means that state is checked
- * and the current line number and caller function name are printed in addition
- * to any other debug output.
- *
- * The non-prefixed name is the #define that maps the caller into a
- * #define that provides the caller's name and __LINE__ to the double
- * prefix version.
- *
- * If you wish to call the function without any debug or state checking,
- * you should use the single _ prefix version (as is used by dependent IO
- * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
- * _iwl_legacy_read32.)
- *
- * These declarations are *extremely* useful in quickly isolating code deltas
- * which result in misconfiguration of the hardware I/O.  In combination with
- * git-bisect and the IO debug level you can quickly determine the specific
- * commit which breaks the IO sequence to the hardware.
- *
- */
-
-static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
-{
-       trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
-       iowrite8(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
-                                u32 ofs, u8 val)
-{
-       IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
-       _iwl_legacy_write8(priv, ofs, val);
-}
-#define iwl_write8(priv, ofs, val) \
-       __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
-#endif
-
-
-static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
-{
-       trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
-       iowrite32(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
-                                u32 ofs, u32 val)
-{
-       IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
-       _iwl_legacy_write32(priv, ofs, val);
-}
-#define iwl_write32(priv, ofs, val) \
-       __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
-#endif
-
-static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
-{
-       u32 val = ioread32(priv->hw_base + ofs);
-       trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
-       return val;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32
-__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
-{
-       IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
-       return _iwl_legacy_read32(priv, ofs);
-}
-#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
-#else
-#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
-#endif
-
-#define IWL_POLL_INTERVAL 10   /* microseconds */
-static inline int
-_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
-                               u32 bits, u32 mask, int timeout)
-{
-       int t = 0;
-
-       do {
-               if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
-                       return t;
-               udelay(IWL_POLL_INTERVAL);
-               t += IWL_POLL_INTERVAL;
-       } while (t < timeout);
-
-       return -ETIMEDOUT;
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
-                                struct iwl_priv *priv, u32 addr,
-                                u32 bits, u32 mask, int timeout)
-{
-       int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
-       IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
-                    addr, bits, mask,
-                    unlikely(ret  == -ETIMEDOUT) ? "timeout" : "", f, l);
-       return ret;
-}
-#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
-       __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
-       bits, mask, timeout)
-#else
-#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
-#endif
-
-static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
-       _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_set_bit(const char *f, u32 l,
-                                struct iwl_priv *priv, u32 reg, u32 mask)
-{
-       u32 val = _iwl_legacy_read32(priv, reg) | mask;
-       IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
-                                                       mask, val);
-       _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&p->reg_lock, reg_flags);
-       __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
-       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&p->reg_lock, reg_flags);
-       _iwl_legacy_set_bit(p, r, m);
-       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline void
-_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
-       _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_clear_bit(const char *f, u32 l,
-                                  struct iwl_priv *priv, u32 reg, u32 mask)
-{
-       u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
-       IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
-       _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&p->reg_lock, reg_flags);
-       __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
-       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&p->reg_lock, reg_flags);
-       _iwl_legacy_clear_bit(p, r, m);
-       spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
-{
-       int ret;
-       u32 val;
-
-       /* this bit wakes up the NIC */
-       _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
-                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-       /*
-        * These bits say the device is running, and should keep running for
-        * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
-        * but they do not indicate that embedded SRAM is restored yet;
-        * 3945 and 4965 have volatile SRAM, and must save/restore contents
-        * to/from host DRAM when sleeping/waking for power-saving.
-        * Each direction takes approximately 1/4 millisecond; with this
-        * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
-        * series of register accesses are expected (e.g. reading Event Log),
-        * to keep device from sleeping.
-        *
-        * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
-        * SRAM is okay/restored.  We don't check that here because this call
-        * is just for hardware register access; but GP1 MAC_SLEEP check is a
-        * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
-        *
-        */
-       ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
-                          CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
-                          (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
-                           CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
-       if (ret < 0) {
-               val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
-               IWL_ERR(priv,
-                       "MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
-               _iwl_legacy_write32(priv, CSR_RESET,
-                               CSR_RESET_REG_FLAG_FORCE_NMI);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
-                                              struct iwl_priv *priv)
-{
-       IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
-       return _iwl_legacy_grab_nic_access(priv);
-}
-#define iwl_grab_nic_access(priv) \
-       __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_grab_nic_access(priv) \
-       _iwl_legacy_grab_nic_access(priv)
-#endif
-
-static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
-{
-       _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
-                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
-                                           struct iwl_priv *priv)
-{
-
-       IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
-       _iwl_legacy_release_nic_access(priv);
-}
-#define iwl_release_nic_access(priv) \
-       __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_release_nic_access(priv) \
-       _iwl_legacy_release_nic_access(priv)
-#endif
-
-static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
-       return _iwl_legacy_read32(priv, reg);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
-                                       struct iwl_priv *priv, u32 reg)
-{
-       u32 value = _iwl_legacy_read_direct32(priv, reg);
-       IWL_DEBUG_IO(priv,
-                       "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
-                    f, l);
-       return value;
-}
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
-       u32 value;
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-       value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-       return value;
-}
-
-#else
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
-       u32 value;
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-       value = _iwl_legacy_read_direct32(priv, reg);
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-       return value;
-
-}
-#endif
-
-static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
-                                        u32 reg, u32 value)
-{
-       _iwl_legacy_write32(priv, reg, value);
-}
-static inline void
-iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       if (!iwl_grab_nic_access(priv)) {
-               _iwl_legacy_write_direct32(priv, reg, value);
-               iwl_release_nic_access(priv);
-       }
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
-                                              u32 reg, u32 len, u32 *values)
-{
-       u32 count = sizeof(u32);
-
-       if ((priv != NULL) && (values != NULL)) {
-               for (; 0 < len; len -= count, reg += count, values++)
-                       iwl_legacy_write_direct32(priv, reg, *values);
-       }
-}
-
-static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
-                                      u32 mask, int timeout)
-{
-       int t = 0;
-
-       do {
-               if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
-                       return t;
-               udelay(IWL_POLL_INTERVAL);
-               t += IWL_POLL_INTERVAL;
-       } while (t < timeout);
-
-       return -ETIMEDOUT;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
-                                           struct iwl_priv *priv,
-                                           u32 addr, u32 mask, int timeout)
-{
-       int ret  = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
-
-       if (unlikely(ret == -ETIMEDOUT))
-               IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
-                            "timedout - %s %d\n", addr, mask, f, l);
-       else
-               IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
-                            "- %s %d\n", addr, mask, ret, f, l);
-       return ret;
-}
-#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
-__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
-#else
-#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
-#endif
-
-static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
-       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
-       rmb();
-       return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
-}
-static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
-       unsigned long reg_flags;
-       u32 val;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-       val = _iwl_legacy_read_prph(priv, reg);
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-       return val;
-}
-
-static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
-                                            u32 addr, u32 val)
-{
-       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
-                             ((addr & 0x0000FFFF) | (3 << 24)));
-       wmb();
-       _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
-}
-
-static inline void
-iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       if (!iwl_grab_nic_access(priv)) {
-               _iwl_legacy_write_prph(priv, addr, val);
-               iwl_release_nic_access(priv);
-       }
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
-_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
-
-static inline void
-iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-       _iwl_legacy_set_bits_prph(priv, reg, mask);
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
-_iwl_legacy_write_prph(priv, reg,                              \
-                ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
-
-static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
-                               u32 bits, u32 mask)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-       _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
-                                                *priv, u32 reg, u32 mask)
-{
-       unsigned long reg_flags;
-       u32 val;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-       val = _iwl_legacy_read_prph(priv, reg);
-       _iwl_legacy_write_prph(priv, reg, (val & ~mask));
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
-{
-       unsigned long reg_flags;
-       u32 value;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       iwl_grab_nic_access(priv);
-
-       _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
-       rmb();
-       value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-
-       iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-       return value;
-}
-
-static inline void
-iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       if (!iwl_grab_nic_access(priv)) {
-               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
-               wmb();
-               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
-               iwl_release_nic_access(priv);
-       }
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void
-iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
-                                         u32 len, u32 *values)
-{
-       unsigned long reg_flags;
-
-       spin_lock_irqsave(&priv->reg_lock, reg_flags);
-       if (!iwl_grab_nic_access(priv)) {
-               _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
-               wmb();
-               for (; 0 < len; len -= sizeof(u32), values++)
-                       _iwl_legacy_write_direct32(priv,
-                                       HBUS_TARG_MEM_WDAT, *values);
-
-               iwl_release_nic_access(priv);
-       }
-       spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644 (file)
index dc568a4..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* default: IWL_LED_BLINK(0) using blinking index table */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
-               "1=On(RF On)/Off(RF Off), 2=blinking");
-
-/* Throughput          OFF time(ms)    ON time (ms)
- *     >300                    25              25
- *     >200 to 300             40              40
- *     >100 to 200             55              55
- *     >70 to 100              65              65
- *     >50 to 70               75              75
- *     >20 to 50               85              85
- *     >10 to 20               95              95
- *     >5 to 10                110             110
- *     >1 to 5                 130             130
- *     >0 to 1                 167             167
- *     <=0                                     SOLID ON
- */
-static const struct ieee80211_tpt_blink iwl_blink[] = {
-       { .throughput = 0, .blink_time = 334 },
-       { .throughput = 1 * 1024 - 1, .blink_time = 260 },
-       { .throughput = 5 * 1024 - 1, .blink_time = 220 },
-       { .throughput = 10 * 1024 - 1, .blink_time = 190 },
-       { .throughput = 20 * 1024 - 1, .blink_time = 170 },
-       { .throughput = 50 * 1024 - 1, .blink_time = 150 },
-       { .throughput = 70 * 1024 - 1, .blink_time = 130 },
-       { .throughput = 100 * 1024 - 1, .blink_time = 110 },
-       { .throughput = 200 * 1024 - 1, .blink_time = 80 },
-       { .throughput = 300 * 1024 - 1, .blink_time = 50 },
-};
-
-/*
- * Adjust led blink rate to compensate on a MAC Clock difference on every HW
- * Led blink rate analysis showed an average deviation of 0% on 3945,
- * 5% on 4965 HW.
- * Need to compensate on the led on/off time per HW according to the deviation
- * to achieve the desired led frequency
- * The calculation is: (100-averageDeviation)/100 * blinkTime
- * For code efficiency the calculation will be:
- *     compensation = (100 - averageDeviation) * 64 / 100
- *     NewBlinkTime = (compensation * BlinkTime) / 64
- */
-static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
-                                   u8 time, u16 compensation)
-{
-       if (!compensation) {
-               IWL_ERR(priv, "undefined blink compensation: "
-                       "use pre-defined blinking time\n");
-               return time;
-       }
-
-       return (u8)((time * compensation) >> 6);
-}
-
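A worked example of the compensation formula, assuming the 5% deviation quoted above for 4965 hardware (the actual led_compensation value comes from the per-device config):

	/* compensation = (100 - 5) * 64 / 100 = 60 (integer math)            */
	/* a nominal 130 ms blink becomes (130 * 60) >> 6 = 7800 / 64 = 121 ms */
	u8 adjusted = iwl_legacy_blink_compensation(priv, 130, 60);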
-/* Set led pattern command */
-static int iwl_legacy_led_cmd(struct iwl_priv *priv,
-                      unsigned long on,
-                      unsigned long off)
-{
-       struct iwl_led_cmd led_cmd = {
-               .id = IWL_LED_LINK,
-               .interval = IWL_DEF_LED_INTRVL
-       };
-       int ret;
-
-       if (!test_bit(STATUS_READY, &priv->status))
-               return -EBUSY;
-
-       if (priv->blink_on == on && priv->blink_off == off)
-               return 0;
-
-       if (off == 0) {
-               /* led is SOLID_ON */
-               on = IWL_LED_SOLID;
-       }
-
-       IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
-                       priv->cfg->base_params->led_compensation);
-       led_cmd.on = iwl_legacy_blink_compensation(priv, on,
-                               priv->cfg->base_params->led_compensation);
-       led_cmd.off = iwl_legacy_blink_compensation(priv, off,
-                               priv->cfg->base_params->led_compensation);
-
-       ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
-       if (!ret) {
-               priv->blink_on = on;
-               priv->blink_off = off;
-       }
-       return ret;
-}
-
-static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
-                                  enum led_brightness brightness)
-{
-       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-       unsigned long on = 0;
-
-       if (brightness > 0)
-               on = IWL_LED_SOLID;
-
-       iwl_legacy_led_cmd(priv, on, 0);
-}
-
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
-                            unsigned long *delay_on,
-                            unsigned long *delay_off)
-{
-       struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-
-       return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
-}
-
-void iwl_legacy_leds_init(struct iwl_priv *priv)
-{
-       int mode = led_mode;
-       int ret;
-
-       if (mode == IWL_LED_DEFAULT)
-               mode = priv->cfg->led_mode;
-
-       priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
-                                  wiphy_name(priv->hw->wiphy));
-       priv->led.brightness_set = iwl_legacy_led_brightness_set;
-       priv->led.blink_set = iwl_legacy_led_blink_set;
-       priv->led.max_brightness = 1;
-
-       switch (mode) {
-       case IWL_LED_DEFAULT:
-               WARN_ON(1);
-               break;
-       case IWL_LED_BLINK:
-               priv->led.default_trigger =
-                       ieee80211_create_tpt_led_trigger(priv->hw,
-                                       IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
-                                       iwl_blink, ARRAY_SIZE(iwl_blink));
-               break;
-       case IWL_LED_RF_STATE:
-               priv->led.default_trigger =
-                       ieee80211_get_radio_led_name(priv->hw);
-               break;
-       }
-
-       ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
-       if (ret) {
-               kfree(priv->led.name);
-               return;
-       }
-
-       priv->led_registered = true;
-}
-EXPORT_SYMBOL(iwl_legacy_leds_init);
-
-void iwl_legacy_leds_exit(struct iwl_priv *priv)
-{
-       if (!priv->led_registered)
-               return;
-
-       led_classdev_unregister(&priv->led);
-       kfree(priv->led.name);
-}
-EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644 (file)
index f0791f7..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_leds_h__
-#define __iwl_legacy_leds_h__
-
-
-struct iwl_priv;
-
-#define IWL_LED_SOLID 11
-#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
-
-#define IWL_LED_ACTIVITY       (0<<1)
-#define IWL_LED_LINK           (1<<1)
-
-/*
- * LED mode
- *    IWL_LED_DEFAULT:  use device default
- *    IWL_LED_RF_STATE: turn LED on/off based on RF state
- *                     LED ON  = RF ON
- *                     LED OFF = RF OFF
- *    IWL_LED_BLINK:    adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
-       IWL_LED_DEFAULT,
-       IWL_LED_RF_STATE,
-       IWL_LED_BLINK,
-};
-
-void iwl_legacy_leds_init(struct iwl_priv *priv);
-void iwl_legacy_leds_exit(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644 (file)
index 38647e4..0000000
+++ /dev/null
@@ -1,456 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_rs_h__
-#define __iwl_legacy_rs_h__
-
-struct iwl_rate_info {
-       u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
-       u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
-       u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
-       u8 ieee;        /* MAC header:  IWL_RATE_6M_IEEE, etc. */
-       u8 prev_ieee;    /* previous rate in IEEE speeds */
-       u8 next_ieee;    /* next rate in IEEE speeds */
-       u8 prev_rs;      /* previous rate used in rs algo */
-       u8 next_rs;      /* next rate used in rs algo */
-       u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
-       u8 next_rs_tgg;  /* next rate used in TGG rs algo */
-};
-
-struct iwl3945_rate_info {
-       u8 plcp;                /* uCode API:  IWL_RATE_6M_PLCP, etc. */
-       u8 ieee;                /* MAC header:  IWL_RATE_6M_IEEE, etc. */
-       u8 prev_ieee;           /* previous rate in IEEE speeds */
-       u8 next_ieee;           /* next rate in IEEE speeds */
-       u8 prev_rs;             /* previous rate used in rs algo */
-       u8 next_rs;             /* next rate used in rs algo */
-       u8 prev_rs_tgg;         /* previous rate used in TGG rs algo */
-       u8 next_rs_tgg;         /* next rate used in TGG rs algo */
-       u8 table_rs_index;      /* index in rate scale table cmd */
-       u8 prev_table_rs;       /* prev in rate table cmd */
-};
-
-
-/*
- * These serve as indexes into
- * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
- */
-enum {
-       IWL_RATE_1M_INDEX = 0,
-       IWL_RATE_2M_INDEX,
-       IWL_RATE_5M_INDEX,
-       IWL_RATE_11M_INDEX,
-       IWL_RATE_6M_INDEX,
-       IWL_RATE_9M_INDEX,
-       IWL_RATE_12M_INDEX,
-       IWL_RATE_18M_INDEX,
-       IWL_RATE_24M_INDEX,
-       IWL_RATE_36M_INDEX,
-       IWL_RATE_48M_INDEX,
-       IWL_RATE_54M_INDEX,
-       IWL_RATE_60M_INDEX,
-       IWL_RATE_COUNT,
-       IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1,     /* Excluding 60M */
-       IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
-       IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
-       IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
-enum {
-       IWL_RATE_6M_INDEX_TABLE = 0,
-       IWL_RATE_9M_INDEX_TABLE,
-       IWL_RATE_12M_INDEX_TABLE,
-       IWL_RATE_18M_INDEX_TABLE,
-       IWL_RATE_24M_INDEX_TABLE,
-       IWL_RATE_36M_INDEX_TABLE,
-       IWL_RATE_48M_INDEX_TABLE,
-       IWL_RATE_54M_INDEX_TABLE,
-       IWL_RATE_1M_INDEX_TABLE,
-       IWL_RATE_2M_INDEX_TABLE,
-       IWL_RATE_5M_INDEX_TABLE,
-       IWL_RATE_11M_INDEX_TABLE,
-       IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
-};
-
-enum {
-       IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
-       IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
-       IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
-       IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
-       IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-};
-
-/* #define vs. enum to keep from defaulting to 'large integer' */
-#define        IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
-#define        IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
-#define        IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
-#define        IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
-#define        IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
-#define        IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
-#define        IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
-#define        IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
-#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
-#define        IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
-#define        IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
-#define        IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
-#define        IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
-
-/* uCode API values for legacy bit rates, both OFDM and CCK */
-enum {
-       IWL_RATE_6M_PLCP  = 13,
-       IWL_RATE_9M_PLCP  = 15,
-       IWL_RATE_12M_PLCP = 5,
-       IWL_RATE_18M_PLCP = 7,
-       IWL_RATE_24M_PLCP = 9,
-       IWL_RATE_36M_PLCP = 11,
-       IWL_RATE_48M_PLCP = 1,
-       IWL_RATE_54M_PLCP = 3,
-       IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
-       IWL_RATE_1M_PLCP  = 10,
-       IWL_RATE_2M_PLCP  = 20,
-       IWL_RATE_5M_PLCP  = 55,
-       IWL_RATE_11M_PLCP = 110,
-       /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
-};
-
-/* uCode API values for OFDM high-throughput (HT) bit rates */
-enum {
-       IWL_RATE_SISO_6M_PLCP = 0,
-       IWL_RATE_SISO_12M_PLCP = 1,
-       IWL_RATE_SISO_18M_PLCP = 2,
-       IWL_RATE_SISO_24M_PLCP = 3,
-       IWL_RATE_SISO_36M_PLCP = 4,
-       IWL_RATE_SISO_48M_PLCP = 5,
-       IWL_RATE_SISO_54M_PLCP = 6,
-       IWL_RATE_SISO_60M_PLCP = 7,
-       IWL_RATE_MIMO2_6M_PLCP  = 0x8,
-       IWL_RATE_MIMO2_12M_PLCP = 0x9,
-       IWL_RATE_MIMO2_18M_PLCP = 0xa,
-       IWL_RATE_MIMO2_24M_PLCP = 0xb,
-       IWL_RATE_MIMO2_36M_PLCP = 0xc,
-       IWL_RATE_MIMO2_48M_PLCP = 0xd,
-       IWL_RATE_MIMO2_54M_PLCP = 0xe,
-       IWL_RATE_MIMO2_60M_PLCP = 0xf,
-       IWL_RATE_SISO_INVM_PLCP,
-       IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-};
-
-/* MAC header values for bit rates */
-enum {
-       IWL_RATE_6M_IEEE  = 12,
-       IWL_RATE_9M_IEEE  = 18,
-       IWL_RATE_12M_IEEE = 24,
-       IWL_RATE_18M_IEEE = 36,
-       IWL_RATE_24M_IEEE = 48,
-       IWL_RATE_36M_IEEE = 72,
-       IWL_RATE_48M_IEEE = 96,
-       IWL_RATE_54M_IEEE = 108,
-       IWL_RATE_60M_IEEE = 120,
-       IWL_RATE_1M_IEEE  = 2,
-       IWL_RATE_2M_IEEE  = 4,
-       IWL_RATE_5M_IEEE  = 11,
-       IWL_RATE_11M_IEEE = 22,
-};
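
These MAC-header values are just the bit rate expressed in 500 kbps units, which is why 54M maps to 108 and 5.5M to 11. A minimal sketch of the conversion back to whole Mbps (rate_ieee_to_mbps() is a hypothetical helper, not part of the driver):

static inline unsigned int rate_ieee_to_mbps(unsigned int ieee)
{
        /* e.g. IWL_RATE_54M_IEEE (108) -> 54; IWL_RATE_5M_IEEE (11) -> 5 */
        return ieee / 2;
}
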
-
-#define IWL_CCK_BASIC_RATES_MASK    \
-       (IWL_RATE_1M_MASK          | \
-       IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK          \
-       (IWL_CCK_BASIC_RATES_MASK  | \
-       IWL_RATE_5M_MASK          | \
-       IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK   \
-       (IWL_RATE_6M_MASK         | \
-       IWL_RATE_12M_MASK         | \
-       IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK         \
-       (IWL_OFDM_BASIC_RATES_MASK | \
-       IWL_RATE_9M_MASK          | \
-       IWL_RATE_18M_MASK         | \
-       IWL_RATE_36M_MASK         | \
-       IWL_RATE_48M_MASK         | \
-       IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK         \
-       (IWL_OFDM_BASIC_RATES_MASK | \
-        IWL_CCK_BASIC_RATES_MASK)
-
-#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
-#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
-
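
A small sketch of how these bitmaps are meant to be consumed, assuming the mask macros above; rate_index_is_basic() is a hypothetical helper, not driver code:

static int rate_index_is_basic(unsigned int rate_index)
{
        /* the basic set is the union of the CCK and OFDM basic masks */
        return ((1u << rate_index) & IWL_BASIC_RATES_MASK) != 0;
}
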
-#define IWL_INVALID_VALUE    -1
-
-#define IWL_MIN_RSSI_VAL                 -100
-#define IWL_MAX_RSSI_VAL                    0
-
-/* These values specify how many Tx frame attempts to make before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT       160
-#define IWL_LEGACY_SUCCESS_LIMIT       480
-#define IWL_LEGACY_TABLE_COUNT         160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT  400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT  4500
-#define IWL_NONE_LEGACY_TABLE_COUNT    1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO              12800   /* 100% */
-#define IWL_RATE_SCALE_SWITCH          10880   /*  85% */
-#define IWL_RATE_HIGH_TH               10880   /*  85% */
-#define IWL_RATE_INCREASE_TH           6400    /*  50% */
-#define IWL_RATE_DECREASE_TH           1920    /*  15% */
-
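
The thresholds above are percentages in fixed point, scaled by 128, so 100% is 12800. A stand-alone sketch of that arithmetic (ratio_scaled() is illustrative, not the driver's accounting code):

static int ratio_scaled(int acked, int attempted)
{
        /* per-cent * 128: ratio_scaled(85, 100) == 10880 == IWL_RATE_HIGH_TH */
        return attempted ? (128 * 100 * acked) / attempted : 0;
}
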
-/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1      0
-#define IWL_LEGACY_SWITCH_ANTENNA2      1
-#define IWL_LEGACY_SWITCH_SISO          2
-#define IWL_LEGACY_SWITCH_MIMO2_AB      3
-#define IWL_LEGACY_SWITCH_MIMO2_AC      4
-#define IWL_LEGACY_SWITCH_MIMO2_BC      5
-
-/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1        0
-#define IWL_SISO_SWITCH_ANTENNA2        1
-#define IWL_SISO_SWITCH_MIMO2_AB        2
-#define IWL_SISO_SWITCH_MIMO2_AC        3
-#define IWL_SISO_SWITCH_MIMO2_BC        4
-#define IWL_SISO_SWITCH_GI              5
-
-/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1       0
-#define IWL_MIMO2_SWITCH_ANTENNA2       1
-#define IWL_MIMO2_SWITCH_SISO_A         2
-#define IWL_MIMO2_SWITCH_SISO_B         3
-#define IWL_MIMO2_SWITCH_SISO_C         4
-#define IWL_MIMO2_SWITCH_GI             5
-
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
-
-#define IWL_ACTION_LIMIT               3       /* # possible actions */
-
-#define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
-
-/* load per tid defines for A-MPDU activation */
-#define IWL_AGG_TPT_THREHOLD   0
-#define IWL_AGG_LOAD_THRESHOLD 10
-#define IWL_AGG_ALL_TID                0xff
-#define TID_QUEUE_CELL_SPACING 50      /* ms */
-#define TID_QUEUE_MAX_SIZE     20
-#define TID_ROUND_VALUE                5       /* ms */
-#define TID_MAX_LOAD_COUNT     8
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
-
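
TID_MAX_TIME_DIFF works out to (20 - 1) * 50 = 950 ms, the span of the per-TID traffic-load window, and TIME_WRAP_AROUND() measures elapsed ticks even across a counter wrap. A minimal, self-contained illustration in plain C (not driver code):

#include <limits.h>
#include <stdio.h>

#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

int main(void)
{
        unsigned long before = ULONG_MAX - 15;  /* counter just before wrapping */
        unsigned long after = 16;               /* counter just after wrapping */

        /* prints 32: the elapsed ticks measured across the wrap */
        printf("%lu\n", TIME_WRAP_AROUND(before, after));
        return 0;
}
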
-extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
-
-enum iwl_table_type {
-       LQ_NONE,
-       LQ_G,           /* legacy types */
-       LQ_A,
-       LQ_SISO,        /* high-throughput types */
-       LQ_MIMO2,
-       LQ_MAX,
-};
-
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
-
-#define        ANT_NONE        0x0
-#define        ANT_A           BIT(0)
-#define        ANT_B           BIT(1)
-#define        ANT_AB          (ANT_A | ANT_B)
-#define ANT_C          BIT(2)
-#define        ANT_AC          (ANT_A | ANT_C)
-#define ANT_BC         (ANT_B | ANT_C)
-#define ANT_ABC                (ANT_AB | ANT_C)
-
-#define IWL_MAX_MCS_DISPLAY_SIZE       12
-
-struct iwl_rate_mcs_info {
-       char    mbps[IWL_MAX_MCS_DISPLAY_SIZE];
-       char    mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
-       u64 data;               /* bitmap of successful frames */
-       s32 success_counter;    /* number of frames successful */
-       s32 success_ratio;      /* per-cent * 128  */
-       s32 counter;            /* number of frames attempted */
-       s32 average_tpt;        /* success ratio * expected throughput */
-       unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
-       enum iwl_table_type lq_type;
-       u8 ant_type;
-       u8 is_SGI;      /* 1 = short guard interval */
-       u8 is_ht40;     /* 1 = 40 MHz channel width */
-       u8 is_dup;      /* 1 = duplicated data streams */
-       u8 action;      /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
-       u8 max_search;  /* maximum number of tables we can search */
-       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
-       u32 current_rate;  /* rate_n_flags, uCode API format */
-       struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
-       unsigned long time_stamp;       /* age of the oldest statistics */
-       u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
-                                                * slice */
-       u32 total;                      /* total num of packets during the
-                                        * last TID_MAX_TIME_DIFF */
-       u8 queue_count;                 /* number of queues that have
-                                        * been used since the last cleanup */
-       u8 head;                        /* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
-       u8 active_tbl;          /* index of active table, range 0-1 */
-       u8 enable_counter;      /* indicates HT mode */
-       u8 stay_in_tbl;         /* 1: disallow, 0: allow search for new mode */
-       u8 search_better_tbl;   /* 1: currently trying alternate mode */
-       s32 last_tpt;
-
-       /* The following determine when to search for a new mode */
-       u32 table_count_limit;
-       u32 max_failure_limit;  /* # failed frames before new search */
-       u32 max_success_limit;  /* # successful frames before new search */
-       u32 table_count;
-       u32 total_failed;       /* total failed frames, any/all rates */
-       u32 total_success;      /* total successful frames, any/all rates */
-       u64 flush_timer;        /* time staying in mode before new search */
-
-       u8 action_counter;      /* # mode-switch actions tried */
-       u8 is_green;
-       u8 is_dup;
-       enum ieee80211_band band;
-
-       /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
-       u32 supp_rates;
-       u16 active_legacy_rate;
-       u16 active_siso_rate;
-       u16 active_mimo2_rate;
-       s8 max_rate_idx;     /* Max rate set by user */
-       u8 missed_rate_counter;
-
-       struct iwl_link_quality_cmd lq;
-       struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
-       struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
-       u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
-       struct dentry *rs_sta_dbgfs_scale_table_file;
-       struct dentry *rs_sta_dbgfs_stats_table_file;
-       struct dentry *rs_sta_dbgfs_rate_scale_data_file;
-       struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
-       u32 dbg_fixed_rate;
-#endif
-       struct iwl_priv *drv;
-
-       /* used to be in sta_info */
-       int last_txrate_idx;
-       /* last tx rate_n_flags */
-       u32 last_rate_n_flags;
-       /* packets destined for this STA are aggregated */
-       u8 is_agg;
-};
-
-static inline u8 iwl4965_num_of_ant(u8 mask)
-{
-       return  !!((mask) & ANT_A) +
-               !!((mask) & ANT_B) +
-               !!((mask) & ANT_C);
-}
-
-static inline u8 iwl4965_first_antenna(u8 mask)
-{
-       if (mask & ANT_A)
-               return ANT_A;
-       if (mask & ANT_B)
-               return ANT_B;
-       return ANT_C;
-}
-
-
-/**
- * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
- *
- * The specific throughput table used is based on the type of network
- * the station is associated with, including A, B, G, and G w/ TGG protection
- */
-extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
-
-/* Initialize station's rate scaling information after adding station */
-extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
-                            struct ieee80211_sta *sta, u8 sta_id);
-extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
-                                struct ieee80211_sta *sta, u8 sta_id);
-
-/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
- *
- * Since the rate control algorithm is hardware-specific, there is no need
- * or reason to place it as a stand-alone module.  The driver can call
- * iwl_rate_control_register in order to register the rate control callbacks
- * with the mac80211 subsystem.  This should be performed prior to calling
- * ieee80211_register_hw
- *
- */
-extern int iwl4965_rate_control_register(void);
-extern int iwl3945_rate_control_register(void);
-
-/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
- *
- * This should be called after calling ieee80211_unregister_hw, but before
- * the driver is unloaded.
- */
-extern void iwl4965_rate_control_unregister(void);
-extern void iwl3945_rate_control_unregister(void);
-
-#endif /* __iwl_legacy_rs__ */
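
Taken together, the two comments above imply this rough ordering during driver init and teardown; the sketch below is illustrative only and assumes an ieee80211_hw pointer named hw plus the usual mac80211 headers:

/* init: register the rate-control callbacks before registering the hw */
static int example_register_order(struct ieee80211_hw *hw)
{
        int ret = iwl4965_rate_control_register();

        if (ret)
                return ret;
        return ieee80211_register_hw(hw);
}

/* teardown: ieee80211_unregister_hw(hw) first, then
 * iwl4965_rate_control_unregister(), before the module is unloaded. */
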
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644 (file)
index 903ef0d..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-
-/*
- * Setting power level allows the card to go to sleep when not busy.
- *
- * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
- */
-
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct iwl_power_vec_entry {
-       struct iwl_powertable_cmd cmd;
-       u8 no_dtim;     /* number of skip dtim */
-};
-
-static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
-                                   struct iwl_powertable_cmd *cmd)
-{
-       memset(cmd, 0, sizeof(*cmd));
-
-       if (priv->power_data.pci_pm)
-               cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
-       IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
-}
-
-static int
-iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
-{
-       IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
-       IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
-       IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
-                                       le32_to_cpu(cmd->tx_data_timeout));
-       IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
-                                       le32_to_cpu(cmd->rx_data_timeout));
-       IWL_DEBUG_POWER(priv,
-                       "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
-                       le32_to_cpu(cmd->sleep_interval[0]),
-                       le32_to_cpu(cmd->sleep_interval[1]),
-                       le32_to_cpu(cmd->sleep_interval[2]),
-                       le32_to_cpu(cmd->sleep_interval[3]),
-                       le32_to_cpu(cmd->sleep_interval[4]));
-
-       return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
-                               sizeof(struct iwl_powertable_cmd), cmd);
-}
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
-                      bool force)
-{
-       int ret;
-       bool update_chains;
-
-       lockdep_assert_held(&priv->mutex);
-
-       /* Don't update the RX chain when chain noise calibration is running */
-       update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
-                       priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
-       if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
-               return 0;
-
-       if (!iwl_legacy_is_ready_rf(priv))
-               return -EIO;
-
-       /* on scan complete, sleep_cmd_next will be used, so keep it updated */
-       memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
-       if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
-               IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
-               return 0;
-       }
-
-       if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
-               set_bit(STATUS_POWER_PMI, &priv->status);
-
-       ret = iwl_legacy_set_power(priv, cmd);
-       if (!ret) {
-               if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
-                       clear_bit(STATUS_POWER_PMI, &priv->status);
-
-               if (priv->cfg->ops->lib->update_chain_flags && update_chains)
-                       priv->cfg->ops->lib->update_chain_flags(priv);
-               else if (priv->cfg->ops->lib->update_chain_flags)
-                       IWL_DEBUG_POWER(priv,
-                                       "Cannot update the power, chain noise "
-                                       "calibration running: %d\n",
-                                       priv->chain_noise_data.state);
-
-               memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
-       } else
-               IWL_ERR(priv, "set power fail, ret = %d", ret);
-
-       return ret;
-}
-
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
-{
-       struct iwl_powertable_cmd cmd;
-
-       iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
-       return iwl_legacy_power_set_mode(priv, &cmd, force);
-}
-EXPORT_SYMBOL(iwl_legacy_power_update_mode);
-
-/* initialize to default */
-void iwl_legacy_power_initialize(struct iwl_priv *priv)
-{
-       u16 lctl = iwl_legacy_pcie_link_ctl(priv);
-
-       priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-
-       priv->power_data.debug_sleep_level_override = -1;
-
-       memset(&priv->power_data.sleep_cmd, 0,
-               sizeof(priv->power_data.sleep_cmd));
-}
-EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644 (file)
index d30b36a..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_legacy_power_setting_h__
-#define __iwl_legacy_power_setting_h__
-
-#include "iwl-commands.h"
-
-enum iwl_power_level {
-       IWL_POWER_INDEX_1,
-       IWL_POWER_INDEX_2,
-       IWL_POWER_INDEX_3,
-       IWL_POWER_INDEX_4,
-       IWL_POWER_INDEX_5,
-       IWL_POWER_NUM
-};
-
-struct iwl_power_mgr {
-       struct iwl_powertable_cmd sleep_cmd;
-       struct iwl_powertable_cmd sleep_cmd_next;
-       int debug_sleep_level_override;
-       bool pci_pm;
-};
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
-                      bool force);
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
-void iwl_legacy_power_initialize(struct iwl_priv *priv);
-
-#endif  /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644 (file)
index f4d21ec..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC.  These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC.  The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt.  The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_legacy_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
- *                            READ INDEX, detaching the SKB from the pool.
- *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl_rx_queue_restock to refill any empty
- *                            slots.
- * ...
- *
- */
-
-/**
- * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
-{
-       int s = q->read - q->write;
-       if (s <= 0)
-               s += RX_QUEUE_SIZE;
-       /* keep some buffer to not confuse full and empty queue */
-       s -= 2;
-       if (s < 0)
-               s = 0;
-       return s;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
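
To make the read/write convention above concrete, here is a stand-alone version of the same computation; a queue size of 256 is assumed purely for illustration:

#include <stdio.h>

#define RX_QUEUE_SIZE 256       /* assumed for illustration */

static int rx_queue_space(int read, int write)
{
        int s = read - write;

        if (s <= 0)
                s += RX_QUEUE_SIZE;
        s -= 2; /* keep two slots back so full and empty stay distinguishable */
        return s < 0 ? 0 : s;
}

int main(void)
{
        printf("%d\n", rx_queue_space(0, 0));   /* 254: freshly reset queue */
        printf("%d\n", rx_queue_space(5, 4));   /* 0: only the reserved slack remains */
        return 0;
}
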
-
-/**
- * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void
-iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
-                                       struct iwl_rx_queue *q)
-{
-       unsigned long flags;
-       u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
-       u32 reg;
-
-       spin_lock_irqsave(&q->lock, flags);
-
-       if (q->need_update == 0)
-               goto exit_unlock;
-
-       /* If power-saving is in use, make sure device is awake */
-       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                       IWL_DEBUG_INFO(priv,
-                               "Rx queue requesting wakeup,"
-                               " GP1 = 0x%x\n", reg);
-                       iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
-                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       goto exit_unlock;
-               }
-
-               q->write_actual = (q->write & ~0x7);
-               iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
-                               q->write_actual);
-
-       /* Else device is assumed to be awake */
-       } else {
-               /* Device expects a multiple of 8 */
-               q->write_actual = (q->write & ~0x7);
-               iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
-                       q->write_actual);
-       }
-
-       q->need_update = 0;
-
- exit_unlock:
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
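
The "& ~0x7" above rounds the driver's write index down to the multiple of 8 that the device expects, so an index of 13 is reported to the hardware as 8. A one-line illustrative helper (not driver code):

static unsigned int round_down_to_8(unsigned int write)
{
        return write & ~0x7u;   /* e.g. 13 -> 8, 16 -> 16 */
}
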
-
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct device *dev = &priv->pci_dev->dev;
-       int i;
-
-       spin_lock_init(&rxq->lock);
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
-
-       /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
-       rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
-                                    GFP_KERNEL);
-       if (!rxq->bd)
-               goto err_bd;
-
-       rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
-                                         &rxq->rb_stts_dma, GFP_KERNEL);
-       if (!rxq->rb_stts)
-               goto err_rb;
-
-       /* Fill the rx_used queue with _all_ of the Rx buffers */
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
-               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
-       /* Set us so that we have processed and used all buffers, but have
-        * not restocked the Rx queue with fresh buffers */
-       rxq->read = rxq->write = 0;
-       rxq->write_actual = 0;
-       rxq->free_count = 0;
-       rxq->need_update = 0;
-       return 0;
-
-err_rb:
-       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-                         rxq->bd_dma);
-err_bd:
-       return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
-
-
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                                         struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
-       if (!report->state) {
-               IWL_DEBUG_11H(priv,
-                       "Spectrum Measure Notification: Start\n");
-               return;
-       }
-
-       memcpy(&priv->measure_report, report, sizeof(*report));
-       priv->measurement_status |= MEASUREMENT_READY;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-
-/*
- * returns non-zero if packet should be dropped
- */
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
-                          struct ieee80211_hdr *hdr,
-                          u32 decrypt_res,
-                          struct ieee80211_rx_status *stats)
-{
-       u16 fc = le16_to_cpu(hdr->frame_control);
-
-       /*
-        * All contexts have the same setting here due to it being
-        * a module parameter, so OK to check any context.
-        */
-       if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
-                                               RXON_FILTER_DIS_DECRYPT_MSK)
-               return 0;
-
-       if (!(fc & IEEE80211_FCTL_PROTECTED))
-               return 0;
-
-       IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
-       switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
-       case RX_RES_STATUS_SEC_TYPE_TKIP:
-               /* The uCode got a bad phase 1 key; it pushes the packet up
-                * and decryption will be done in SW. */
-               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
-                   RX_RES_STATUS_BAD_KEY_TTAK)
-                       break;
-
-       case RX_RES_STATUS_SEC_TYPE_WEP:
-               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
-                   RX_RES_STATUS_BAD_ICV_MIC) {
-                       /* bad ICV, the packet is destroyed since the
-                        * decryption is inplace, drop it */
-                       IWL_DEBUG_RX(priv, "Packet destroyed\n");
-                       return -1;
-               }
-       case RX_RES_STATUS_SEC_TYPE_CCMP:
-               if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
-                   RX_RES_STATUS_DECRYPT_OK) {
-                       IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
-                       stats->flag |= RX_FLAG_DECRYPTED;
-               }
-               break;
-
-       default:
-               break;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644 (file)
index 521b73b..0000000
+++ /dev/null
@@ -1,550 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
- * sending probe req.  This should be set long enough to hear probe responses
- * from more than one AP.  */
-#define IWL_ACTIVE_DWELL_TIME_24    (30)       /* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52    (20)
-
-#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
-#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
-
-/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
- * Must be set longer than active dwell time.
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */
-#define IWL_PASSIVE_DWELL_TIME_24   (20)       /* all times in msec */
-#define IWL_PASSIVE_DWELL_TIME_52   (10)
-#define IWL_PASSIVE_DWELL_BASE      (100)
-#define IWL_CHANNEL_TUNE_TIME       5
-
-static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
-{
-       int ret;
-       struct iwl_rx_packet *pkt;
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_SCAN_ABORT_CMD,
-               .flags = CMD_WANT_SKB,
-       };
-
-       /* Exit instantly with an error when the device is not ready
-        * to receive the scan abort command or is not currently
-        * performing a hardware scan */
-       if (!test_bit(STATUS_READY, &priv->status) ||
-           !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
-           !test_bit(STATUS_SCAN_HW, &priv->status) ||
-           test_bit(STATUS_FW_ERROR, &priv->status) ||
-           test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return -EIO;
-
-       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
-       if (ret)
-               return ret;
-
-       pkt = (struct iwl_rx_packet *)cmd.reply_page;
-       if (pkt->u.status != CAN_ABORT_STATUS) {
-               /* The scan abort will return 1 for success or
-                * 2 for "failure".  A failure condition can be
-                * due to simply not being in an active scan which
-                * can occur if we send the scan abort before we
-                * can occur if we send the scan abort before
-                * completed. */
-               IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
-               ret = -EIO;
-       }
-
-       iwl_legacy_free_pages(priv, cmd.reply_page);
-       return ret;
-}
-
-static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
-{
-       /* check if scan was requested from mac80211 */
-       if (priv->scan_request) {
-               IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
-               ieee80211_scan_completed(priv->hw, aborted);
-       }
-
-       priv->scan_vif = NULL;
-       priv->scan_request = NULL;
-}
-
-void iwl_legacy_force_scan_end(struct iwl_priv *priv)
-{
-       lockdep_assert_held(&priv->mutex);
-
-       if (!test_bit(STATUS_SCANNING, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
-               return;
-       }
-
-       IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
-       clear_bit(STATUS_SCANNING, &priv->status);
-       clear_bit(STATUS_SCAN_HW, &priv->status);
-       clear_bit(STATUS_SCAN_ABORTING, &priv->status);
-       iwl_legacy_complete_scan(priv, true);
-}
-
-static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
-{
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!test_bit(STATUS_SCANNING, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
-               return;
-       }
-
-       if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
-               return;
-       }
-
-       ret = iwl_legacy_send_scan_abort(priv);
-       if (ret) {
-               IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
-               iwl_legacy_force_scan_end(priv);
-       } else
-               IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
-}
-
-/**
- * iwl_scan_cancel - Cancel any currently executing HW scan
- */
-int iwl_legacy_scan_cancel(struct iwl_priv *priv)
-{
-       IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
-       queue_work(priv->workqueue, &priv->abort_scan);
-       return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel);
-
-/**
- * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
- * @ms: amount of time to wait (in milliseconds) for scan to abort
- *
- */
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(ms);
-
-       lockdep_assert_held(&priv->mutex);
-
-       IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
-
-       iwl_legacy_do_scan_abort(priv);
-
-       while (time_before_eq(jiffies, timeout)) {
-               if (!test_bit(STATUS_SCAN_HW, &priv->status))
-                       break;
-               msleep(20);
-       }
-
-       return test_bit(STATUS_SCAN_HW, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
-
-/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
-                             struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scanreq_notification *notif =
-           (struct iwl_scanreq_notification *)pkt->u.raw;
-
-       IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
-#endif
-}
-
-/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scanstart_notification *notif =
-           (struct iwl_scanstart_notification *)pkt->u.raw;
-       priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
-       IWL_DEBUG_SCAN(priv, "Scan start: "
-                      "%d [802.11%s] "
-                      "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
-                      notif->channel,
-                      notif->band ? "bg" : "a",
-                      le32_to_cpu(notif->tsf_high),
-                      le32_to_cpu(notif->tsf_low),
-                      notif->status, notif->beacon_timer);
-}
-
-/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scanresults_notification *notif =
-           (struct iwl_scanresults_notification *)pkt->u.raw;
-
-       IWL_DEBUG_SCAN(priv, "Scan ch.res: "
-                      "%d [802.11%s] "
-                      "(TSF: 0x%08X:%08X) - %d "
-                      "elapsed=%lu usec\n",
-                      notif->channel,
-                      notif->band ? "bg" : "a",
-                      le32_to_cpu(notif->tsf_high),
-                      le32_to_cpu(notif->tsf_low),
-                      le32_to_cpu(notif->statistics[0]),
-                      le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
-#endif
-}
-
-/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_mem_buffer *rxb)
-{
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
-
-       IWL_DEBUG_SCAN(priv,
-                       "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
-                      scan_notif->scanned_channels,
-                      scan_notif->tsf_low,
-                      scan_notif->tsf_high, scan_notif->status);
-
-       /* The HW is no longer scanning */
-       clear_bit(STATUS_SCAN_HW, &priv->status);
-
-       IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
-                      (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
-                      jiffies_to_msecs(jiffies - priv->scan_start));
-
-       queue_work(priv->workqueue, &priv->scan_completed);
-}
-
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
-{
-       /* scan handlers */
-       priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
-       priv->rx_handlers[SCAN_START_NOTIFICATION] =
-                                       iwl_legacy_rx_scan_start_notif;
-       priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
-                                       iwl_legacy_rx_scan_results_notif;
-       priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
-                                       iwl_legacy_rx_scan_complete_notif;
-}
-EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
-
-inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
-                                    enum ieee80211_band band,
-                                    u8 n_probes)
-{
-       if (band == IEEE80211_BAND_5GHZ)
-               return IWL_ACTIVE_DWELL_TIME_52 +
-                       IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
-       else
-               return IWL_ACTIVE_DWELL_TIME_24 +
-                       IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
-}
-EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
-
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
-                              enum ieee80211_band band,
-                              struct ieee80211_vif *vif)
-{
-       struct iwl_rxon_context *ctx;
-       u16 passive = (band == IEEE80211_BAND_2GHZ) ?
-           IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
-           IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
-
-       if (iwl_legacy_is_any_associated(priv)) {
-               /*
-                * If we're associated, we clamp the maximum passive
-                * dwell time to be 98% of the smallest beacon interval
-                * (minus 2 * channel tune time)
-                */
-               for_each_context(priv, ctx) {
-                       u16 value;
-
-                       if (!iwl_legacy_is_associated_ctx(ctx))
-                               continue;
-                       value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
-                       if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
-                               value = IWL_PASSIVE_DWELL_BASE;
-                       value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
-                       passive = min(value, passive);
-               }
-       }
-
-       return passive;
-}
-EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
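
Worked examples of the dwell-time formulas above (all values in msec; the associated case assumes a beacon interval of 100 purely for illustration):

/*
 * active,  2.4 GHz, 2 probes     : 30 + 3 * (2 + 1)          = 39
 * active,  5 GHz,   2 probes     : 20 + 2 * (2 + 1)          = 26
 * passive, 2.4 GHz, unassociated : 100 + 20                  = 120
 * passive, associated, bi = 100  : 100 * 98 / 100 - 2 * 5    = 88
 */
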
-
-void iwl_legacy_init_scan_params(struct iwl_priv *priv)
-{
-       u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
-       if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
-               priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
-       if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
-               priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
-}
-EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-
-static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
-                                   struct ieee80211_vif *vif)
-{
-       int ret;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (WARN_ON(!priv->cfg->ops->utils->request_scan))
-               return -EOPNOTSUPP;
-
-       cancel_delayed_work(&priv->scan_check);
-
-       if (!iwl_legacy_is_ready_rf(priv)) {
-               IWL_WARN(priv, "Request scan called when driver not ready.\n");
-               return -EIO;
-       }
-
-       if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-               IWL_DEBUG_SCAN(priv,
-                       "Multiple concurrent scan requests in parallel.\n");
-               return -EBUSY;
-       }
-
-       if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
-               return -EBUSY;
-       }
-
-       IWL_DEBUG_SCAN(priv, "Starting scan...\n");
-
-       set_bit(STATUS_SCANNING, &priv->status);
-       priv->scan_start = jiffies;
-
-       ret = priv->cfg->ops->utils->request_scan(priv, vif);
-       if (ret) {
-               clear_bit(STATUS_SCANNING, &priv->status);
-               return ret;
-       }
-
-       queue_delayed_work(priv->workqueue, &priv->scan_check,
-                          IWL_SCAN_CHECK_WATCHDOG);
-
-       return 0;
-}
-
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif,
-                   struct cfg80211_scan_request *req)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (req->n_channels == 0)
-               return -EINVAL;
-
-       mutex_lock(&priv->mutex);
-
-       if (test_bit(STATUS_SCANNING, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
-               ret = -EAGAIN;
-               goto out_unlock;
-       }
-
-       /* mac80211 will only ask for one band at a time */
-       priv->scan_request = req;
-       priv->scan_vif = vif;
-       priv->scan_band = req->channels[0]->band;
-
-       ret = iwl_legacy_scan_initiate(priv, vif);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out_unlock:
-       mutex_unlock(&priv->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-
-static void iwl_legacy_bg_scan_check(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, scan_check.work);
-
-       IWL_DEBUG_SCAN(priv, "Scan check work\n");
-
-       /* Since we got here, the firmware has not finished the scan and
-        * is most likely in bad shape, so we don't bother to send an
-        * abort command; just force scan complete to mac80211 */
-       mutex_lock(&priv->mutex);
-       iwl_legacy_force_scan_end(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16
-iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
-                      const u8 *ta, const u8 *ies, int ie_len, int left)
-{
-       int len = 0;
-       u8 *pos = NULL;
-
-       /* Make sure there is enough space for the probe request,
-        * two mandatory IEs and the data */
-       left -= 24;
-       if (left < 0)
-               return 0;
-
-       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-       memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
-       memcpy(frame->sa, ta, ETH_ALEN);
-       memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
-       frame->seq_ctrl = 0;
-
-       len += 24;
-
-       /* ...next IE... */
-       pos = &frame->u.probe_req.variable[0];
-
-       /* fill in our indirect SSID IE */
-       left -= 2;
-       if (left < 0)
-               return 0;
-       *pos++ = WLAN_EID_SSID;
-       *pos++ = 0;
-
-       len += 2;
-
-       if (WARN_ON(left < ie_len))
-               return len;
-
-       if (ies && ie_len) {
-               memcpy(pos, ies, ie_len);
-               len += ie_len;
-       }
-
-       return (u16)len;
-}
-EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
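
As the length checks above suggest, the caller must budget 24 bytes for the management header, 2 bytes for the empty SSID IE, and then the extra IEs. A hypothetical sizing helper (not part of the driver) spelling out that arithmetic:

static unsigned int probe_req_min_len(unsigned int ie_len)
{
        /* 24-byte 802.11 mgmt header + 2-byte zero-length SSID IE + extra IEs */
        return 24 + 2 + ie_len;
}
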
-
-static void iwl_legacy_bg_abort_scan(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
-
-       IWL_DEBUG_SCAN(priv, "Abort scan work\n");
-
-       /* We keep the scan_check work queued in case the firmware does not
-        * report back a scan completed notification */
-       mutex_lock(&priv->mutex);
-       iwl_legacy_scan_cancel_timeout(priv, 200);
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl_legacy_bg_scan_completed(struct work_struct *work)
-{
-       struct iwl_priv *priv =
-           container_of(work, struct iwl_priv, scan_completed);
-       bool aborted;
-
-       IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
-       cancel_delayed_work(&priv->scan_check);
-
-       mutex_lock(&priv->mutex);
-
-       aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
-       if (aborted)
-               IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
-       if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
-               IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
-               goto out_settings;
-       }
-
-       iwl_legacy_complete_scan(priv, aborted);
-
-out_settings:
-       /* Can we still talk to firmware ? */
-       if (!iwl_legacy_is_ready_rf(priv))
-               goto out;
-
-       /*
-        * We do not commit power settings while scan is pending,
-        * do it now if the settings changed.
-        */
-       iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
-       iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-
-       priv->cfg->ops->utils->post_scan(priv);
-
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
-{
-       INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
-       INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
-       INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
-
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
-{
-       cancel_work_sync(&priv->abort_scan);
-       cancel_work_sync(&priv->scan_completed);
-
-       if (cancel_delayed_work_sync(&priv->scan_check)) {
-               mutex_lock(&priv->mutex);
-               iwl_legacy_force_scan_end(priv);
-               mutex_unlock(&priv->mutex);
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
index 9f70a47..85fe48e 100644 (file)
@@ -26,8 +26,8 @@
  *
  *****************************************************************************/
 
-#ifndef __iwl_legacy_spectrum_h__
-#define __iwl_legacy_spectrum_h__
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
 enum {                         /* ieee80211_basic_report.map */
        IEEE80211_BASIC_MAP_BSS = (1 << 0),
        IEEE80211_BASIC_MAP_OFDM = (1 << 1),
index f10df3e..75fe315 100644 (file)
 #include "iwl-core.h"
 #include "iwl-sta.h"
 
-/* priv->sta_lock must be held */
-static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+/* il->sta_lock must be held */
+static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
 {
 
-       if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
-               IWL_ERR(priv,
+       if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+               IL_ERR(
                        "ACTIVATE a non DRIVER active station id %u addr %pM\n",
-                       sta_id, priv->stations[sta_id].sta.sta.addr);
+                       sta_id, il->stations[sta_id].sta.sta.addr);
 
-       if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
-               IWL_DEBUG_ASSOC(priv,
+       if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+               D_ASSOC(
                        "STA id %u addr %pM already present"
                        " in uCode (according to driver)\n",
-                       sta_id, priv->stations[sta_id].sta.sta.addr);
+                       sta_id, il->stations[sta_id].sta.sta.addr);
        } else {
-               priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
-               IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
-                               sta_id, priv->stations[sta_id].sta.sta.addr);
+               il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+               D_ASSOC("Added STA id %u addr %pM to uCode\n",
+                               sta_id, il->stations[sta_id].sta.sta.addr);
        }
 }
 
-static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
-                                   struct iwl_legacy_addsta_cmd *addsta,
-                                   struct iwl_rx_packet *pkt,
+static int il_process_add_sta_resp(struct il_priv *il,
+                                   struct il_addsta_cmd *addsta,
+                                   struct il_rx_pkt *pkt,
                                    bool sync)
 {
        u8 sta_id = addsta->sta.sta_id;
        unsigned long flags;
        int ret = -EIO;
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from C_ADD_STA (0x%08X)\n",
                        pkt->hdr.flags);
                return ret;
        }
 
-       IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+       D_INFO("Processing response for adding station %u\n",
                       sta_id);
 
-       spin_lock_irqsave(&priv->sta_lock, flags);
+       spin_lock_irqsave(&il->sta_lock, flags);
 
        switch (pkt->u.add_sta.status) {
        case ADD_STA_SUCCESS_MSK:
-               IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-               iwl_legacy_sta_ucode_activate(priv, sta_id);
+               D_INFO("C_ADD_STA PASSED\n");
+               il_sta_ucode_activate(il, sta_id);
                ret = 0;
                break;
-       case ADD_STA_NO_ROOM_IN_TABLE:
-               IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+       case ADD_STA_NO_ROOM_IN_TBL:
+               IL_ERR("Adding station %d failed, no room in table.\n",
                        sta_id);
                break;
        case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-               IWL_ERR(priv,
+               IL_ERR(
                        "Adding station %d failed, no block ack resource.\n",
                        sta_id);
                break;
        case ADD_STA_MODIFY_NON_EXIST_STA:
-               IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
+               IL_ERR("Attempting to modify non-existing station %d\n",
                        sta_id);
                break;
        default:
-               IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+               D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
                                pkt->u.add_sta.status);
                break;
        }
 
-       IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-                      priv->stations[sta_id].sta.mode ==
+       D_INFO("%s station id %u addr %pM\n",
+                      il->stations[sta_id].sta.mode ==
                       STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-                      sta_id, priv->stations[sta_id].sta.sta.addr);
+                      sta_id, il->stations[sta_id].sta.sta.addr);
 
        /*
         * XXX: The MAC address in the command buffer is often changed from
@@ -116,68 +116,68 @@ static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
         * issue has not yet been resolved and this debugging is left to
         * observe the problem.
         */
-       IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-                      priv->stations[sta_id].sta.mode ==
+       D_INFO("%s station according to cmd buffer %pM\n",
+                      il->stations[sta_id].sta.mode ==
                       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
                       addsta->sta.addr);
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
 
        return ret;
 }
 
-static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
-                                struct iwl_device_cmd *cmd,
-                                struct iwl_rx_packet *pkt)
+static void il_add_sta_callback(struct il_priv *il,
+                                struct il_device_cmd *cmd,
+                                struct il_rx_pkt *pkt)
 {
-       struct iwl_legacy_addsta_cmd *addsta =
-               (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
+       struct il_addsta_cmd *addsta =
+               (struct il_addsta_cmd *)cmd->cmd.payload;
 
-       iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
+       il_process_add_sta_resp(il, addsta, pkt, false);
 
 }
 
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
-                    struct iwl_legacy_addsta_cmd *sta, u8 flags)
+int il_send_add_sta(struct il_priv *il,
+                    struct il_addsta_cmd *sta, u8 flags)
 {
-       struct iwl_rx_packet *pkt = NULL;
+       struct il_rx_pkt *pkt = NULL;
        int ret = 0;
        u8 data[sizeof(*sta)];
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_ADD_STA,
+       struct il_host_cmd cmd = {
+               .id = C_ADD_STA,
                .flags = flags,
                .data = data,
        };
        u8 sta_id __maybe_unused = sta->sta.sta_id;
 
-       IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+       D_INFO("Adding sta %u (%pM) %ssynchronously\n",
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
 
        if (flags & CMD_ASYNC)
-               cmd.callback = iwl_legacy_add_sta_callback;
+               cmd.callback = il_add_sta_callback;
        else {
                cmd.flags |= CMD_WANT_SKB;
                might_sleep();
        }
 
-       cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
-       ret = iwl_legacy_send_cmd(priv, &cmd);
+       cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+       ret = il_send_cmd(il, &cmd);
 
        if (ret || (flags & CMD_ASYNC))
                return ret;
 
        if (ret == 0) {
-               pkt = (struct iwl_rx_packet *)cmd.reply_page;
-               ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
+               pkt = (struct il_rx_pkt *)cmd.reply_page;
+               ret = il_process_add_sta_resp(il, sta, pkt, true);
        }
-       iwl_legacy_free_pages(priv, cmd.reply_page);
+       il_free_pages(il, cmd.reply_page);
 
        return ret;
 }
-EXPORT_SYMBOL(iwl_legacy_send_add_sta);
+EXPORT_SYMBOL(il_send_add_sta);
 
-static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
+static void il_set_ht_add_station(struct il_priv *il, u8 idx,
                                   struct ieee80211_sta *sta,
-                                  struct iwl_rxon_context *ctx)
+                                  struct il_rxon_context *ctx)
 {
        struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
        __le32 sta_flags;
@@ -187,13 +187,13 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
                goto done;
 
        mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
-       IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
+       D_ASSOC("spatial multiplexing power save mode: %s\n",
                        (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
                        "static" :
                        (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
                        "dynamic" : "disabled");
 
-       sta_flags = priv->stations[index].sta.station_flags;
+       sta_flags = il->stations[idx].sta.station_flags;
 
        sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
 
@@ -207,7 +207,7 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
        case WLAN_HT_CAP_SM_PS_DISABLED:
                break;
        default:
-               IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
+               IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
                break;
        }
 
@@ -217,27 +217,27 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
        sta_flags |= cpu_to_le32(
              (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
 
-       if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
+       if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
                sta_flags |= STA_FLG_HT40_EN_MSK;
        else
                sta_flags &= ~STA_FLG_HT40_EN_MSK;
 
-       priv->stations[index].sta.station_flags = sta_flags;
+       il->stations[idx].sta.station_flags = sta_flags;
  done:
        return;
 }
 
 /**
- * iwl_legacy_prep_station - Prepare station information for addition
+ * il_prep_station - Prepare station information for addition
  *
  * should be called with sta_lock held
  */
-u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
                    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
 {
-       struct iwl_station_entry *station;
+       struct il_station_entry *station;
        int i;
-       u8 sta_id = IWL_INVALID_STATION;
+       u8 sta_id = IL_INVALID_STATION;
        u16 rate;
 
        if (is_ap)
@@ -245,15 +245,15 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        else if (is_broadcast_ether_addr(addr))
                sta_id = ctx->bcast_sta_id;
        else
-               for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
-                       if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
+               for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+                       if (!compare_ether_addr(il->stations[i].sta.sta.addr,
                                                addr)) {
                                sta_id = i;
                                break;
                        }
 
-                       if (!priv->stations[i].used &&
-                           sta_id == IWL_INVALID_STATION)
+                       if (!il->stations[i].used &&
+                           sta_id == IL_INVALID_STATION)
                                sta_id = i;
                }
 
@@ -261,7 +261,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
         * These two conditions have the same outcome, but keep them
         * separate
         */
-       if (unlikely(sta_id == IWL_INVALID_STATION))
+       if (unlikely(sta_id == IL_INVALID_STATION))
                return sta_id;
 
        /*
@@ -269,30 +269,30 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
         * station. Keep track if one is in progress so that we do not send
         * another.
         */
-       if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
-               IWL_DEBUG_INFO(priv,
+       if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+               D_INFO(
                                "STA %d already in process of being added.\n",
                                sta_id);
                return sta_id;
        }
 
-       if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
-           (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
-           !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
-               IWL_DEBUG_ASSOC(priv,
+       if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+           (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+           !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+               D_ASSOC(
                                "STA %d (%pM) already added, not adding again.\n",
                                sta_id, addr);
                return sta_id;
        }
 
-       station = &priv->stations[sta_id];
-       station->used = IWL_STA_DRIVER_ACTIVE;
-       IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+       station = &il->stations[sta_id];
+       station->used = IL_STA_DRIVER_ACTIVE;
+       D_ASSOC("Add STA to driver ID %d: %pM\n",
                        sta_id, addr);
-       priv->num_stations++;
+       il->num_stations++;
 
-       /* Set up the REPLY_ADD_STA command to send to device */
-       memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
+       /* Set up the C_ADD_STA command to send to device */
+       memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
        memcpy(station->sta.sta.addr, addr, ETH_ALEN);
        station->sta.mode = 0;
        station->sta.sta.sta_id = sta_id;
@@ -300,7 +300,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        station->ctxid = ctx->ctxid;
 
        if (sta) {
-               struct iwl_station_priv_common *sta_priv;
+               struct il_station_priv_common *sta_priv;
 
                sta_priv = (void *)sta->drv_priv;
                sta_priv->ctx = ctx;
@@ -311,42 +311,42 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
         * STA and broadcast STA) pass in a NULL sta, and mac80211
         * doesn't allow HT IBSS.
         */
-       iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
+       il_set_ht_add_station(il, sta_id, sta, ctx);
 
        /* 3945 only */
-       rate = (priv->band == IEEE80211_BAND_5GHZ) ?
-               IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
+       rate = (il->band == IEEE80211_BAND_5GHZ) ?
+               RATE_6M_PLCP : RATE_1M_PLCP;
        /* Turn on both antennas for the station... */
        station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
 
        return sta_id;
 
 }
-EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
+EXPORT_SYMBOL_GPL(il_prep_station);
 
 #define STA_WAIT_TIMEOUT (HZ/2)
 
 /**
- * iwl_legacy_add_station_common -
+ * il_add_station_common -
  */
 int
-iwl_legacy_add_station_common(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
+il_add_station_common(struct il_priv *il,
+                       struct il_rxon_context *ctx,
                           const u8 *addr, bool is_ap,
                           struct ieee80211_sta *sta, u8 *sta_id_r)
 {
        unsigned long flags_spin;
        int ret = 0;
        u8 sta_id;
-       struct iwl_legacy_addsta_cmd sta_cmd;
+       struct il_addsta_cmd sta_cmd;
 
        *sta_id_r = 0;
-       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-       sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+       if (sta_id == IL_INVALID_STATION) {
+               IL_ERR("Unable to prepare station %pM for addition\n",
                        addr);
-               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
                return -EINVAL;
        }
 
@@ -355,75 +355,75 @@ iwl_legacy_add_station_common(struct iwl_priv *priv,
         * station. Keep track if one is in progress so that we do not send
         * another.
         */
-       if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
-               IWL_DEBUG_INFO(priv,
+       if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+               D_INFO(
                        "STA %d already in process of being added.\n",
                       sta_id);
-               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
                return -EEXIST;
        }
 
-       if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
-           (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
-               IWL_DEBUG_ASSOC(priv,
+       if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+           (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+               D_ASSOC(
                        "STA %d (%pM) already added, not adding again.\n",
                        sta_id, addr);
-               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
                return -EEXIST;
        }
 
-       priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta,
-                               sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+       memcpy(&sta_cmd, &il->stations[sta_id].sta,
+                               sizeof(struct il_addsta_cmd));
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
 
        /* Add station to device's station table */
-       ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+       ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
        if (ret) {
-               spin_lock_irqsave(&priv->sta_lock, flags_spin);
-               IWL_ERR(priv, "Adding station %pM failed.\n",
-                       priv->stations[sta_id].sta.sta.addr);
-               priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-               priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               spin_lock_irqsave(&il->sta_lock, flags_spin);
+               IL_ERR("Adding station %pM failed.\n",
+                       il->stations[sta_id].sta.sta.addr);
+               il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+               il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
        }
        *sta_id_r = sta_id;
        return ret;
 }
-EXPORT_SYMBOL(iwl_legacy_add_station_common);
+EXPORT_SYMBOL(il_add_station_common);
 
 /**
- * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
  *
- * priv->sta_lock must be held
+ * il->sta_lock must be held
  */
-static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
 {
        /* Ucode must be active and driver must be non active */
-       if ((priv->stations[sta_id].used &
-            (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
-                                               IWL_STA_UCODE_ACTIVE)
-               IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+       if ((il->stations[sta_id].used &
+            (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+                                               IL_STA_UCODE_ACTIVE)
+               IL_ERR("removed non active STA %u\n", sta_id);
 
-       priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+       il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
 
-       memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
-       IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+       memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+       D_ASSOC("Removed STA %u\n", sta_id);
 }
 
-static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
+static int il_send_remove_station(struct il_priv *il,
                                   const u8 *addr, int sta_id,
                                   bool temporary)
 {
-       struct iwl_rx_packet *pkt;
+       struct il_rx_pkt *pkt;
        int ret;
 
        unsigned long flags_spin;
-       struct iwl_rem_sta_cmd rm_sta_cmd;
+       struct il_rem_sta_cmd rm_sta_cmd;
 
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_REMOVE_STA,
-               .len = sizeof(struct iwl_rem_sta_cmd),
+       struct il_host_cmd cmd = {
+               .id = C_REM_STA,
+               .len = sizeof(struct il_rem_sta_cmd),
                .flags = CMD_SYNC,
                .data = &rm_sta_cmd,
        };
@@ -434,14 +434,14 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
 
        cmd.flags |= CMD_WANT_SKB;
 
-       ret = iwl_legacy_send_cmd(priv, &cmd);
+       ret = il_send_cmd(il, &cmd);
 
        if (ret)
                return ret;
 
-       pkt = (struct iwl_rx_packet *)cmd.reply_page;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
+       pkt = (struct il_rx_pkt *)cmd.reply_page;
+       if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+               IL_ERR("Bad return from C_REM_STA (0x%08X)\n",
                          pkt->hdr.flags);
                ret = -EIO;
        }
@@ -450,34 +450,34 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
                switch (pkt->u.rem_sta.status) {
                case REM_STA_SUCCESS_MSK:
                        if (!temporary) {
-                               spin_lock_irqsave(&priv->sta_lock, flags_spin);
-                               iwl_legacy_sta_ucode_deactivate(priv, sta_id);
-                               spin_unlock_irqrestore(&priv->sta_lock,
+                               spin_lock_irqsave(&il->sta_lock, flags_spin);
+                               il_sta_ucode_deactivate(il, sta_id);
+                               spin_unlock_irqrestore(&il->sta_lock,
                                                                flags_spin);
                        }
-                       IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+                       D_ASSOC("C_REM_STA PASSED\n");
                        break;
                default:
                        ret = -EIO;
-                       IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+                       IL_ERR("C_REM_STA failed\n");
                        break;
                }
        }
-       iwl_legacy_free_pages(priv, cmd.reply_page);
+       il_free_pages(il, cmd.reply_page);
 
        return ret;
 }
 
 /**
- * iwl_legacy_remove_station - Remove driver's knowledge of station.
+ * il_remove_station - Remove driver's knowledge of station.
  */
-int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
+int il_remove_station(struct il_priv *il, const u8 sta_id,
                       const u8 *addr)
 {
        unsigned long flags;
 
-       if (!iwl_legacy_is_ready(priv)) {
-               IWL_DEBUG_INFO(priv,
+       if (!il_is_ready(il)) {
+               D_INFO(
                        "Unable to remove station %pM, device not ready.\n",
                        addr);
                /*
@@ -488,85 +488,85 @@ int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
                return 0;
        }
 
-       IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
+       D_ASSOC("Removing STA from driver:%d  %pM\n",
                        sta_id, addr);
 
-       if (WARN_ON(sta_id == IWL_INVALID_STATION))
+       if (WARN_ON(sta_id == IL_INVALID_STATION))
                return -EINVAL;
 
-       spin_lock_irqsave(&priv->sta_lock, flags);
+       spin_lock_irqsave(&il->sta_lock, flags);
 
-       if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
-               IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+       if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+               D_INFO("Removing %pM but non DRIVER active\n",
                                addr);
                goto out_err;
        }
 
-       if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
-               IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+       if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+               D_INFO("Removing %pM but non UCODE active\n",
                                addr);
                goto out_err;
        }
 
-       if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
-               kfree(priv->stations[sta_id].lq);
-               priv->stations[sta_id].lq = NULL;
+       if (il->stations[sta_id].used & IL_STA_LOCAL) {
+               kfree(il->stations[sta_id].lq);
+               il->stations[sta_id].lq = NULL;
        }
 
-       priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+       il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
 
-       priv->num_stations--;
+       il->num_stations--;
 
-       BUG_ON(priv->num_stations < 0);
+       BUG_ON(il->num_stations < 0);
 
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
 
-       return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
+       return il_send_remove_station(il, addr, sta_id, false);
 out_err:
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
        return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
+EXPORT_SYMBOL_GPL(il_remove_station);
 
 /**
- * iwl_legacy_clear_ucode_stations - clear ucode station table bits
+ * il_clear_ucode_stations - clear ucode station table bits
  *
  * This function clears all the bits in the driver indicating
  * which stations are active in the ucode. Call when something
  * other than explicit station management would cause this in
  * the ucode, e.g. unassociated RXON.
  */
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
-                             struct iwl_rxon_context *ctx)
+void il_clear_ucode_stations(struct il_priv *il,
+                             struct il_rxon_context *ctx)
 {
        int i;
        unsigned long flags_spin;
        bool cleared = false;
 
-       IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+       D_INFO("Clearing ucode stations in driver\n");
 
-       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-       for (i = 0; i < priv->hw_params.max_stations; i++) {
-               if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if (ctx && ctx->ctxid != il->stations[i].ctxid)
                        continue;
 
-               if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
-                       IWL_DEBUG_INFO(priv,
+               if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+                       D_INFO(
                                "Clearing ucode active for station %d\n", i);
-                       priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+                       il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
                        cleared = true;
                }
        }
-       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
 
        if (!cleared)
-               IWL_DEBUG_INFO(priv,
+               D_INFO(
                        "No active stations found to be cleared\n");
 }
-EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
+EXPORT_SYMBOL(il_clear_ucode_stations);
 
 /**
- * iwl_legacy_restore_stations() - Restore driver known stations to device
+ * il_restore_stations() - Restore driver known stations to device
  *
  * All stations considered active by the driver, but not present in the
  * ucode, are restored.
@@ -574,58 +574,58 @@ EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
  * Function sleeps.
  */
 void
-iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
 {
-       struct iwl_legacy_addsta_cmd sta_cmd;
-       struct iwl_link_quality_cmd lq;
+       struct il_addsta_cmd sta_cmd;
+       struct il_link_quality_cmd lq;
        unsigned long flags_spin;
        int i;
        bool found = false;
        int ret;
        bool send_lq;
 
-       if (!iwl_legacy_is_ready(priv)) {
-               IWL_DEBUG_INFO(priv,
+       if (!il_is_ready(il)) {
+               D_INFO(
                        "Not ready yet, not restoring any stations.\n");
                return;
        }
 
-       IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
-       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-       for (i = 0; i < priv->hw_params.max_stations; i++) {
-               if (ctx->ctxid != priv->stations[i].ctxid)
+       D_ASSOC("Restoring all known stations ... start.\n");
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if (ctx->ctxid != il->stations[i].ctxid)
                        continue;
-               if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
-                           !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
-                       IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
-                                       priv->stations[i].sta.sta.addr);
-                       priv->stations[i].sta.mode = 0;
-                       priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+               if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+                   !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+                       D_ASSOC("Restoring sta %pM\n",
+                                       il->stations[i].sta.sta.addr);
+                       il->stations[i].sta.mode = 0;
+                       il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
                        found = true;
                }
        }
 
-       for (i = 0; i < priv->hw_params.max_stations; i++) {
-               if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
-                       memcpy(&sta_cmd, &priv->stations[i].sta,
-                              sizeof(struct iwl_legacy_addsta_cmd));
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+                       memcpy(&sta_cmd, &il->stations[i].sta,
+                              sizeof(struct il_addsta_cmd));
                        send_lq = false;
-                       if (priv->stations[i].lq) {
-                               memcpy(&lq, priv->stations[i].lq,
-                                      sizeof(struct iwl_link_quality_cmd));
+                       if (il->stations[i].lq) {
+                               memcpy(&lq, il->stations[i].lq,
+                                      sizeof(struct il_link_quality_cmd));
                                send_lq = true;
                        }
-                       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-                       ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+                       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+                       ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
                        if (ret) {
-                               spin_lock_irqsave(&priv->sta_lock, flags_spin);
-                               IWL_ERR(priv, "Adding station %pM failed.\n",
-                                       priv->stations[i].sta.sta.addr);
-                               priv->stations[i].used &=
-                                               ~IWL_STA_DRIVER_ACTIVE;
-                               priv->stations[i].used &=
-                                               ~IWL_STA_UCODE_INPROGRESS;
-                               spin_unlock_irqrestore(&priv->sta_lock,
+                               spin_lock_irqsave(&il->sta_lock, flags_spin);
+                               IL_ERR("Adding station %pM failed.\n",
+                                       il->stations[i].sta.sta.addr);
+                               il->stations[i].used &=
+                                               ~IL_STA_DRIVER_ACTIVE;
+                               il->stations[i].used &=
+                                               ~IL_STA_UCODE_INPROGRESS;
+                               spin_unlock_irqrestore(&il->sta_lock,
                                                                flags_spin);
                        }
                        /*
@@ -633,78 +633,78 @@ iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
                         * current LQ command
                         */
                        if (send_lq)
-                               iwl_legacy_send_lq_cmd(priv, ctx, &lq,
+                               il_send_lq_cmd(il, ctx, &lq,
                                                                CMD_SYNC, true);
-                       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-                       priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+                       spin_lock_irqsave(&il->sta_lock, flags_spin);
+                       il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
                }
        }
 
-       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
        if (!found)
-               IWL_DEBUG_INFO(priv, "Restoring all known stations"
+               D_INFO("Restoring all known stations"
                                " .... no stations to be restored.\n");
        else
-               IWL_DEBUG_INFO(priv, "Restoring all known stations"
+               D_INFO("Restoring all known stations"
                                " .... complete.\n");
 }
-EXPORT_SYMBOL(iwl_legacy_restore_stations);
+EXPORT_SYMBOL(il_restore_stations);
 
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
+int il_get_free_ucode_key_idx(struct il_priv *il)
 {
        int i;
 
-       for (i = 0; i < priv->sta_key_max_num; i++)
-               if (!test_and_set_bit(i, &priv->ucode_key_table))
+       for (i = 0; i < il->sta_key_max_num; i++)
+               if (!test_and_set_bit(i, &il->ucode_key_table))
                        return i;
 
        return WEP_INVALID_OFFSET;
 }
-EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
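/*
 * Illustration, not part of the patch: il_get_free_ucode_key_idx() above
 * claims the first clear bit in il->ucode_key_table using the kernel's
 * atomic test_and_set_bit().  The stand-alone sketch below shows the same
 * first-free-slot idea with a plain, non-atomic bitmap; the constants are
 * made up for the demo (they stand in for sta_key_max_num and
 * WEP_INVALID_OFFSET).
 */
#include <stdio.h>

#define DEMO_KEY_SLOTS       8
#define DEMO_INVALID_OFFSET  255

static int get_free_key_idx(unsigned long *table)
{
	int i;

	for (i = 0; i < DEMO_KEY_SLOTS; i++)
		if (!(*table & (1UL << i))) {
			*table |= 1UL << i;         /* claim the slot */
			return i;
		}
	return DEMO_INVALID_OFFSET;
}

int main(void)
{
	unsigned long table = 0x5;                  /* slots 0 and 2 in use */

	printf("%d\n", get_free_key_idx(&table));   /* 1 */
	printf("%d\n", get_free_key_idx(&table));   /* 3 */
	return 0;
}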
 
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
+void il_dealloc_bcast_stations(struct il_priv *il)
 {
        unsigned long flags;
        int i;
 
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       for (i = 0; i < priv->hw_params.max_stations; i++) {
-               if (!(priv->stations[i].used & IWL_STA_BCAST))
+       spin_lock_irqsave(&il->sta_lock, flags);
+       for (i = 0; i < il->hw_params.max_stations; i++) {
+               if (!(il->stations[i].used & IL_STA_BCAST))
                        continue;
 
-               priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
-               priv->num_stations--;
-               BUG_ON(priv->num_stations < 0);
-               kfree(priv->stations[i].lq);
-               priv->stations[i].lq = NULL;
+               il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+               il->num_stations--;
+               BUG_ON(il->num_stations < 0);
+               kfree(il->stations[i].lq);
+               il->stations[i].lq = NULL;
        }
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
+       spin_unlock_irqrestore(&il->sta_lock, flags);
 }
-EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
 
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
-                          struct iwl_link_quality_cmd *lq)
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void il_dump_lq_cmd(struct il_priv *il,
+                          struct il_link_quality_cmd *lq)
 {
        int i;
-       IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
-       IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+       D_RATE("lq station id 0x%x\n", lq->sta_id);
+       D_RATE("lq ant 0x%X 0x%X\n",
                       lq->general_params.single_stream_ant_msk,
                       lq->general_params.dual_stream_ant_msk);
 
        for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
-               IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+               D_RATE("lq idx %d 0x%X\n",
                               i, lq->rs_table[i].rate_n_flags);
 }
 #else
-static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
-                                  struct iwl_link_quality_cmd *lq)
+static inline void il_dump_lq_cmd(struct il_priv *il,
+                                  struct il_link_quality_cmd *lq)
 {
 }
 #endif
 
 /**
- * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
  *
  * It sometimes happens that an HT rate has been in use and we
  * lose connectivity with the AP; mac80211 will then first tell us that the
@@ -714,22 +714,22 @@ static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
  * Test for this to prevent driver from sending LQ command between the time
  * RXON flags are updated and when LQ command is updated.
  */
-static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
-                             struct iwl_rxon_context *ctx,
-                             struct iwl_link_quality_cmd *lq)
+static bool il_is_lq_table_valid(struct il_priv *il,
+                             struct il_rxon_context *ctx,
+                             struct il_link_quality_cmd *lq)
 {
        int i;
 
        if (ctx->ht.enabled)
                return true;
 
-       IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+       D_INFO("Channel %u is not an HT channel\n",
                       ctx->active.channel);
        for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
                if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
                                                RATE_MCS_HT_MSK) {
-                       IWL_DEBUG_INFO(priv,
-                                      "index %d of LQ expects HT channel\n",
+                       D_INFO(
+                                      "idx %d of LQ expects HT channel\n",
                                       i);
                        return false;
                }
@@ -738,7 +738,7 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
 }
 
 /**
- * iwl_legacy_send_lq_cmd() - Send link quality command
+ * il_send_lq_cmd() - Send link quality command
  * @init: This command is sent as part of station initialization right
  *        after station has been added.
  *
@@ -747,35 +747,35 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
  * this case to clear the state indicating that station creation is in
  * progress.
  */
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
-                   struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+                   struct il_link_quality_cmd *lq, u8 flags, bool init)
 {
        int ret = 0;
        unsigned long flags_spin;
 
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_TX_LINK_QUALITY_CMD,
-               .len = sizeof(struct iwl_link_quality_cmd),
+       struct il_host_cmd cmd = {
+               .id = C_TX_LINK_QUALITY_CMD,
+               .len = sizeof(struct il_link_quality_cmd),
                .flags = flags,
                .data = lq,
        };
 
-       if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+       if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
                return -EINVAL;
 
 
-       spin_lock_irqsave(&priv->sta_lock, flags_spin);
-       if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
-               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       spin_lock_irqsave(&il->sta_lock, flags_spin);
+       if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
                return -EINVAL;
        }
-       spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+       spin_unlock_irqrestore(&il->sta_lock, flags_spin);
 
-       iwl_legacy_dump_lq_cmd(priv, lq);
+       il_dump_lq_cmd(il, lq);
        BUG_ON(init && (cmd.flags & CMD_ASYNC));
 
-       if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
-               ret = iwl_legacy_send_cmd(priv, &cmd);
+       if (il_is_lq_table_valid(il, ctx, lq))
+               ret = il_send_cmd(il, &cmd);
        else
                ret = -EINVAL;
 
@@ -783,35 +783,35 @@ int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                return ret;
 
        if (init) {
-               IWL_DEBUG_INFO(priv, "init LQ command complete,"
+               D_INFO("init LQ command complete,"
                                " clearing sta addition status for sta %d\n",
                               lq->sta_id);
-               spin_lock_irqsave(&priv->sta_lock, flags_spin);
-               priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-               spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
+               spin_lock_irqsave(&il->sta_lock, flags_spin);
+               il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+               spin_unlock_irqrestore(&il->sta_lock, flags_spin);
        }
        return ret;
 }
-EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
+EXPORT_SYMBOL(il_send_lq_cmd);
 
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
+int il_mac_sta_remove(struct ieee80211_hw *hw,
                       struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta)
 {
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
+       struct il_priv *il = hw->priv;
+       struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
        int ret;
 
-       IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
+       D_INFO("received request to remove station %pM\n",
                        sta->addr);
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+       mutex_lock(&il->mutex);
+       D_INFO("proceeding to remove station %pM\n",
                        sta->addr);
-       ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
+       ret = il_remove_station(il, sta_common->sta_id, sta->addr);
        if (ret)
-               IWL_ERR(priv, "Error removing station %pM\n",
+               IL_ERR("Error removing station %pM\n",
                        sta->addr);
-       mutex_unlock(&priv->mutex);
+       mutex_unlock(&il->mutex);
        return ret;
 }
-EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
+EXPORT_SYMBOL(il_mac_sta_remove);
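The add/remove paths above track each entry's lifecycle through the per-station
'used' bit flags (IL_STA_DRIVER_ACTIVE, IL_STA_UCODE_ACTIVE,
IL_STA_UCODE_INPROGRESS), all updated under sta_lock. The following stand-alone
sketch is illustrative only (simplified names and types, not driver code): it
shows the decision that il_add_station_common makes before sending C_ADD_STA --
refuse while an add is in flight, refuse a duplicate, otherwise mark the entry
in-progress.

/* Illustrative sketch only -- simplified from the add path above. */
#include <stdio.h>

#define STA_DRIVER_ACTIVE    (1u << 0)  /* mirrors IL_STA_DRIVER_ACTIVE    */
#define STA_UCODE_ACTIVE     (1u << 1)  /* mirrors IL_STA_UCODE_ACTIVE     */
#define STA_UCODE_INPROGRESS (1u << 2)  /* mirrors IL_STA_UCODE_INPROGRESS */

/* Returns 0 if a C_ADD_STA command should be sent, -1 (think -EEXIST)
 * when the entry is already being added or is already active. */
static int add_needed(unsigned int *used)
{
	if (*used & STA_UCODE_INPROGRESS)
		return -1;              /* add already in flight      */
	if ((*used & STA_DRIVER_ACTIVE) && (*used & STA_UCODE_ACTIVE))
		return -1;              /* already known to the uCode */
	*used |= STA_UCODE_INPROGRESS;  /* mark before sending        */
	return 0;
}

int main(void)
{
	unsigned int used = STA_DRIVER_ACTIVE;          /* entry prepared   */

	printf("first add:  %d\n", add_needed(&used));  /* 0: send command  */
	printf("second add: %d\n", add_needed(&used));  /* -1: in progress  */
	return 0;
}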
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644 (file)
index 67bd75f..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_sta_h__
-#define __iwl_legacy_sta_h__
-
-#include "iwl-dev.h"
-
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE  BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS  BIT(2) /* ucode entry is in process of
-                                           being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
-                               (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_legacy_restore_stations(struct iwl_priv *priv,
-                               struct iwl_rxon_context *ctx);
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
-                             struct iwl_rxon_context *ctx);
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
-                       struct iwl_legacy_addsta_cmd *sta, u8 flags);
-int iwl_legacy_add_station_common(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
-                       const u8 *addr, bool is_ap,
-                       struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_legacy_remove_station(struct iwl_priv *priv,
-                       const u8 sta_id,
-                       const u8 *addr);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
-                       struct ieee80211_vif *vif,
-                       struct ieee80211_sta *sta);
-
-u8 iwl_legacy_prep_station(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
-                       const u8 *addr, bool is_ap,
-                       struct ieee80211_sta *sta);
-
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
-                       struct iwl_rxon_context *ctx,
-                       struct iwl_link_quality_cmd *lq,
-                       u8 flags, bool init);
-
-/**
- * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that in the case
- * we're coming there from a hardware restart mac80211 will be
- * able to reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       struct iwl_rxon_context *ctx;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       memset(priv->stations, 0, sizeof(priv->stations));
-       priv->num_stations = 0;
-
-       priv->ucode_key_table = 0;
-
-       for_each_context(priv, ctx) {
-               /*
-                * Remove all key information that is not stored as part
-                * of station information since mac80211 may not have had
-                * a chance to remove all the keys. When device is
-                * reconfigured by mac80211 after an error all keys will
-                * be reconfigured.
-                */
-               memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
-               ctx->key_mapping_keys = 0;
-       }
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
-{
-       if (WARN_ON(!sta))
-               return IWL_INVALID_STATION;
-
-       return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
-                                         struct iwl_rxon_context *context,
-                                         struct ieee80211_sta *sta)
-{
-       int sta_id;
-
-       if (!sta)
-               return context->bcast_sta_id;
-
-       sta_id = iwl_legacy_sta_id(sta);
-
-       /*
-        * mac80211 should not be passing a partially
-        * initialised station!
-        */
-       WARN_ON(sta_id == IWL_INVALID_STATION);
-
-       return sta_id;
-}
-#endif /* __iwl_legacy_sta_h__ */
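One pattern from the removed header worth noting: during TX or key setup
mac80211 may pass a NULL station pointer, and the driver then falls back to the
per-context broadcast station (iwl_legacy_sta_id_or_broadcast above). A minimal
stand-alone sketch of that fallback, with placeholder types rather than the
driver's structures:

/* Illustrative sketch of the NULL-sta fallback, not driver code. */
#include <stdio.h>
#include <stddef.h>

struct demo_ctx { int bcast_sta_id; };
struct demo_sta { int sta_id; };        /* stands in for the drv_priv data */

static int sta_id_or_broadcast(const struct demo_ctx *ctx,
			       const struct demo_sta *sta)
{
	if (!sta)
		return ctx->bcast_sta_id;   /* no station: use broadcast   */
	return sta->sta_id;                 /* otherwise the per-STA entry */
}

int main(void)
{
	struct demo_ctx ctx = { .bcast_sta_id = 31 };
	struct demo_sta sta = { .sta_id = 4 };

	printf("%d %d\n", sta_id_or_broadcast(&ctx, &sta),      /* 4  */
	       sta_id_or_broadcast(&ctx, NULL));                 /* 31 */
	return 0;
}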
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644 (file)
index c0dfb1a..0000000
+++ /dev/null
@@ -1,659 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/**
- * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
- */
-void
-iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       u32 reg = 0;
-       int txq_id = txq->q.id;
-
-       if (txq->need_update == 0)
-               return;
-
-       /* if we're trying to save power */
-       if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-               /* wake up nic if it's powered down ...
-                * uCode will wake up, and interrupt us again, so next
-                * time we'll skip this part. */
-               reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                       IWL_DEBUG_INFO(priv,
-                                       "Tx queue %d requesting wakeup,"
-                                       " GP1 = 0x%x\n", txq_id, reg);
-                       iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
-                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       return;
-               }
-
-               iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
-                               txq->q.write_ptr | (txq_id << 8));
-
-               /*
-                * else not in power-save mode,
-                * uCode will never sleep when we're
-                * trying to tx (during RFKILL, we're not trying to tx).
-                */
-       } else
-               iwl_write32(priv, HBUS_TARG_WRPTR,
-                           txq->q.write_ptr | (txq_id << 8));
-       txq->need_update = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
-
-/**
- * iwl_legacy_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
- */
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
-
-       if (q->n_bd == 0)
-               return;
-
-       while (q->write_ptr != q->read_ptr) {
-               priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
-
-/**
- * iwl_legacy_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct device *dev = &priv->pci_dev->dev;
-       int i;
-
-       iwl_legacy_tx_queue_unmap(priv, txq_id);
-
-       /* De-alloc array of command/tx buffers */
-       for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
-               kfree(txq->cmd[i]);
-
-       /* De-alloc circular buffer of TFDs */
-       if (txq->q.n_bd)
-               dma_free_coherent(dev, priv->hw_params.tfd_size *
-                                 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
-       /* De-alloc array of per-TFD driver data */
-       kfree(txq->txb);
-       txq->txb = NULL;
-
-       /* deallocate arrays */
-       kfree(txq->cmd);
-       kfree(txq->meta);
-       txq->cmd = NULL;
-       txq->meta = NULL;
-
-       /* 0-fill queue descriptor structure */
-       memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
-
-/**
- * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
-{
-       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-       struct iwl_queue *q = &txq->q;
-       int i;
-
-       if (q->n_bd == 0)
-               return;
-
-       while (q->read_ptr != q->write_ptr) {
-               i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
-
-               if (txq->meta[i].flags & CMD_MAPPED) {
-                       pci_unmap_single(priv->pci_dev,
-                                        dma_unmap_addr(&txq->meta[i], mapping),
-                                        dma_unmap_len(&txq->meta[i], len),
-                                        PCI_DMA_BIDIRECTIONAL);
-                       txq->meta[i].flags = 0;
-               }
-
-               q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
-       }
-
-       i = q->n_window;
-       if (txq->meta[i].flags & CMD_MAPPED) {
-               pci_unmap_single(priv->pci_dev,
-                                dma_unmap_addr(&txq->meta[i], mapping),
-                                dma_unmap_len(&txq->meta[i], len),
-                                PCI_DMA_BIDIRECTIONAL);
-               txq->meta[i].flags = 0;
-       }
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
-
-/**
- * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
-{
-       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-       struct device *dev = &priv->pci_dev->dev;
-       int i;
-
-       iwl_legacy_cmd_queue_unmap(priv);
-
-       /* De-alloc array of command/tx buffers */
-       for (i = 0; i <= TFD_CMD_SLOTS; i++)
-               kfree(txq->cmd[i]);
-
-       /* De-alloc circular buffer of TFDs */
-       if (txq->q.n_bd)
-               dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
-                                 txq->tfds, txq->q.dma_addr);
-
-       /* deallocate arrays */
-       kfree(txq->cmd);
-       kfree(txq->meta);
-       txq->cmd = NULL;
-       txq->meta = NULL;
-
-       /* 0-fill queue descriptor structure */
-       memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill.  Driver and device exchange status of each
- * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
- *
- * For the Tx queue, there are low-mark and high-mark limits.  If, after
- * queuing the packet for Tx, free space becomes < low mark, the Tx queue is
- * stopped.  When reclaiming packets (on 'tx done' IRQ), if free space
- * becomes > high mark, the Tx queue is resumed.
- *
- * See more detailed info in iwl-4965-hw.h.
- ***************************************************/
-
-int iwl_legacy_queue_space(const struct iwl_queue *q)
-{
-       int s = q->read_ptr - q->write_ptr;
-
-       if (q->read_ptr > q->write_ptr)
-               s -= q->n_bd;
-
-       if (s <= 0)
-               s += q->n_window;
-       /* keep some reserve to not confuse empty and full situations */
-       s -= 2;
-       if (s < 0)
-               s = 0;
-       return s;
-}
-EXPORT_SYMBOL(iwl_legacy_queue_space);
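/*
 * Illustration, not part of this file or of the patch: the free-space
 * rule used by iwl_legacy_queue_space() above, exercised stand-alone.
 * The driver keeps a 2-entry reserve so a full ring is never confused
 * with an empty one; the ring sizes below are made up for the demo.
 */
#include <stdio.h>

static int queue_space(int read_ptr, int write_ptr, int n_bd, int n_window)
{
	int s = read_ptr - write_ptr;

	if (read_ptr > write_ptr)
		s -= n_bd;
	if (s <= 0)
		s += n_window;
	s -= 2;                     /* reserve, as in the driver */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	/* 256-entry ring with a 64-slot window */
	printf("%d\n", queue_space(0, 0, 256, 64));     /* empty: 62 free       */
	printf("%d\n", queue_space(0, 60, 256, 64));    /* 60 in flight: 2 free */
	return 0;
}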
-
-
-/**
- * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
-                         int count, int slots_num, u32 id)
-{
-       q->n_bd = count;
-       q->n_window = slots_num;
-       q->id = id;
-
-       /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
-        * and iwl_legacy_queue_dec_wrap are broken. */
-       BUG_ON(!is_power_of_2(count));
-
-       /* slots_num must be power-of-two size, otherwise
-        * iwl_legacy_get_cmd_index is broken. */
-       BUG_ON(!is_power_of_2(slots_num));
-
-       q->low_mark = q->n_window / 4;
-       if (q->low_mark < 4)
-               q->low_mark = 4;
-
-       q->high_mark = q->n_window / 8;
-       if (q->high_mark < 2)
-               q->high_mark = 2;
-
-       q->write_ptr = q->read_ptr = 0;
-
-       return 0;
-}
-
-/**
- * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
-                             struct iwl_tx_queue *txq, u32 id)
-{
-       struct device *dev = &priv->pci_dev->dev;
-       size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
-       /* Driver private data, only for Tx (not command) queues,
-        * not shared with device. */
-       if (id != priv->cmd_queue) {
-               txq->txb = kzalloc(sizeof(txq->txb[0]) *
-                                  TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
-               if (!txq->txb) {
-                       IWL_ERR(priv, "kmalloc for auxiliary BD "
-                                 "structures failed\n");
-                       goto error;
-               }
-       } else {
-               txq->txb = NULL;
-       }
-
-       /* Circular buffer of transmit frame descriptors (TFDs),
-        * shared with device */
-       txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
-                                      GFP_KERNEL);
-       if (!txq->tfds) {
-               IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
-               goto error;
-       }
-       txq->q.id = id;
-
-       return 0;
-
- error:
-       kfree(txq->txb);
-       txq->txb = NULL;
-
-       return -ENOMEM;
-}
-
-/**
- * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-                     int slots_num, u32 txq_id)
-{
-       int i, len;
-       int ret;
-       int actual_slots = slots_num;
-
-       /*
-        * Alloc buffer array for commands (Tx or other types of commands).
-        * For the command queue (#4/#9), allocate command space + one big
-        * command for scan, since scan command is very huge; the system will
-        * not have two scans at the same time, so only one is needed.
-        * For normal Tx queues (all other queues), no super-size command
-        * space is needed.
-        */
-       if (txq_id == priv->cmd_queue)
-               actual_slots++;
-
-       txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
-                           GFP_KERNEL);
-       txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
-                          GFP_KERNEL);
-
-       if (!txq->meta || !txq->cmd)
-               goto out_free_arrays;
-
-       len = sizeof(struct iwl_device_cmd);
-       for (i = 0; i < actual_slots; i++) {
-               /* only happens for cmd queue */
-               if (i == slots_num)
-                       len = IWL_MAX_CMD_SIZE;
-
-               txq->cmd[i] = kmalloc(len, GFP_KERNEL);
-               if (!txq->cmd[i])
-                       goto err;
-       }
-
-       /* Alloc driver data array and TFD circular buffer */
-       ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
-       if (ret)
-               goto err;
-
-       txq->need_update = 0;
-
-       /*
-        * For the default queues 0-3, set up the swq_id
-        * already -- all others need to get one later
-        * (if they need one at all).
-        */
-       if (txq_id < 4)
-               iwl_legacy_set_swq_id(txq, txq_id, txq_id);
-
-       /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
-        * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
-       BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
-       /* Initialize queue's high/low-water marks, and head/tail indexes */
-       iwl_legacy_queue_init(priv, &txq->q,
-                               TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
-       /* Tell device where to find queue */
-       priv->cfg->ops->lib->txq_init(priv, txq);
-
-       return 0;
-err:
-       for (i = 0; i < actual_slots; i++)
-               kfree(txq->cmd[i]);
-out_free_arrays:
-       kfree(txq->meta);
-       kfree(txq->cmd);
-
-       return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
-
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
-                       int slots_num, u32 txq_id)
-{
-       int actual_slots = slots_num;
-
-       if (txq_id == priv->cmd_queue)
-               actual_slots++;
-
-       memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
-
-       txq->need_update = 0;
-
-       /* Initialize queue's high/low-water marks, and head/tail indexes */
-       iwl_legacy_queue_init(priv, &txq->q,
-                               TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
-       /* Tell device where to find queue */
-       priv->cfg->ops->lib->txq_init(priv, txq);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
-
-/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
-
-/**
- * iwl_legacy_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data pointer
- * @cmd: a pointer to the uCode command structure
- *
- * The function returns a value < 0 to indicate that the operation
- * failed. On success, it returns the index (> 0) of the command in the
- * command queue.
- */
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
-       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-       struct iwl_queue *q = &txq->q;
-       struct iwl_device_cmd *out_cmd;
-       struct iwl_cmd_meta *out_meta;
-       dma_addr_t phys_addr;
-       unsigned long flags;
-       int len;
-       u32 idx;
-       u16 fix_size;
-
-       cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
-       fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
-
-       /* If any of the command structures end up being larger than
-        * the TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
-        * we will need to increase the size of the TFD entries.
-        * Also check that the command buffer does not exceed the size
-        * of device_cmd and max_cmd_size. */
-       BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-              !(cmd->flags & CMD_SIZE_HUGE));
-       BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
-
-       if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
-               IWL_WARN(priv, "Not sending command - %s KILL\n",
-                        iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
-               return -EIO;
-       }
-
-       spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-       if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-               spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
-               IWL_ERR(priv, "Restarting adapter due to command queue full\n");
-               queue_work(priv->workqueue, &priv->restart);
-               return -ENOSPC;
-       }
-
-       idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
-       out_cmd = txq->cmd[idx];
-       out_meta = &txq->meta[idx];
-
-       if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
-               spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-               return -ENOSPC;
-       }
-
-       memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
-       out_meta->flags = cmd->flags | CMD_MAPPED;
-       if (cmd->flags & CMD_WANT_SKB)
-               out_meta->source = cmd;
-       if (cmd->flags & CMD_ASYNC)
-               out_meta->callback = cmd->callback;
-
-       out_cmd->hdr.cmd = cmd->id;
-       memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
-       /* At this point, the out_cmd now has all of the incoming cmd
-        * information */
-
-       out_cmd->hdr.flags = 0;
-       out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
-                       INDEX_TO_SEQ(q->write_ptr));
-       if (cmd->flags & CMD_SIZE_HUGE)
-               out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
-       len = sizeof(struct iwl_device_cmd);
-       if (idx == TFD_CMD_SLOTS)
-               len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       switch (out_cmd->hdr.cmd) {
-       case REPLY_TX_LINK_QUALITY_CMD:
-       case SENSITIVITY_CMD:
-               IWL_DEBUG_HC_DUMP(priv,
-                               "Sending command %s (#%x), seq: 0x%04X, "
-                               "%d bytes at %d[%d]:%d\n",
-                               iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
-                               out_cmd->hdr.cmd,
-                               le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-                               q->write_ptr, idx, priv->cmd_queue);
-               break;
-       default:
-               IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
-                               "%d bytes at %d[%d]:%d\n",
-                               iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
-                               out_cmd->hdr.cmd,
-                               le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-                               q->write_ptr, idx, priv->cmd_queue);
-       }
-#endif
-       txq->need_update = 1;
-
-       if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
-               /* Set up entry in queue's byte count circular buffer */
-               priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
-
-       phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-                                  fix_size, PCI_DMA_BIDIRECTIONAL);
-       dma_unmap_addr_set(out_meta, mapping, phys_addr);
-       dma_unmap_len_set(out_meta, len, fix_size);
-
-       trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
-                                               fix_size, cmd->flags);
-
-       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                  phys_addr, fix_size, 1,
-                                                  U32_PAD(cmd->len));
-
-       /* Increment and update queue's write index */
-       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_legacy_txq_update_write_ptr(priv, txq);
-
-       spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-       return idx;
-}
-
-/**
- * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms.  If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
-                                  int idx, int cmd_idx)
-{
-       struct iwl_tx_queue *txq = &priv->txq[txq_id];
-       struct iwl_queue *q = &txq->q;
-       int nfreed = 0;
-
-       if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
-               IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-                         "is out of range [0-%d] %d %d.\n", txq_id,
-                         idx, q->n_bd, q->write_ptr, q->read_ptr);
-               return;
-       }
-
-       for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
-            q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-               if (nfreed++ > 0) {
-                       IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
-                                       q->write_ptr, q->read_ptr);
-                       queue_work(priv->workqueue, &priv->restart);
-               }
-
-       }
-}
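The wrap-around walk in the reclaim loop above reduces to ordinary ring
arithmetic. A minimal sketch (the helper name is hypothetical, not part of
the driver), assuming old_r is the read pointer before reclaim and n_bd is
the ring size:

        /* Illustrative only: how many slots the reclaim loop advances the
         * read pointer, i.e. from old_r up to and including idx, mod n_bd. */
        static inline int hcmd_slots_reclaimed(int old_r, int idx, int n_bd)
        {
                return ((idx + 1) - old_r + n_bd) % n_bd;
        }
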
-
-/**
- * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it, the callback
- * will be executed.  The attached skb (if present) will only be freed
- * if the callback returns 1.
- */
-void
-iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u16 sequence = le16_to_cpu(pkt->hdr.sequence);
-       int txq_id = SEQ_TO_QUEUE(sequence);
-       int index = SEQ_TO_INDEX(sequence);
-       int cmd_index;
-       bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
-       struct iwl_device_cmd *cmd;
-       struct iwl_cmd_meta *meta;
-       struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
-       unsigned long flags;
-
-       /* If a Tx command is being handled and it isn't in the actual
-        * command queue, then a command routing bug has been introduced
-        * in the queue management code. */
-       if (WARN(txq_id != priv->cmd_queue,
-                "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-                 txq_id, priv->cmd_queue, sequence,
-                 priv->txq[priv->cmd_queue].q.read_ptr,
-                 priv->txq[priv->cmd_queue].q.write_ptr)) {
-               iwl_print_hex_error(priv, pkt, 32);
-               return;
-       }
-
-       cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
-       cmd = txq->cmd[cmd_index];
-       meta = &txq->meta[cmd_index];
-
-       txq->time_stamp = jiffies;
-
-       pci_unmap_single(priv->pci_dev,
-                        dma_unmap_addr(meta, mapping),
-                        dma_unmap_len(meta, len),
-                        PCI_DMA_BIDIRECTIONAL);
-
-       /* Input error checking is done when commands are added to queue. */
-       if (meta->flags & CMD_WANT_SKB) {
-               meta->source->reply_page = (unsigned long)rxb_addr(rxb);
-               rxb->page = NULL;
-       } else if (meta->callback)
-               meta->callback(priv, cmd, pkt);
-
-       spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-       iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
-
-       if (!(meta->flags & CMD_ASYNC)) {
-               clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-               IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
-                              iwl_legacy_get_cmd_string(cmd->hdr.cmd));
-               wake_up(&priv->wait_command_queue);
-       }
-
-       /* Mark as unmapped */
-       meta->flags = 0;
-
-       spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644 (file)
index b282d86..0000000
+++ /dev/null
@@ -1,4016 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME       "iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION        \
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION  IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT  "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR     "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
-       .sw_crypto = 1,
-       .restart_fw = 1,
-       .disable_hw_scan = 1,
-       /* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39  is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN      - Force MAIN antenna
- * IWL_ANTENNA_AUX       - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
-       switch (iwl3945_mod_params.antenna) {
-       case IWL_ANTENNA_DIVERSITY:
-               return 0;
-
-       case IWL_ANTENNA_MAIN:
-               if (eeprom->antenna_switch_type)
-                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
-               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
-       case IWL_ANTENNA_AUX:
-               if (eeprom->antenna_switch_type)
-                       return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-               return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
-       }
-
-       /* bad antenna selector value */
-       IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
-               iwl3945_mod_params.antenna);
-
-       return 0;               /* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
-                                  struct ieee80211_key_conf *keyconf,
-                                  u8 sta_id)
-{
-       unsigned long flags;
-       __le16 key_flags = 0;
-       int ret;
-
-       key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
-       key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
-       if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
-               key_flags |= STA_KEY_MULTICAST_MSK;
-
-       keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-       keyconf->hw_key_idx = keyconf->keyidx;
-       key_flags &= ~STA_KEY_FLG_INVALID;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
-       priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-       memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
-              keyconf->keylen);
-
-       memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
-              keyconf->keylen);
-
-       if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
-                       == STA_KEY_FLG_NO_ENC)
-               priv->stations[sta_id].sta.key.key_offset =
-                                iwl_legacy_get_free_ucode_key_index(priv);
-       /* else, we are overriding an existing key => no need to allocate room
-        * in uCode. */
-
-       WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
-               "no space for a new key");
-
-       priv->stations[sta_id].sta.key.key_flags = key_flags;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
-       IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
-       ret = iwl_legacy_send_add_sta(priv,
-                               &priv->stations[sta_id].sta, CMD_ASYNC);
-
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
-                                 struct ieee80211_key_conf *keyconf,
-                                 u8 sta_id)
-{
-       return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
-                                 struct ieee80211_key_conf *keyconf,
-                                 u8 sta_id)
-{
-       return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
-       unsigned long flags;
-       struct iwl_legacy_addsta_cmd sta_cmd;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
-       memset(&priv->stations[sta_id].sta.key, 0,
-               sizeof(struct iwl4965_keyinfo));
-       priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
-       priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
-       return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
-                       struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
-       int ret = 0;
-
-       keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
-       switch (keyconf->cipher) {
-       case WLAN_CIPHER_SUITE_CCMP:
-               ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
-               break;
-       case WLAN_CIPHER_SUITE_TKIP:
-               ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
-               break;
-       case WLAN_CIPHER_SUITE_WEP40:
-       case WLAN_CIPHER_SUITE_WEP104:
-               ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
-               break;
-       default:
-               IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
-                       keyconf->cipher);
-               ret = -EINVAL;
-       }
-
-       IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
-                     keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-                     sta_id, ret);
-
-       return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
-       int ret = -EOPNOTSUPP;
-
-       return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
-                               struct ieee80211_key_conf *key)
-{
-       if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-           key->cipher == WLAN_CIPHER_SUITE_WEP104)
-               return -EOPNOTSUPP;
-
-       IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
-       return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
-       struct list_head *element;
-
-       IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
-                      priv->frames_count);
-
-       while (!list_empty(&priv->free_frames)) {
-               element = priv->free_frames.next;
-               list_del(element);
-               kfree(list_entry(element, struct iwl3945_frame, list));
-               priv->frames_count--;
-       }
-
-       if (priv->frames_count) {
-               IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
-                           priv->frames_count);
-               priv->frames_count = 0;
-       }
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
-       struct iwl3945_frame *frame;
-       struct list_head *element;
-       if (list_empty(&priv->free_frames)) {
-               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
-               if (!frame) {
-                       IWL_ERR(priv, "Could not allocate frame!\n");
-                       return NULL;
-               }
-
-               priv->frames_count++;
-               return frame;
-       }
-
-       element = priv->free_frames.next;
-       list_del(element);
-       return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
-       memset(frame, 0, sizeof(*frame));
-       list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
-                               struct ieee80211_hdr *hdr,
-                               int left)
-{
-
-       if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
-               return 0;
-
-       if (priv->beacon_skb->len > left)
-               return 0;
-
-       memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
-       return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
-       struct iwl3945_frame *frame;
-       unsigned int frame_size;
-       int rc;
-       u8 rate;
-
-       frame = iwl3945_get_free_frame(priv);
-
-       if (!frame) {
-               IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
-                         "command.\n");
-               return -ENOMEM;
-       }
-
-       rate = iwl_legacy_get_lowest_plcp(priv,
-                               &priv->contexts[IWL_RXON_CTX_BSS]);
-
-       frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
-       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
-                             &frame->u.cmd[0]);
-
-       iwl3945_free_frame(priv, frame);
-
-       return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
-       if (priv->_3945.shared_virt)
-               dma_free_coherent(&priv->pci_dev->dev,
-                                 sizeof(struct iwl3945_shared),
-                                 priv->_3945.shared_virt,
-                                 priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
-                                     struct ieee80211_tx_info *info,
-                                     struct iwl_device_cmd *cmd,
-                                     struct sk_buff *skb_frag,
-                                     int sta_id)
-{
-       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-       struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
-       tx_cmd->sec_ctl = 0;
-
-       switch (keyinfo->cipher) {
-       case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
-               IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
-               break;
-
-       case WLAN_CIPHER_SUITE_TKIP:
-               break;
-
-       case WLAN_CIPHER_SUITE_WEP104:
-               tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-               /* fall through */
-       case WLAN_CIPHER_SUITE_WEP40:
-               tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
-                   (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
-               memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
-               IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
-                            "with key %d\n", info->control.hw_key->hw_key_idx);
-               break;
-
-       default:
-               IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
-               break;
-       }
-}
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
-                                 struct iwl_device_cmd *cmd,
-                                 struct ieee80211_tx_info *info,
-                                 struct ieee80211_hdr *hdr, u8 std_id)
-{
-       struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-       __le32 tx_flags = tx_cmd->tx_flags;
-       __le16 fc = hdr->frame_control;
-
-       tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-               tx_flags |= TX_CMD_FLG_ACK_MSK;
-               if (ieee80211_is_mgmt(fc))
-                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-               if (ieee80211_is_probe_resp(fc) &&
-                   !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
-                       tx_flags |= TX_CMD_FLG_TSF_MSK;
-       } else {
-               tx_flags &= (~TX_CMD_FLG_ACK_MSK);
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-       }
-
-       tx_cmd->sta_id = std_id;
-       if (ieee80211_has_morefrags(fc))
-               tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
-       if (ieee80211_is_data_qos(fc)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tx_cmd->tid_tspec = qc[0] & 0xf;
-               tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
-       } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-       }
-
-       iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
-       tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
-       if (ieee80211_is_mgmt(fc)) {
-               if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
-               else
-                       tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
-       } else {
-               tx_cmd->timeout.pm_frame_timeout = 0;
-       }
-
-       tx_cmd->driver_txop = 0;
-       tx_cmd->tx_flags = tx_flags;
-       tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct iwl3945_tx_cmd *tx_cmd;
-       struct iwl_tx_queue *txq = NULL;
-       struct iwl_queue *q = NULL;
-       struct iwl_device_cmd *out_cmd;
-       struct iwl_cmd_meta *out_meta;
-       dma_addr_t phys_addr;
-       dma_addr_t txcmd_phys;
-       int txq_id = skb_get_queue_mapping(skb);
-       u16 len, idx, hdr_len;
-       u8 id;
-       u8 unicast;
-       u8 sta_id;
-       u8 tid = 0;
-       __le16 fc;
-       u8 wait_write_ptr = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       if (iwl_legacy_is_rfkill(priv)) {
-               IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-               goto drop_unlock;
-       }
-
-       if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
-               IWL_ERR(priv, "ERROR: No TX rate available.\n");
-               goto drop_unlock;
-       }
-
-       unicast = !is_multicast_ether_addr(hdr->addr1);
-       id = 0;
-
-       fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (ieee80211_is_auth(fc))
-               IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
-       else if (ieee80211_is_assoc_req(fc))
-               IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
-       else if (ieee80211_is_reassoc_req(fc))
-               IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       hdr_len = ieee80211_hdrlen(fc);
-
-       /* Find index into station table for destination station */
-       sta_id = iwl_legacy_sta_id_or_broadcast(
-                       priv, &priv->contexts[IWL_RXON_CTX_BSS],
-                       info->control.sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-                              hdr->addr1);
-               goto drop;
-       }
-
-       IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
-       if (ieee80211_is_data_qos(fc)) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-               if (unlikely(tid >= MAX_TID_COUNT))
-                       goto drop;
-       }
-
-       /* Descriptor for chosen Tx queue */
-       txq = &priv->txq[txq_id];
-       q = &txq->q;
-
-       if ((iwl_legacy_queue_space(q) < q->high_mark))
-               goto drop;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-
-       /* Set up driver data for this TFD */
-       memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-       txq->txb[q->write_ptr].skb = skb;
-       txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       /* Init first empty entry in queue's array of Tx/cmd buffers */
-       out_cmd = txq->cmd[idx];
-       out_meta = &txq->meta[idx];
-       tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
-       memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-       memset(tx_cmd, 0, sizeof(*tx_cmd));
-
-       /*
-        * Set up the Tx-command (not MAC!) header.
-        * Store the chosen Tx queue and TFD index within the sequence field;
-        * after Tx, uCode's Tx response will return this value so driver can
-        * locate the frame within the tx queue and do post-tx processing.
-        */
-       out_cmd->hdr.cmd = REPLY_TX;
-       out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-                               INDEX_TO_SEQ(q->write_ptr)));
-
-       /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
-       if (info->control.hw_key)
-               iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
-       /* TODO need this for burst mode later on */
-       iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
-       /* set is_hcca to 0; it probably will never be implemented */
-       iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
-       /* Total # bytes to be transmitted */
-       len = (u16)skb->len;
-       tx_cmd->len = cpu_to_le16(len);
-
-       iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-       iwl_legacy_update_stats(priv, true, fc, len);
-       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
-       tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
-       if (!ieee80211_has_morefrags(hdr->frame_control)) {
-               txq->need_update = 1;
-       } else {
-               wait_write_ptr = 1;
-               txq->need_update = 0;
-       }
-
-       IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
-                    le16_to_cpu(out_cmd->hdr.sequence));
-       IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-       iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
-       iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
-                          ieee80211_hdrlen(fc));
-
-       /*
-        * Use the first empty entry in this queue's command buffer array
-        * to contain the Tx command and MAC header concatenated together
-        * (payload data will be in another buffer).
-        * Size of this varies, due to varying MAC header length.
-        * If end is not dword aligned, we'll have 2 extra bytes at the end
-        * of the MAC header (device reads on dword boundaries).
-        * We'll tell device about this padding later.
-        */
-       len = sizeof(struct iwl3945_tx_cmd) +
-                       sizeof(struct iwl_cmd_header) + hdr_len;
-       len = (len + 3) & ~3;
-
-       /* Physical address of this Tx command's header (not MAC header!),
-        * within command buffer array. */
-       txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-                                   len, PCI_DMA_TODEVICE);
-       /* we do not map meta data ... so we can safely access address to
-        * provide to unmap command */
-       dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, len);
-
-       /* Add buffer containing Tx command and MAC(!) header to TFD's
-        * first entry */
-       priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                  txcmd_phys, len, 1, 0);
-
-
-       /* Set up TFD's 2nd entry to point directly to remainder of skb,
-        * if any (802.11 null frames have no payload). */
-       len = skb->len - hdr_len;
-       if (len) {
-               phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-                                          len, PCI_DMA_TODEVICE);
-               priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                          phys_addr, len,
-                                                          0, U32_PAD(len));
-       }
-
-
-       /* Tell device the write index *just past* this latest filled TFD */
-       q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
-       iwl_legacy_txq_update_write_ptr(priv, txq);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       if ((iwl_legacy_queue_space(q) < q->high_mark)
-           && priv->mac80211_registered) {
-               if (wait_write_ptr) {
-                       spin_lock_irqsave(&priv->lock, flags);
-                       txq->need_update = 1;
-                       iwl_legacy_txq_update_write_ptr(priv, txq);
-                       spin_unlock_irqrestore(&priv->lock, flags);
-               }
-
-               iwl_legacy_stop_queue(priv, txq);
-       }
-
-       return 0;
-
-drop_unlock:
-       spin_unlock_irqrestore(&priv->lock, flags);
-drop:
-       return -1;
-}
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
-                              struct ieee80211_measurement_params *params,
-                              u8 type)
-{
-       struct iwl_spectrum_cmd spectrum;
-       struct iwl_rx_packet *pkt;
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
-               .data = (void *)&spectrum,
-               .flags = CMD_WANT_SKB,
-       };
-       u32 add_time = le64_to_cpu(params->start_time);
-       int rc;
-       int spectrum_resp_status;
-       int duration = le16_to_cpu(params->duration);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
-               add_time = iwl_legacy_usecs_to_beacons(priv,
-                       le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
-                       le16_to_cpu(ctx->timing.beacon_interval));
-
-       memset(&spectrum, 0, sizeof(spectrum));
-
-       spectrum.channel_count = cpu_to_le16(1);
-       spectrum.flags =
-           RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
-       spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
-       cmd.len = sizeof(spectrum);
-       spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
-       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
-               spectrum.start_time =
-                       iwl_legacy_add_beacon_time(priv,
-                               priv->_3945.last_beacon_time, add_time,
-                               le16_to_cpu(ctx->timing.beacon_interval));
-       else
-               spectrum.start_time = 0;
-
-       spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
-       spectrum.channels[0].channel = params->channel;
-       spectrum.channels[0].type = type;
-       if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
-               spectrum.flags |= RXON_FLG_BAND_24G_MSK |
-                   RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
-       rc = iwl_legacy_send_cmd_sync(priv, &cmd);
-       if (rc)
-               return rc;
-
-       pkt = (struct iwl_rx_packet *)cmd.reply_page;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
-               rc = -EIO;
-       }
-
-       spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
-       switch (spectrum_resp_status) {
-       case 0:         /* Command will be handled */
-               if (pkt->u.spectrum.id != 0xff) {
-                       IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
-                                               pkt->u.spectrum.id);
-                       priv->measurement_status &= ~MEASUREMENT_READY;
-               }
-               priv->measurement_status |= MEASUREMENT_ACTIVE;
-               rc = 0;
-               break;
-
-       case 1:         /* Command will not be handled */
-               rc = -EAGAIN;
-               break;
-       }
-
-       iwl_legacy_free_pages(priv, cmd.reply_page);
-
-       return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
-                              struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_alive_resp *palive;
-       struct delayed_work *pwork;
-
-       palive = &pkt->u.alive_frame;
-
-       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
-                      "0x%01X 0x%01X\n",
-                      palive->is_valid, palive->ver_type,
-                      palive->ver_subtype);
-
-       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
-               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-               memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
-                      sizeof(struct iwl_alive_resp));
-               pwork = &priv->init_alive_start;
-       } else {
-               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-               memcpy(&priv->card_alive, &pkt->u.alive_frame,
-                      sizeof(struct iwl_alive_resp));
-               pwork = &priv->alive_start;
-               iwl3945_disable_events(priv);
-       }
-
-       /* We delay the ALIVE response by 5ms to
-        * give the HW RF Kill time to activate... */
-       if (palive->is_valid == UCODE_VALID_OK)
-               queue_delayed_work(priv->workqueue, pwork,
-                                  msecs_to_jiffies(5));
-       else
-               IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
-                                struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
-       IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       u8 rate = beacon->beacon_notify_hdr.rate;
-
-       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-               "tsf %d %d rate %d\n",
-               le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
-               beacon->beacon_notify_hdr.failure_frame,
-               le32_to_cpu(beacon->ibss_mgr_status),
-               le32_to_cpu(beacon->high_tsf),
-               le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
-       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
-       unsigned long status = priv->status;
-
-       IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
-                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
-                         (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-       if (flags & HW_CARD_DISABLED)
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
-       iwl_legacy_scan_cancel(priv);
-
-       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-            test_bit(STATUS_RF_KILL_HW, &priv->status)))
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-                               test_bit(STATUS_RF_KILL_HW, &priv->status));
-       else
-               wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
-       priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
-       priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
-       priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
-       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
-       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-                       iwl_legacy_rx_spectrum_measure_notif;
-       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
-       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-           iwl_legacy_rx_pm_debug_statistics_notif;
-       priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
-       /*
-        * The same handler is used for both the REPLY to a discrete
-        * statistics request from the host as well as for the periodic
-        * statistics notifications (after received beacons) from the uCode.
-        */
-       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
-       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
-       iwl_legacy_setup_rx_scan_handlers(priv);
-       priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
-       /* Set up hardware specific Rx handlers */
-       iwl3945_hw_rx_handler_setup(priv);
-}
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt.  The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                            iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx()         Detach iwl_rx_mem_buffers from pool up to the
- *                            READ INDEX, detaching the SKB from the pool.
- *                            Moves the packet buffer from queue to rx_used.
- *                            Calls iwl3945_rx_queue_restock to refill any empty
- *                            slots.
- * ...
- *
- */
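A self-contained sketch of the READ/WRITE index rules spelled out above (the
names and the 256-entry ring size are assumptions for illustration, not
values taken from this driver) might look like:

        #include <stdbool.h>

        #define SKETCH_RX_QUEUE_SIZE 256        /* assumed power-of-two ring size */

        /* Empty (no good data): WRITE sits one slot behind READ. */
        static bool sketch_rxq_empty(unsigned int read, unsigned int write)
        {
                return ((write + 1) % SKETCH_RX_QUEUE_SIZE) ==
                        (read % SKETCH_RX_QUEUE_SIZE);
        }

        /* Full: WRITE has caught up with READ. */
        static bool sketch_rxq_full(unsigned int read, unsigned int write)
        {
                return (write % SKETCH_RX_QUEUE_SIZE) ==
                        (read % SKETCH_RX_QUEUE_SIZE);
        }

        /* Slots available to restock, as computed later in iwl3945_rx_handle():
         * distance from the last restocked position to the firmware READ index. */
        static int sketch_rxq_restock_space(int read, int write_actual)
        {
                int space = read - write_actual;

                return space < 0 ? space + SKETCH_RX_QUEUE_SIZE : space;
        }
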
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
-                                         dma_addr_t dma_addr)
-{
-       return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct list_head *element;
-       struct iwl_rx_mem_buffer *rxb;
-       unsigned long flags;
-       int write;
-
-       spin_lock_irqsave(&rxq->lock, flags);
-       write = rxq->write & ~0x7;
-       while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
-               /* Get next free Rx buffer, remove from free list */
-               element = rxq->rx_free.next;
-               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-               list_del(element);
-
-               /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
-               rxq->queue[rxq->write] = rxb;
-               rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
-               rxq->free_count--;
-       }
-       spin_unlock_irqrestore(&rxq->lock, flags);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               queue_work(priv->workqueue, &priv->rx_replenish);
-
-
-       /* If we've added more space for the firmware to place data, tell it.
-        * Increment device's write pointer in multiples of 8. */
-       if ((rxq->write_actual != (rxq->write & ~0x7))
-           || (abs(rxq->write - rxq->read) > 7)) {
-               spin_lock_irqsave(&rxq->lock, flags);
-               rxq->need_update = 1;
-               spin_unlock_irqrestore(&rxq->lock, flags);
-               iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-       }
-}
-
-/**
- * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free, an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl3945_rx_queue_restock.
- * This is called as a scheduled work item (except during initialization).
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       struct list_head *element;
-       struct iwl_rx_mem_buffer *rxb;
-       struct page *page;
-       unsigned long flags;
-       gfp_t gfp_mask = priority;
-
-       while (1) {
-               spin_lock_irqsave(&rxq->lock, flags);
-
-               if (list_empty(&rxq->rx_used)) {
-                       spin_unlock_irqrestore(&rxq->lock, flags);
-                       return;
-               }
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (priv->hw_params.rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
-               /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
-                       break;
-               }
-
-               spin_lock_irqsave(&rxq->lock, flags);
-               if (list_empty(&rxq->rx_used)) {
-                       spin_unlock_irqrestore(&rxq->lock, flags);
-                       __free_pages(page, priv->hw_params.rx_page_order);
-                       return;
-               }
-               element = rxq->rx_used.next;
-               rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-               list_del(element);
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               rxb->page = page;
-               /* Get physical address of RB/SKB */
-               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-
-               spin_lock_irqsave(&rxq->lock, flags);
-
-               list_add_tail(&rxb->list, &rxq->rx_free);
-               rxq->free_count++;
-               priv->alloc_rxb_page++;
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-       }
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       unsigned long flags;
-       int i;
-       spin_lock_irqsave(&rxq->lock, flags);
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
-       /* Fill the rx_used queue with _all_ of the Rx buffers */
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
-               /* In the reset function, these buffers may have been allocated
-                * to an SKB, so we need to unmap and free potential storage */
-               if (rxq->pool[i].page != NULL) {
-                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-               list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-       }
-
-       /* Set us so that we have processed and used all buffers, but have
-        * not restocked the Rx queue with fresh buffers */
-       rxq->read = rxq->write = 0;
-       rxq->write_actual = 0;
-       rxq->free_count = 0;
-       spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
-       struct iwl_priv *priv = data;
-       unsigned long flags;
-
-       iwl3945_rx_allocate(priv, GFP_KERNEL);
-
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl3945_rx_queue_restock(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
-       iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
-       iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
- * This free routine walks the list of POOL entries and, if the SKB is
- * non-NULL, it is unmapped and freed.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
-       int i;
-       for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-               if (rxq->pool[i].page != NULL) {
-                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
-                               PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       __iwl_legacy_free_pages(priv, rxq->pool[i].page);
-                       rxq->pool[i].page = NULL;
-               }
-       }
-
-       dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-                         rxq->bd_dma);
-       dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
-                         rxq->rb_stts, rxq->rb_stts_dma);
-       rxq->bd = NULL;
-       rxq->rb_stts  = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/*      0   1   2   3   4   5   6   7   8   9 */
-        0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
-       20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
-       26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
-       29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
-       32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
-       34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
-       36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
-       37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
-       38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
-       39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- *   (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
-       /* 1000:1 or higher just report as 60 dB */
-       if (sig_ratio >= 1000)
-               return 60;
-
-       /* 100:1 or higher, divide by 10 and use table,
-        *   add 20 dB to make up for divide by 10 */
-       if (sig_ratio >= 100)
-               return 20 + (int)ratio2dB[sig_ratio/10];
-
-       /* We shouldn't see this */
-       if (sig_ratio < 1)
-               return 0;
-
-       /* Use table for ratios 1:1 - 99:1 */
-       return (int)ratio2dB[sig_ratio];
-}
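As a worked check against the table above: a linear ratio of 250 takes the
>= 100 branch, 250/10 = 25 and ratio2dB[25] = 28, so the function returns
20 + 28 = 48 dB, close to the exact voltage-ratio value 20*log10(250) ≈ 48.0 dB;
a ratio of 7 uses the table directly and returns ratio2dB[7] = 17 dB against
20*log10(7) ≈ 16.9 dB.
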
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
-       struct iwl_rx_mem_buffer *rxb;
-       struct iwl_rx_packet *pkt;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       u32 r, i;
-       int reclaim;
-       unsigned long flags;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty = 0;
-
-       /* uCode's read index (stored in shared DRAM) indicates the last Rx
-        * buffer that the driver may process (last buffer filled by ucode). */
-       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
-       i = rxq->read;
-
-       /* calculate total frames that need to be restocked after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
-       /* Rx interrupt, but nothing sent from uCode */
-       if (i == r)
-               IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-       while (i != r) {
-               int len;
-
-               rxb = rxq->queue[i];
-
-               /* If an RXB doesn't have a Rx queue slot associated with it,
-                * then a bug has been introduced in the queue refilling
-                * routines -- catch it here */
-               BUG_ON(rxb == NULL);
-
-               rxq->queue[i] = NULL;
-
-               pci_unmap_page(priv->pci_dev, rxb->page_dma,
-                              PAGE_SIZE << priv->hw_params.rx_page_order,
-                              PCI_DMA_FROMDEVICE);
-               pkt = rxb_addr(rxb);
-
-               len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-               len += sizeof(u32); /* account for status word */
-               trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
-               /* Reclaim a command buffer only if this packet is a response
-                *   to a (driver-originated) command.
-                * If the packet (e.g. Rx frame) originated from uCode,
-                *   there is no command buffer to reclaim.
-                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-                *   but apparently a few don't get set; catch them here. */
-               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-                       (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
-                       (pkt->hdr.cmd != REPLY_TX);
-
-               /* Based on type of command response or notification,
-                *   handle those that need handling via function in
-                *   rx_handlers table.  See iwl3945_setup_rx_handlers() */
-               if (priv->rx_handlers[pkt->hdr.cmd]) {
-                       IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
-                       iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
-                       priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
-                       priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
-               } else {
-                       /* No handling needed */
-                       IWL_DEBUG_RX(priv,
-                               "r %d i %d No handler needed for %s, 0x%02x\n",
-                               r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
-                               pkt->hdr.cmd);
-               }
-
-               /*
-                * XXX: After here, we should always check rxb->page
-                * against NULL before touching it or its virtual
-                * memory (pkt). Because some rx_handler might have
-                * already taken or freed the pages.
-                */
-
-               if (reclaim) {
-                       /* Invoke any callbacks, transfer the buffer to caller,
-                        * and fire off the (possibly) blocking iwl_legacy_send_cmd()
-                        * as we reclaim the driver command queue */
-                       if (rxb->page)
-                               iwl_legacy_tx_cmd_complete(priv, rxb);
-                       else
-                               IWL_WARN(priv, "Claim null rxb?\n");
-               }
-
-               /* Reuse the page if possible. For notification packets and
-                * SKBs that fail to Rx correctly, add them back into the
-                * rx_free list for reuse later. */
-               spin_lock_irqsave(&rxq->lock, flags);
-               if (rxb->page != NULL) {
-                       rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
-                               0, PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       list_add_tail(&rxb->list, &rxq->rx_free);
-                       rxq->free_count++;
-               } else
-                       list_add_tail(&rxb->list, &rxq->rx_used);
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode won't assert. */
-               if (fill_rx) {
-                       count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               iwl3945_rx_replenish_now(priv);
-                               count = 0;
-                       }
-               }
-       }
-
-       /* Sync the driver's read index to where we stopped */
-       rxq->read = i;
-       if (fill_rx)
-               iwl3945_rx_replenish_now(priv);
-       else
-               iwl3945_rx_queue_restock(priv);
-}
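
To illustrate the restock accounting in iwl3945_rx_handle() above, assuming the
driver's RX_QUEUE_SIZE of 256:

    r = 10, rxq->write_actual = 200  ->  total_empty = 10 - 200 + 256 = 66
    66 <= 128 (RX_QUEUE_SIZE / 2), so fill_rx stays 0 and restocking is deferred
    to the single iwl3945_rx_queue_restock() call after the loop; only when more
    than half the ring is empty does the loop replenish roughly every 8 handled
    buffers.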
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
-{
-       /* wait to make sure we flush the pending tasklet */
-       synchronize_irq(priv->pci_dev->irq);
-       tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *iwl3945_desc_lookup(int i)
-{
-       switch (i) {
-       case 1:
-               return "FAIL";
-       case 2:
-               return "BAD_PARAM";
-       case 3:
-               return "BAD_CHECKSUM";
-       case 4:
-               return "NMI_INTERRUPT";
-       case 5:
-               return "SYSASSERT";
-       case 6:
-               return "FATAL_ERROR";
-       }
-
-       return "UNKNOWN";
-}
-
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
-       u32 i;
-       u32 desc, time, count, base, data1;
-       u32 blink1, blink2, ilink1, ilink2;
-
-       base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
-       if (!iwl3945_hw_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
-               return;
-       }
-
-
-       count = iwl_legacy_read_targ_mem(priv, base);
-
-       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
-               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
-               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
-                       priv->status, count);
-       }
-
-       IWL_ERR(priv, "Desc       Time       asrtPC  blink2 "
-                 "ilink1  nmiPC   Line\n");
-       for (i = ERROR_START_OFFSET;
-            i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
-            i += ERROR_ELEM_SIZE) {
-               desc = iwl_legacy_read_targ_mem(priv, base + i);
-               time =
-                   iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
-               blink1 =
-                   iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
-               blink2 =
-                   iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
-               ilink1 =
-                   iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
-               ilink2 =
-                   iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
-               data1 =
-                   iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
-               IWL_ERR(priv,
-                       "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
-                       iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
-                       ilink1, ilink2, data1);
-               trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
-                                       0, blink1, blink2, ilink1, ilink2);
-       }
-}
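
For reference, ERROR_ELEM_SIZE above matches the seven 32-bit words read per log
entry in the dump loop (desc, time, blink1, blink2, ilink1, ilink2, data1), and
ERROR_START_OFFSET skips the leading count word at the table base.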
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
-       u32 inta, handled = 0;
-       u32 inta_fh;
-       unsigned long flags;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       u32 inta_mask;
-#endif
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Ack/clear/reset pending uCode interrupts.
-        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
-        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
-       inta = iwl_read32(priv, CSR_INT);
-       iwl_write32(priv, CSR_INT, inta);
-
-       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
-        * Any new interrupts that happen after this, either while we're
-        * in this tasklet, or later, will show up in next ISR/tasklet. */
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-       iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
-               /* just for debug */
-               inta_mask = iwl_read32(priv, CSR_INT_MASK);
-               IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                             inta, inta_mask, inta_fh);
-       }
-#endif
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
-        * atomic, make sure that inta covers all the interrupts that
-        * we've discovered, even if FH interrupt came in just after
-        * reading CSR_INT. */
-       if (inta_fh & CSR39_FH_INT_RX_MASK)
-               inta |= CSR_INT_BIT_FH_RX;
-       if (inta_fh & CSR39_FH_INT_TX_MASK)
-               inta |= CSR_INT_BIT_FH_TX;
-
-       /* Now service all interrupt bits discovered above. */
-       if (inta & CSR_INT_BIT_HW_ERR) {
-               IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
-
-               /* Tell the device to stop sending interrupts */
-               iwl_legacy_disable_interrupts(priv);
-
-               priv->isr_stats.hw++;
-               iwl_legacy_irq_handle_error(priv);
-
-               handled |= CSR_INT_BIT_HW_ERR;
-
-               return;
-       }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-               /* NIC fires this, but we don't use it, redundant with WAKEUP */
-               if (inta & CSR_INT_BIT_SCD) {
-                       IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
-                                     "the frame/frames.\n");
-                       priv->isr_stats.sch++;
-               }
-
-               /* Alive notification via Rx interrupt will do the real work */
-               if (inta & CSR_INT_BIT_ALIVE) {
-                       IWL_DEBUG_ISR(priv, "Alive interrupt\n");
-                       priv->isr_stats.alive++;
-               }
-       }
-#endif
-       /* Safely ignore these bits for debug checks below */
-       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
-       /* Error detected by uCode */
-       if (inta & CSR_INT_BIT_SW_ERR) {
-               IWL_ERR(priv, "Microcode SW error detected. "
-                       "Restarting 0x%X.\n", inta);
-               priv->isr_stats.sw++;
-               iwl_legacy_irq_handle_error(priv);
-               handled |= CSR_INT_BIT_SW_ERR;
-       }
-
-       /* uCode wakes up after power-down sleep */
-       if (inta & CSR_INT_BIT_WAKEUP) {
-               IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-               iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
-               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
-               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
-               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
-               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
-               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
-               iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
-
-               priv->isr_stats.wakeup++;
-               handled |= CSR_INT_BIT_WAKEUP;
-       }
-
-       /* All uCode command responses, including Tx command responses,
-        * Rx "responses" (frame-received notifications), and other
-        * notifications from uCode come through here */
-       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-               iwl3945_rx_handle(priv);
-               priv->isr_stats.rx++;
-               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
-       }
-
-       if (inta & CSR_INT_BIT_FH_TX) {
-               IWL_DEBUG_ISR(priv, "Tx interrupt\n");
-               priv->isr_stats.tx++;
-
-               iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
-               iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
-                                       (FH39_SRVC_CHNL), 0x0);
-               handled |= CSR_INT_BIT_FH_TX;
-       }
-
-       if (inta & ~handled) {
-               IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
-               priv->isr_stats.unhandled++;
-       }
-
-       if (inta & ~priv->inta_mask) {
-               IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
-                        inta & ~priv->inta_mask);
-               IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
-       }
-
-       /* Re-enable all interrupts */
-       /* only Re-enable if disabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_legacy_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-               inta = iwl_read32(priv, CSR_INT);
-               inta_mask = iwl_read32(priv, CSR_INT_MASK);
-               inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-               IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
-                       "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
-       }
-#endif
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
-                                        enum ieee80211_band band,
-                                    u8 is_active, u8 n_probes,
-                                    struct iwl3945_scan_channel *scan_ch,
-                                    struct ieee80211_vif *vif)
-{
-       struct ieee80211_channel *chan;
-       const struct ieee80211_supported_band *sband;
-       const struct iwl_channel_info *ch_info;
-       u16 passive_dwell = 0;
-       u16 active_dwell = 0;
-       int added, i;
-
-       sband = iwl_get_hw_mode(priv, band);
-       if (!sband)
-               return 0;
-
-       active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
-       passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
-       if (passive_dwell <= active_dwell)
-               passive_dwell = active_dwell + 1;
-
-       for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
-               chan = priv->scan_request->channels[i];
-
-               if (chan->band != band)
-                       continue;
-
-               scan_ch->channel = chan->hw_value;
-
-               ch_info = iwl_legacy_get_channel_info(priv, band,
-                                                       scan_ch->channel);
-               if (!iwl_legacy_is_channel_valid(ch_info)) {
-                       IWL_DEBUG_SCAN(priv,
-                               "Channel %d is INVALID for this band.\n",
-                              scan_ch->channel);
-                       continue;
-               }
-
-               scan_ch->active_dwell = cpu_to_le16(active_dwell);
-               scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-               /* If passive, set up for auto-switch
-                * and use long active_dwell time.
-                */
-               if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
-                   (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
-                       scan_ch->type = 0;      /* passive */
-                       if (IWL_UCODE_API(priv->ucode_ver) == 1)
-                               scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
-               } else {
-                       scan_ch->type = 1;      /* active */
-               }
-
-               /* Set direct probe bits. These may be used both for active
-                * scan channels (probes get sent right away),
-                * and for passive channels (probes get sent only after
-                * hearing a clear Rx packet). */
-               if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
-                       if (n_probes)
-                               scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
-               } else {
-                       /* uCode v1 does not allow setting direct probe bits on
-                        * passive channel. */
-                       if ((scan_ch->type & 1) && n_probes)
-                               scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
-               }
-
-               /* Set txpower levels to defaults */
-               scan_ch->tpc.dsp_atten = 110;
-               /* scan_pwr_info->tpc.dsp_atten; */
-
-               /*scan_pwr_info->tpc.tx_gain; */
-               if (band == IEEE80211_BAND_5GHZ)
-                       scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
-               else {
-                       scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
-                       /* NOTE: if we were doing 6Mb OFDM for scans we'd use
-                        * power level:
-                        * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
-                        */
-               }
-
-               IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
-                              scan_ch->channel,
-                              (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
-                              (scan_ch->type & 1) ?
-                              active_dwell : passive_dwell);
-
-               scan_ch++;
-               added++;
-       }
-
-       IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
-       return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
-                             struct ieee80211_rate *rates)
-{
-       int i;
-
-       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
-               rates[i].bitrate = iwl3945_rates[i].ieee * 5;
-               rates[i].hw_value = i; /* Rate scaling will work on indexes */
-               rates[i].hw_value_short = i;
-               rates[i].flags = 0;
-               if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
-                       /*
-                        * If CCK != 1M then set short preamble rate flag.
-                        */
-                       rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
-                               0 : IEEE80211_RATE_SHORT_PREAMBLE;
-               }
-       }
-}
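
For context on the scaling in iwl3945_init_hw_rates(): mac80211 expects
ieee80211_rate.bitrate in units of 100 kbps, while the iwl3945_rates[].ieee
values use the 802.11 supported-rates encoding in 500 kbps units, hence the
multiply by 5 (e.g. 11 Mbps: ieee = 22 -> bitrate = 110).  A plcp value of 10
is the 1 Mbps CCK rate, so the short-preamble flag ends up set on every CCK
rate except 1M.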
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- *     looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-       u32 val;
-       u32 save_len = len;
-       int rc = 0;
-       u32 errcnt;
-
-       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-       iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-                              IWL39_RTC_INST_LOWER_BOUND);
-
-       errcnt = 0;
-       for (; len > 0; len -= sizeof(u32), image++) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "uCode INST section is invalid at "
-                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
-                                 save_len - len, val, le32_to_cpu(*image));
-                       rc = -EIO;
-                       errcnt++;
-                       if (errcnt >= 20)
-                               break;
-               }
-       }
-
-
-       if (!errcnt)
-               IWL_DEBUG_INFO(priv,
-                       "ucode image in INSTRUCTION memory is good\n");
-
-       return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
-       u32 val;
-       int rc = 0;
-       u32 errcnt = 0;
-       u32 i;
-
-       IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
-       for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-               /* read data comes through single port, auto-incr addr */
-               /* NOTE: Use the debugless read so we don't flood kernel log
-                * if IWL_DL_IO is set */
-               iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
-                       i + IWL39_RTC_INST_LOWER_BOUND);
-               val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-               if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
-                       IWL_ERR(priv, "uCode INST section is invalid at "
-                                 "offset 0x%x, is 0x%x, s/b 0x%x\n",
-                                 i, val, *image);
-#endif
-                       rc = -EIO;
-                       errcnt++;
-                       if (errcnt >= 3)
-                               break;
-               }
-       }
-
-       return rc;
-}
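
To put the sampling above in perspective: one 32-bit word is compared every
100 bytes, so for, say, a 50,000-byte image only about 500 of its 12,500 words
are read back, and the loop bails out after 3 mismatches; the full walk in
iwl3945_verify_inst_full() is reserved for the failure path.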
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
-       __le32 *image;
-       u32 len;
-       int rc = 0;
-
-       /* Try bootstrap */
-       image = (__le32 *)priv->ucode_boot.v_addr;
-       len = priv->ucode_boot.len;
-       rc = iwl3945_verify_inst_sparse(priv, image, len);
-       if (rc == 0) {
-               IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       /* Try initialize */
-       image = (__le32 *)priv->ucode_init.v_addr;
-       len = priv->ucode_init.len;
-       rc = iwl3945_verify_inst_sparse(priv, image, len);
-       if (rc == 0) {
-               IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       /* Try runtime/protocol */
-       image = (__le32 *)priv->ucode_code.v_addr;
-       len = priv->ucode_code.len;
-       rc = iwl3945_verify_inst_sparse(priv, image, len);
-       if (rc == 0) {
-               IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
-               return 0;
-       }
-
-       IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
-       /* Since nothing seems to match, show first several data entries in
-        * instruction SRAM, so maybe visual inspection will give a clue.
-        * Selection of bootstrap image (vs. other images) is arbitrary. */
-       image = (__le32 *)priv->ucode_boot.v_addr;
-       len = priv->ucode_boot.len;
-       rc = iwl3945_verify_inst_full(priv, image, len);
-
-       return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
-       /* Remove all resets to allow NIC to operate */
-       iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item)                                                \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{                                                                      \
-       return le32_to_cpu(ucode->v1.item);                             \
-}
-
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
-       return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
-       return (u8 *) ucode->v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
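
Each IWL3945_UCODE_GET() instantiation above expands to a trivial little-endian
accessor for the v1 header; for example, IWL3945_UCODE_GET(inst_size) produces:

    static u32 iwl3945_ucode_get_inst_size(const struct iwl_ucode_header *ucode)
    {
            return le32_to_cpu(ucode->v1.inst_size);
    }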
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
-       const struct iwl_ucode_header *ucode;
-       int ret = -EINVAL, index;
-       const struct firmware *ucode_raw;
-       /* firmware file name contains uCode/driver compatibility version */
-       const char *name_pre = priv->cfg->fw_name_pre;
-       const unsigned int api_max = priv->cfg->ucode_api_max;
-       const unsigned int api_min = priv->cfg->ucode_api_min;
-       char buf[25];
-       u8 *src;
-       size_t len;
-       u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
-       /* Ask kernel firmware_class module to get the boot firmware off disk.
-        * request_firmware() is synchronous, file is in memory on return. */
-       for (index = api_max; index >= api_min; index--) {
-               sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
-               ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
-               if (ret < 0) {
-                       IWL_ERR(priv, "%s firmware file req failed: %d\n",
-                                 buf, ret);
-                       if (ret == -ENOENT)
-                               continue;
-                       else
-                               goto error;
-               } else {
-                       if (index < api_max)
-                               IWL_ERR(priv, "Loaded firmware %s, "
-                                       "which is deprecated. "
-                                       "Please use API v%u instead.\n",
-                                       buf, api_max);
-                       IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
-                                      "(%zd bytes) from disk\n",
-                                      buf, ucode_raw->size);
-                       break;
-               }
-       }
-
-       if (ret < 0)
-               goto error;
-
-       /* Make sure that we got at least our header! */
-       if (ucode_raw->size <  iwl3945_ucode_get_header_size(1)) {
-               IWL_ERR(priv, "File size way too small!\n");
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       /* Data from ucode file:  header followed by uCode images */
-       ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
-       priv->ucode_ver = le32_to_cpu(ucode->ver);
-       api_ver = IWL_UCODE_API(priv->ucode_ver);
-       inst_size = iwl3945_ucode_get_inst_size(ucode);
-       data_size = iwl3945_ucode_get_data_size(ucode);
-       init_size = iwl3945_ucode_get_init_size(ucode);
-       init_data_size = iwl3945_ucode_get_init_data_size(ucode);
-       boot_size = iwl3945_ucode_get_boot_size(ucode);
-       src = iwl3945_ucode_get_data(ucode);
-
-       /* api_ver should match the api version forming part of the
-        * firmware filename ... but we don't check for that and only rely
-        * on the API version read from firmware header from here on forward */
-
-       if (api_ver < api_min || api_ver > api_max) {
-               IWL_ERR(priv, "Driver unable to support your firmware API. "
-                         "Driver supports v%u, firmware is v%u.\n",
-                         api_max, api_ver);
-               priv->ucode_ver = 0;
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (api_ver != api_max)
-               IWL_ERR(priv, "Firmware has old API version. Expected %u, "
-                         "got %u. New firmware can be obtained "
-                         "from http://www.intellinuxwireless.org.\n",
-                         api_max, api_ver);
-
-       IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
-               IWL_UCODE_MAJOR(priv->ucode_ver),
-               IWL_UCODE_MINOR(priv->ucode_ver),
-               IWL_UCODE_API(priv->ucode_ver),
-               IWL_UCODE_SERIAL(priv->ucode_ver));
-
-       snprintf(priv->hw->wiphy->fw_version,
-                sizeof(priv->hw->wiphy->fw_version),
-                "%u.%u.%u.%u",
-                IWL_UCODE_MAJOR(priv->ucode_ver),
-                IWL_UCODE_MINOR(priv->ucode_ver),
-                IWL_UCODE_API(priv->ucode_ver),
-                IWL_UCODE_SERIAL(priv->ucode_ver));
-
-       IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
-                      priv->ucode_ver);
-       IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
-                      inst_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
-                      data_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
-                      init_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
-                      init_data_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
-                      boot_size);
-
-
-       /* Verify size of file vs. image size info in file's header */
-       if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
-               inst_size + data_size + init_size +
-               init_data_size + boot_size) {
-
-               IWL_DEBUG_INFO(priv,
-                       "uCode file size %zd does not match expected size\n",
-                       ucode_raw->size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       /* Verify that uCode images will fit in card's SRAM */
-       if (inst_size > IWL39_MAX_INST_SIZE) {
-               IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in SRAM\n",
-                              inst_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       if (data_size > IWL39_MAX_DATA_SIZE) {
-               IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in SRAM\n",
-                              data_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (init_size > IWL39_MAX_INST_SIZE) {
-               IWL_DEBUG_INFO(priv,
-                               "uCode init instr len %d too large to fit in SRAM\n",
-                               init_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (init_data_size > IWL39_MAX_DATA_SIZE) {
-               IWL_DEBUG_INFO(priv,
-                               "uCode init data len %d too large to fit in SRAM\n",
-                               init_data_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-       if (boot_size > IWL39_MAX_BSM_SIZE) {
-               IWL_DEBUG_INFO(priv,
-                               "uCode boot instr len %d too large to fit in SRAM\n",
-                               boot_size);
-               ret = -EINVAL;
-               goto err_release;
-       }
-
-       /* Allocate ucode buffers for card's bus-master loading ... */
-
-       /* Runtime instructions and 2 copies of data:
-        * 1) unmodified from disk
-        * 2) backup cache for save/restore during power-downs */
-       priv->ucode_code.len = inst_size;
-       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
-       priv->ucode_data.len = data_size;
-       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
-       priv->ucode_data_backup.len = data_size;
-       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
-       if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
-           !priv->ucode_data_backup.v_addr)
-               goto err_pci_alloc;
-
-       /* Initialization instructions and data */
-       if (init_size && init_data_size) {
-               priv->ucode_init.len = init_size;
-               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
-               priv->ucode_init_data.len = init_data_size;
-               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
-               if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
-                       goto err_pci_alloc;
-       }
-
-       /* Bootstrap (instructions only, no data) */
-       if (boot_size) {
-               priv->ucode_boot.len = boot_size;
-               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
-               if (!priv->ucode_boot.v_addr)
-                       goto err_pci_alloc;
-       }
-
-       /* Copy images into buffers for card's bus-master reads ... */
-
-       /* Runtime instructions (first block of data in file) */
-       len = inst_size;
-       IWL_DEBUG_INFO(priv,
-               "Copying (but not loading) uCode instr len %zd\n", len);
-       memcpy(priv->ucode_code.v_addr, src, len);
-       src += len;
-
-       IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
-               priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
-       /* Runtime data (2nd block)
-        * NOTE:  Copy into backup buffer will be done in __iwl3945_up()  */
-       len = data_size;
-       IWL_DEBUG_INFO(priv,
-               "Copying (but not loading) uCode data len %zd\n", len);
-       memcpy(priv->ucode_data.v_addr, src, len);
-       memcpy(priv->ucode_data_backup.v_addr, src, len);
-       src += len;
-
-       /* Initialization instructions (3rd block) */
-       if (init_size) {
-               len = init_size;
-               IWL_DEBUG_INFO(priv,
-                       "Copying (but not loading) init instr len %zd\n", len);
-               memcpy(priv->ucode_init.v_addr, src, len);
-               src += len;
-       }
-
-       /* Initialization data (4th block) */
-       if (init_data_size) {
-               len = init_data_size;
-               IWL_DEBUG_INFO(priv,
-                       "Copying (but not loading) init data len %zd\n", len);
-               memcpy(priv->ucode_init_data.v_addr, src, len);
-               src += len;
-       }
-
-       /* Bootstrap instructions (5th block) */
-       len = boot_size;
-       IWL_DEBUG_INFO(priv,
-               "Copying (but not loading) boot instr len %zd\n", len);
-       memcpy(priv->ucode_boot.v_addr, src, len);
-
-       /* We have our copies now, allow the OS to release its copy */
-       release_firmware(ucode_raw);
-       return 0;
-
- err_pci_alloc:
-       IWL_ERR(priv, "failed to allocate pci memory\n");
-       ret = -ENOMEM;
-       iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
-       release_firmware(ucode_raw);
-
- error:
-       return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
-       dma_addr_t pinst;
-       dma_addr_t pdata;
-
-       /* bits 31:0 for 3945 */
-       pinst = priv->ucode_code.p_addr;
-       pdata = priv->ucode_data_backup.p_addr;
-
-       /* Tell bootstrap uCode where to find image to load */
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
-       iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
-                                priv->ucode_data.len);
-
-       /* Inst byte count must be last to set up, bit 31 signals uCode
-        *   that all new ptr/size info is in place */
-       iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
-                                priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
-       IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
-       return 0;
-}
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
-       /* Check alive response for "valid" sign from uCode */
-       if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
-               goto restart;
-       }
-
-       /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "initialize" alive if code weren't properly loaded.  */
-       if (iwl3945_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
-               goto restart;
-       }
-
-       /* Send pointers to protocol/runtime uCode image ... init code will
-        * load and launch runtime uCode, which will send us another "Alive"
-        * notification. */
-       IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-       if (iwl3945_set_ucode_ptrs(priv)) {
-               /* Runtime instruction load won't happen;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
-               goto restart;
-       }
-       return;
-
- restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- *                   from protocol/runtime uCode (initialization uCode's
- *                   Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
-       int thermal_spin = 0;
-       u32 rfkill;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
-       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Alive failed.\n");
-               goto restart;
-       }
-
-       /* Initialize uCode has loaded Runtime uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "runtime" alive if code weren't properly loaded.  */
-       if (iwl3945_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
-               goto restart;
-       }
-
-       rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
-       IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
-       if (rfkill & 0x1) {
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-               /* if RFKILL is not on, then wait for thermal
-                * sensor in adapter to kick in */
-               while (iwl3945_hw_get_temperature(priv) == 0) {
-                       thermal_spin++;
-                       udelay(10);
-               }
-
-               if (thermal_spin)
-                       IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
-                                      thermal_spin * 10);
-       } else
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-       /* After the ALIVE response, we can send commands to 3945 uCode */
-       set_bit(STATUS_ALIVE, &priv->status);
-
-       /* Enable watchdog to monitor the driver tx queues */
-       iwl_legacy_setup_watchdog(priv);
-
-       if (iwl_legacy_is_rfkill(priv))
-               return;
-
-       ieee80211_wake_queues(priv->hw);
-
-       priv->active_rate = IWL_RATES_MASK_3945;
-
-       iwl_legacy_power_update_mode(priv, true);
-
-       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-               struct iwl3945_rxon_cmd *active_rxon =
-                               (struct iwl3945_rxon_cmd *)(&ctx->active);
-
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       } else {
-               /* Initialize our rx_config data */
-               iwl_legacy_connection_init_rx_config(priv, ctx);
-       }
-
-       /* Configure Bluetooth device coexistence support */
-       iwl_legacy_send_bt_config(priv);
-
-       set_bit(STATUS_READY, &priv->status);
-
-       /* Configure the adapter for unassociated operation */
-       iwl3945_commit_rxon(priv, ctx);
-
-       iwl3945_reg_txpower_periodic(priv);
-
-       IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-       wake_up(&priv->wait_command_queue);
-
-       return;
-
- restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       int exit_pending;
-
-       IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
-       iwl_legacy_scan_cancel_timeout(priv, 200);
-
-       exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must already be set
-        * so the watchdog timer cannot be re-armed. */
-       del_timer_sync(&priv->watchdog);
-
-       /* Station information will now be cleared in device */
-       iwl_legacy_clear_ucode_stations(priv, NULL);
-       iwl_legacy_dealloc_bcast_stations(priv);
-       iwl_legacy_clear_driver_stations(priv);
-
-       /* Unblock any waiting calls */
-       wake_up_all(&priv->wait_command_queue);
-
-       /* Wipe out the EXIT_PENDING status bit if we are not actually
-        * exiting the module */
-       if (!exit_pending)
-               clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* stop and reset the on-board processor */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-       /* tell the device to stop sending interrupts */
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_legacy_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       iwl3945_synchronize_irq(priv);
-
-       if (priv->mac80211_registered)
-               ieee80211_stop_queues(priv->hw);
-
-       /* If we have not previously called iwl3945_init() then
-        * clear all bits but the RF Kill bits and return */
-       if (!iwl_legacy_is_init(priv)) {
-               priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-                                       STATUS_RF_KILL_HW |
-                              test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-                                       STATUS_GEO_CONFIGURED |
-                               test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-                                       STATUS_EXIT_PENDING;
-               goto exit;
-       }
-
-       /* ...otherwise clear out all the status bits but the RF Kill
-        * bit and continue taking the NIC down. */
-       priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-                               STATUS_RF_KILL_HW |
-                       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-                               STATUS_GEO_CONFIGURED |
-                       test_bit(STATUS_FW_ERROR, &priv->status) <<
-                               STATUS_FW_ERROR |
-                       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-                               STATUS_EXIT_PENDING;
-
-       iwl3945_hw_txq_ctx_stop(priv);
-       iwl3945_hw_rxq_stop(priv);
-
-       /* Power-down device's busmaster DMA clocks */
-       iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-       udelay(5);
-
-       /* Stop the device, and put it in low power state */
-       iwl_legacy_apm_stop(priv);
-
- exit:
-       memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-       priv->beacon_skb = NULL;
-
-       /* clear out any free frames */
-       iwl3945_clear_free_frames(priv);
-}
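
The status arithmetic in __iwl3945_down() above works because test_bit()
returns 0 or 1; shifting that result back to the flag's bit position and
OR-ing the terms builds a keep-mask, so priv->status retains only the
RF-kill, geo-configured, firmware-error and exit-pending flags (the
not-yet-initialized path drops STATUS_FW_ERROR as well).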
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
-       mutex_lock(&priv->mutex);
-       __iwl3945_down(priv);
-       mutex_unlock(&priv->mutex);
-
-       iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       unsigned long flags;
-       u8 sta_id;
-
-       spin_lock_irqsave(&priv->sta_lock, flags);
-       sta_id = iwl_legacy_prep_station(priv, ctx,
-                                       iwlegacy_bcast_addr, false, NULL);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_ERR(priv, "Unable to prepare broadcast station\n");
-               spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-               return -EINVAL;
-       }
-
-       priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
-       priv->stations[sta_id].used |= IWL_STA_BCAST;
-       spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-       return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
-       int rc, i;
-
-       rc = iwl3945_alloc_bcast_station(priv);
-       if (rc)
-               return rc;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-               return -EIO;
-       }
-
-       if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
-               IWL_ERR(priv, "ucode not available for device bring up\n");
-               return -EIO;
-       }
-
-       /* If platform's RF_KILL switch is NOT set to KILL */
-       if (iwl_read32(priv, CSR_GP_CNTRL) &
-                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-       else {
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-               IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
-               return -ENODEV;
-       }
-
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
-       rc = iwl3945_hw_nic_init(priv);
-       if (rc) {
-               IWL_ERR(priv, "Unable to init nic\n");
-               return rc;
-       }
-
-       /* make sure rfkill handshake bits are cleared */
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-       /* clear (again), then enable host interrupts */
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-       iwl_legacy_enable_interrupts(priv);
-
-       /* really make sure rfkill handshake bits are cleared */
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
-       /* Copy original ucode data image from disk into backup cache.
-        * This will be used to initialize the on-board processor's
-        * data SRAM for a clean start when the runtime program first loads. */
-       memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
-              priv->ucode_data.len);
-
-       /* We return success when we resume from suspend and rf_kill is on. */
-       if (test_bit(STATUS_RF_KILL_HW, &priv->status))
-               return 0;
-
-       for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
-               /* load bootstrap state machine,
-                * load bootstrap program into processor's memory,
-                * prepare to load the "initialize" uCode */
-               rc = priv->cfg->ops->lib->load_ucode(priv);
-
-               if (rc) {
-                       IWL_ERR(priv,
-                               "Unable to set up bootstrap uCode: %d\n", rc);
-                       continue;
-               }
-
-               /* start card; "initialize" will load runtime ucode */
-               iwl3945_nic_start(priv);
-
-               IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
-               return 0;
-       }
-
-       set_bit(STATUS_EXIT_PENDING, &priv->status);
-       __iwl3945_down(priv);
-       clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* tried to restart and config the device for as long as our
-        * patience could withstand */
-       IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
-       return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, init_alive_start.work);
-
-       mutex_lock(&priv->mutex);
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               goto out;
-
-       iwl3945_init_alive_start(priv);
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, alive_start.work);
-
-       mutex_lock(&priv->mutex);
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               goto out;
-
-       iwl3945_alive_start(priv);
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-/*
- * 3945 cannot interrupt driver when hardware rf kill switch toggles;
- * driver must poll CSR_GP_CNTRL_REG register for change.  This register
- * *is* readable even when device has been SW_RESET into low power mode
- * (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
-       bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
-       bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
-                       & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
-       if (new_rfkill != old_rfkill) {
-               if (new_rfkill)
-                       set_bit(STATUS_RF_KILL_HW, &priv->status);
-               else
-                       clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
-               IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
-                               new_rfkill ? "disable radio" : "enable radio");
-       }
-
-       /* Keep this running, even if radio now enabled.  This will be
-        * cancelled in mac_start() if system decides to start again */
-       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-                          round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_SCAN_CMD,
-               .len = sizeof(struct iwl3945_scan_cmd),
-               .flags = CMD_SIZE_HUGE,
-       };
-       struct iwl3945_scan_cmd *scan;
-       u8 n_probes = 0;
-       enum ieee80211_band band;
-       bool is_active = false;
-       int ret;
-       u16 len;
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->scan_cmd) {
-               priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
-                                        IWL_MAX_SCAN_SIZE, GFP_KERNEL);
-               if (!priv->scan_cmd) {
-                       IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
-                       return -ENOMEM;
-               }
-       }
-       scan = priv->scan_cmd;
-       memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
-       scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
-       scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
-       if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-               u16 interval;
-               u32 extra;
-               u32 suspend_time = 100;
-               u32 scan_suspend_time = 100;
-
-               IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
-               interval = vif->bss_conf.beacon_int;
-
-               scan->suspend_time = 0;
-               scan->max_out_time = cpu_to_le32(200 * 1024);
-               if (!interval)
-                       interval = suspend_time;
-               /*
-                * suspend time format:
-                *  0-19: beacon interval in usec (time before exec.)
-                * 20-23: 0
-                * 24-31: number of beacons (suspend between channels)
-                */
-
-               extra = (suspend_time / interval) << 24;
-               scan_suspend_time = 0xFF0FFFFF &
-                   (extra | ((suspend_time % interval) * 1024));
-
-               scan->suspend_time = cpu_to_le32(scan_suspend_time);
-               IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
-                              scan_suspend_time, interval);
-       }
-
-       if (priv->scan_request->n_ssids) {
-               int i, p = 0;
-               IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
-               for (i = 0; i < priv->scan_request->n_ssids; i++) {
-                       /* always does wildcard anyway */
-                       if (!priv->scan_request->ssids[i].ssid_len)
-                               continue;
-                       scan->direct_scan[p].id = WLAN_EID_SSID;
-                       scan->direct_scan[p].len =
-                               priv->scan_request->ssids[i].ssid_len;
-                       memcpy(scan->direct_scan[p].ssid,
-                              priv->scan_request->ssids[i].ssid,
-                              priv->scan_request->ssids[i].ssid_len);
-                       n_probes++;
-                       p++;
-               }
-               is_active = true;
-       } else
-               IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
-       /* We don't build a direct scan probe request; the uCode will do
-        * that based on the direct_mask added to each channel entry */
-       scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-       scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-       scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
-       /* flags + rate selection */
-
-       switch (priv->scan_band) {
-       case IEEE80211_BAND_2GHZ:
-               scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-               scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
-               band = IEEE80211_BAND_2GHZ;
-               break;
-       case IEEE80211_BAND_5GHZ:
-               scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
-               band = IEEE80211_BAND_5GHZ;
-               break;
-       default:
-               IWL_WARN(priv, "Invalid scan band\n");
-               return -EIO;
-       }
-
-       /*
-        * If active scanning is requested but a certain channel
-        * is marked passive, we can do active scanning if we
-        * detect transmissions.
-        */
-       scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
-                                       IWL_GOOD_CRC_TH_DISABLED;
-
-       len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
-                                       vif->addr, priv->scan_request->ie,
-                                       priv->scan_request->ie_len,
-                                       IWL_MAX_SCAN_SIZE - sizeof(*scan));
-       scan->tx_cmd.len = cpu_to_le16(len);
-
-       /* select Rx antennas */
-       scan->flags |= iwl3945_get_antenna_flags(priv);
-
-       scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
-                                                           (void *)&scan->data[len], vif);
-       if (scan->channel_count == 0) {
-               IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
-               return -EIO;
-       }
-
-       cmd.len += le16_to_cpu(scan->tx_cmd.len) +
-           scan->channel_count * sizeof(struct iwl3945_scan_channel);
-       cmd.data = scan;
-       scan->len = cpu_to_le16(cmd.len);
-
-       set_bit(STATUS_SCAN_HW, &priv->status);
-       ret = iwl_legacy_send_cmd_sync(priv, &cmd);
-       if (ret)
-               clear_bit(STATUS_SCAN_HW, &priv->status);
-       return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       /*
-        * Since setting the RXON may have been deferred while
-        * performing the scan, fire one off if needed
-        */
-       if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
-               iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
-       struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
-               struct iwl_rxon_context *ctx;
-               mutex_lock(&priv->mutex);
-               for_each_context(priv, ctx)
-                       ctx->vif = NULL;
-               priv->is_open = 0;
-               mutex_unlock(&priv->mutex);
-               iwl3945_down(priv);
-               ieee80211_restart_hw(priv->hw);
-       } else {
-               iwl3945_down(priv);
-
-               mutex_lock(&priv->mutex);
-               if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-                       mutex_unlock(&priv->mutex);
-                       return;
-               }
-
-               __iwl3945_up(priv);
-               mutex_unlock(&priv->mutex);
-       }
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, rx_replenish);
-
-       mutex_lock(&priv->mutex);
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               goto out;
-
-       iwl3945_rx_replenish(priv);
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
-       int rc = 0;
-       struct ieee80211_conf *conf = NULL;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       if (!ctx->vif || !priv->is_open)
-               return;
-
-       IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
-                       ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       iwl_legacy_scan_cancel_timeout(priv, 200);
-
-       conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       iwl3945_commit_rxon(priv, ctx);
-
-       rc = iwl_legacy_send_rxon_timing(priv, ctx);
-       if (rc)
-               IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
-                           "Attempting to continue.\n");
-
-       ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
-       ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
-       IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
-                       ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
-       if (ctx->vif->bss_conf.use_short_preamble)
-               ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-       if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-               if (ctx->vif->bss_conf.use_short_slot)
-                       ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
-               else
-                       ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-       }
-
-       iwl3945_commit_rxon(priv, ctx);
-
-       switch (ctx->vif->type) {
-       case NL80211_IFTYPE_STATION:
-               iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
-               break;
-       case NL80211_IFTYPE_ADHOC:
-               iwl3945_send_beacon_cmd(priv);
-               break;
-       default:
-               IWL_ERR(priv, "%s Should not be called in %d mode\n",
-                       __func__, ctx->vif->type);
-               break;
-       }
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT    (2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       /* we should be verifying the device is ready to be opened */
-       mutex_lock(&priv->mutex);
-
-       /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
-        * ucode filename and max sizes are card-specific. */
-
-       if (!priv->ucode_code.len) {
-               ret = iwl3945_read_ucode(priv);
-               if (ret) {
-                       IWL_ERR(priv, "Could not read microcode: %d\n", ret);
-                       mutex_unlock(&priv->mutex);
-                       goto out_release_irq;
-               }
-       }
-
-       ret = __iwl3945_up(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       if (ret)
-               goto out_release_irq;
-
-       IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
-       /* Wait for START_ALIVE from ucode. Otherwise callbacks from
-        * mac80211 will not be run successfully. */
-       ret = wait_event_timeout(priv->wait_command_queue,
-                       test_bit(STATUS_READY, &priv->status),
-                       UCODE_READY_TIMEOUT);
-       if (!ret) {
-               if (!test_bit(STATUS_READY, &priv->status)) {
-                       IWL_ERR(priv,
-                               "Wait for START_ALIVE timeout after %dms.\n",
-                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
-                       ret = -ETIMEDOUT;
-                       goto out_release_irq;
-               }
-       }
-
-       /* ucode is running and will send rfkill notifications,
-        * no need to poll the killswitch state anymore */
-       cancel_delayed_work(&priv->_3945.rfkill_poll);
-
-       priv->is_open = 1;
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
-
-out_release_irq:
-       priv->is_open = 0;
-       IWL_DEBUG_MAC80211(priv, "leave - failed\n");
-       return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (!priv->is_open) {
-               IWL_DEBUG_MAC80211(priv, "leave - skip\n");
-               return;
-       }
-
-       priv->is_open = 0;
-
-       iwl3945_down(priv);
-
-       flush_workqueue(priv->workqueue);
-
-       /* start polling the killswitch state again */
-       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-                          round_jiffies_relative(2 * HZ));
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-       if (iwl3945_tx_skb(priv, skb))
-               dev_kfree_skb_any(skb);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif = ctx->vif;
-       int rc = 0;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       /* The following should be done only at AP bring up */
-       if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
-               /* RXON - unassoc (to set timing command) */
-               ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-               iwl3945_commit_rxon(priv, ctx);
-
-               /* RXON Timing */
-               rc = iwl_legacy_send_rxon_timing(priv, ctx);
-               if (rc)
-                       IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
-                                       "Attempting to continue.\n");
-
-               ctx->staging.assoc_id = 0;
-
-               if (vif->bss_conf.use_short_preamble)
-                       ctx->staging.flags |=
-                               RXON_FLG_SHORT_PREAMBLE_MSK;
-               else
-                       ctx->staging.flags &=
-                               ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
-               if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
-                       if (vif->bss_conf.use_short_slot)
-                               ctx->staging.flags |=
-                                       RXON_FLG_SHORT_SLOT_MSK;
-                       else
-                               ctx->staging.flags &=
-                                       ~RXON_FLG_SHORT_SLOT_MSK;
-               }
-               /* restore RXON assoc */
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               iwl3945_commit_rxon(priv, ctx);
-       }
-       iwl3945_send_beacon_cmd(priv);
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                              struct ieee80211_vif *vif,
-                              struct ieee80211_sta *sta,
-                              struct ieee80211_key_conf *key)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret = 0;
-       u8 sta_id = IWL_INVALID_STATION;
-       u8 static_key;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (iwl3945_mod_params.sw_crypto) {
-               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-               return -EOPNOTSUPP;
-       }
-
-       /*
-        * To support IBSS RSN, don't program group keys in IBSS; the
-        * hardware will then not attempt to decrypt the frames.
-        */
-       if (vif->type == NL80211_IFTYPE_ADHOC &&
-           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
-               return -EOPNOTSUPP;
-
-       static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-
-       if (!static_key) {
-               sta_id = iwl_legacy_sta_id_or_broadcast(
-                               priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
-               if (sta_id == IWL_INVALID_STATION)
-                       return -EINVAL;
-       }
-
-       mutex_lock(&priv->mutex);
-       iwl_legacy_scan_cancel_timeout(priv, 100);
-
-       switch (cmd) {
-       case SET_KEY:
-               if (static_key)
-                       ret = iwl3945_set_static_key(priv, key);
-               else
-                       ret = iwl3945_set_dynamic_key(priv, key, sta_id);
-               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-               break;
-       case DISABLE_KEY:
-               if (static_key)
-                       ret = iwl3945_remove_static_key(priv);
-               else
-                       ret = iwl3945_clear_sta_key_info(priv, sta_id);
-               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       mutex_unlock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif,
-                              struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
-       int ret;
-       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-       u8 sta_id;
-
-       IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
-                       sta->addr);
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-                       sta->addr);
-       sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-
-       ret = iwl_legacy_add_station_common(priv,
-                               &priv->contexts[IWL_RXON_CTX_BSS],
-                                    sta->addr, is_ap, sta, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-                       sta->addr, ret);
-               /* Should we return success if return code is EEXIST ? */
-               mutex_unlock(&priv->mutex);
-               return ret;
-       }
-
-       sta_priv->common.sta_id = sta_id;
-
-       /* Initialize rate scaling */
-       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-                      sta->addr);
-       iwl3945_rs_rate_init(priv, sta, sta_id);
-       mutex_unlock(&priv->mutex);
-
-       return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
-                                    unsigned int changed_flags,
-                                    unsigned int *total_flags,
-                                    u64 multicast)
-{
-       struct iwl_priv *priv = hw->priv;
-       __le32 filter_or = 0, filter_nand = 0;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-#define CHK(test, flag)        do { \
-       if (*total_flags & (test))              \
-               filter_or |= (flag);            \
-       else                                    \
-               filter_nand |= (flag);          \
-       } while (0)
-
-       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-                       changed_flags, *total_flags);
-
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
-       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-       mutex_lock(&priv->mutex);
-
-       ctx->staging.filter_flags &= ~filter_nand;
-       ctx->staging.filter_flags |= filter_or;
-
-       /*
-        * Not committing directly because hardware can perform a scan,
-        * but even if hw is ready, committing here breaks for some reason,
-        * but even if hw is ready, committing here breaks for some reason;
-        */
-
-       mutex_unlock(&priv->mutex);
-
-       /*
-        * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_legacy_connection_init_rx_config()
-        * since we currently do not support programming multicast
-        * filters into the device.
-        */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl3945_show_debug_level(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl3945_store_debug_level(struct device *d,
-                               struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       unsigned long val;
-       int ret;
-
-       ret = strict_strtoul(buf, 0, &val);
-       if (ret)
-               IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
-       else {
-               priv->debug_level = val;
-               if (iwl_legacy_alloc_traffic_mem(priv))
-                       IWL_ERR(priv,
-                               "Not enough memory to generate traffic log\n");
-       }
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-                       iwl3945_show_debug_level, iwl3945_store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-static ssize_t iwl3945_show_temperature(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-
-static ssize_t iwl3945_show_tx_power(struct device *d,
-                            struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl3945_store_tx_power(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       char *p = (char *)buf;
-       u32 val;
-
-       val = simple_strtoul(p, &p, 10);
-       if (p == buf)
-               IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
-       else
-               iwl3945_hw_reg_set_txpower(priv, val);
-
-       return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-
-static ssize_t iwl3945_show_flags(struct device *d,
-                         struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       return sprintf(buf, "0x%04X\n", ctx->active.flags);
-}
-
-static ssize_t iwl3945_store_flags(struct device *d,
-                          struct device_attribute *attr,
-                          const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       u32 flags = simple_strtoul(buf, NULL, 0);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       mutex_lock(&priv->mutex);
-       if (le32_to_cpu(ctx->staging.flags) != flags) {
-               /* Cancel any currently running scans... */
-               if (iwl_legacy_scan_cancel_timeout(priv, 100))
-                       IWL_WARN(priv, "Could not cancel scan.\n");
-               else {
-                       IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
-                                      flags);
-                       ctx->staging.flags = cpu_to_le32(flags);
-                       iwl3945_commit_rxon(priv, ctx);
-               }
-       }
-       mutex_unlock(&priv->mutex);
-
-       return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-
-static ssize_t iwl3945_show_filter_flags(struct device *d,
-                                struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       return sprintf(buf, "0x%04X\n",
-               le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t iwl3945_store_filter_flags(struct device *d,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
-       mutex_lock(&priv->mutex);
-       if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
-               /* Cancel any currently running scans... */
-               if (iwl_legacy_scan_cancel_timeout(priv, 100))
-                       IWL_WARN(priv, "Could not cancel scan.\n");
-               else {
-                       IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
-                                      "0x%04X\n", filter_flags);
-                       ctx->staging.filter_flags =
-                               cpu_to_le32(filter_flags);
-                       iwl3945_commit_rxon(priv, ctx);
-               }
-       }
-       mutex_unlock(&priv->mutex);
-
-       return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
-                  iwl3945_store_filter_flags);
-
-static ssize_t iwl3945_show_measurement(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_spectrum_notification measure_report;
-       u32 size = sizeof(measure_report), len = 0, ofs = 0;
-       u8 *data = (u8 *)&measure_report;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       if (!(priv->measurement_status & MEASUREMENT_READY)) {
-               spin_unlock_irqrestore(&priv->lock, flags);
-               return 0;
-       }
-       memcpy(&measure_report, &priv->measure_report, size);
-       priv->measurement_status = 0;
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       while (size && (PAGE_SIZE - len)) {
-               hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-                                  PAGE_SIZE - len, 1);
-               len = strlen(buf);
-               if (PAGE_SIZE - len)
-                       buf[len++] = '\n';
-
-               ofs += 16;
-               size -= min(size, 16U);
-       }
-
-       return len;
-}
-
-static ssize_t iwl3945_store_measurement(struct device *d,
-                                struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_measurement_params params = {
-               .channel = le16_to_cpu(ctx->active.channel),
-               .start_time = cpu_to_le64(priv->_3945.last_tsf),
-               .duration = cpu_to_le16(1),
-       };
-       u8 type = IWL_MEASURE_BASIC;
-       u8 buffer[32];
-       u8 channel;
-
-       if (count) {
-               char *p = buffer;
-               strncpy(buffer, buf, min(sizeof(buffer), count));
-               channel = simple_strtoul(p, NULL, 0);
-               if (channel)
-                       params.channel = channel;
-
-               p = buffer;
-               while (*p && *p != ' ')
-                       p++;
-               if (*p)
-                       type = simple_strtoul(p + 1, NULL, 0);
-       }
-
-       IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
-                      "channel %d (for '%s')\n", type, params.channel, buf);
-       iwl3945_get_measurement(priv, &params, type);
-
-       return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
-                  iwl3945_show_measurement, iwl3945_store_measurement);
-
-static ssize_t iwl3945_store_retry_rate(struct device *d,
-                               struct device_attribute *attr,
-                               const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       priv->retry_rate = simple_strtoul(buf, NULL, 0);
-       if (priv->retry_rate <= 0)
-               priv->retry_rate = 1;
-
-       return count;
-}
-
-static ssize_t iwl3945_show_retry_rate(struct device *d,
-                              struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
-                  iwl3945_store_retry_rate);
-
-
-static ssize_t iwl3945_show_channels(struct device *d,
-                            struct device_attribute *attr, char *buf)
-{
-       /* all this shit doesn't belong in sysfs anyway */
-       return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-
-static ssize_t iwl3945_show_antenna(struct device *d,
-                           struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t iwl3945_store_antenna(struct device *d,
-                            struct device_attribute *attr,
-                            const char *buf, size_t count)
-{
-       struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
-       int ant;
-
-       if (count == 0)
-               return 0;
-
-       if (sscanf(buf, "%1i", &ant) != 1) {
-               IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
-               return count;
-       }
-
-       if ((ant >= 0) && (ant <= 2)) {
-               IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
-               iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
-       } else
-               IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
-
-       return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-
-static ssize_t iwl3945_show_status(struct device *d,
-                          struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-       return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-
-static ssize_t iwl3945_dump_error_log(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       char *p = (char *)buf;
-
-       if (p[0] == '1')
-               iwl3945_dump_nic_error_log(priv);
-
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
-       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
-       init_waitqueue_head(&priv->wait_command_queue);
-
-       INIT_WORK(&priv->restart, iwl3945_bg_restart);
-       INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
-       INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
-       INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
-       INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
-       iwl_legacy_setup_scan_deferred_work(priv);
-
-       iwl3945_hw_setup_deferred_work(priv);
-
-       init_timer(&priv->watchdog);
-       priv->watchdog.data = (unsigned long)priv;
-       priv->watchdog.function = iwl_legacy_bg_watchdog;
-
-       tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
-                    iwl3945_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
-       iwl3945_hw_cancel_deferred_work(priv);
-
-       cancel_delayed_work_sync(&priv->init_alive_start);
-       cancel_delayed_work(&priv->alive_start);
-
-       iwl_legacy_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
-       &dev_attr_antenna.attr,
-       &dev_attr_channels.attr,
-       &dev_attr_dump_errors.attr,
-       &dev_attr_flags.attr,
-       &dev_attr_filter_flags.attr,
-       &dev_attr_measurement.attr,
-       &dev_attr_retry_rate.attr,
-       &dev_attr_status.attr,
-       &dev_attr_temperature.attr,
-       &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       &dev_attr_debug_level.attr,
-#endif
-       NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
-       .name = NULL,           /* put in device directory */
-       .attrs = iwl3945_sysfs_entries,
-};
-
-struct ieee80211_ops iwl3945_hw_ops = {
-       .tx = iwl3945_mac_tx,
-       .start = iwl3945_mac_start,
-       .stop = iwl3945_mac_stop,
-       .add_interface = iwl_legacy_mac_add_interface,
-       .remove_interface = iwl_legacy_mac_remove_interface,
-       .change_interface = iwl_legacy_mac_change_interface,
-       .config = iwl_legacy_mac_config,
-       .configure_filter = iwl3945_configure_filter,
-       .set_key = iwl3945_mac_set_key,
-       .conf_tx = iwl_legacy_mac_conf_tx,
-       .reset_tsf = iwl_legacy_mac_reset_tsf,
-       .bss_info_changed = iwl_legacy_mac_bss_info_changed,
-       .hw_scan = iwl_legacy_mac_hw_scan,
-       .sta_add = iwl3945_mac_sta_add,
-       .sta_remove = iwl_legacy_mac_sta_remove,
-       .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
-       int ret;
-       struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
-       priv->retry_rate = 1;
-       priv->beacon_skb = NULL;
-
-       spin_lock_init(&priv->sta_lock);
-       spin_lock_init(&priv->hcmd_lock);
-
-       INIT_LIST_HEAD(&priv->free_frames);
-
-       mutex_init(&priv->mutex);
-
-       priv->ieee_channels = NULL;
-       priv->ieee_rates = NULL;
-       priv->band = IEEE80211_BAND_2GHZ;
-
-       priv->iw_mode = NL80211_IFTYPE_STATION;
-       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
-       /* initialize force reset */
-       priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
-       if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
-               IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
-                        eeprom->version);
-               ret = -EINVAL;
-               goto err;
-       }
-       ret = iwl_legacy_init_channel_map(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-               goto err;
-       }
-
-       /* Set up txpower settings in driver for all channels */
-       if (iwl3945_txpower_set_from_eeprom(priv)) {
-               ret = -EIO;
-               goto err_free_channel_map;
-       }
-
-       ret = iwl_legacy_init_geos(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-               goto err_free_channel_map;
-       }
-       iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
-       return 0;
-
-err_free_channel_map:
-       iwl_legacy_free_channel_map(priv);
-err:
-       return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST      200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
-       int ret;
-       struct ieee80211_hw *hw = priv->hw;
-
-       hw->rate_control_algorithm = "iwl-3945-rs";
-       hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
-       hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-       /* Tell mac80211 our characteristics */
-       hw->flags = IEEE80211_HW_SIGNAL_DBM |
-                   IEEE80211_HW_SPECTRUM_MGMT;
-
-       hw->wiphy->interface_modes =
-               priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
-       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
-                           WIPHY_FLAG_IBSS_RSN;
-
-       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
-       /* we create the 802.11 header and a zero-length SSID element */
-       hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
-
-       /* Default value; 4 EDCA QOS priorities */
-       hw->queues = 4;
-
-       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &priv->bands[IEEE80211_BAND_2GHZ];
-
-       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &priv->bands[IEEE80211_BAND_5GHZ];
-
-       iwl_legacy_leds_init(priv);
-
-       ret = ieee80211_register_hw(priv->hw);
-       if (ret) {
-               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-               return ret;
-       }
-       priv->mac80211_registered = 1;
-
-       return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int err = 0, i;
-       struct iwl_priv *priv;
-       struct ieee80211_hw *hw;
-       struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
-       struct iwl3945_eeprom *eeprom;
-       unsigned long flags;
-
-       /***********************
-        * 1. Allocating HW data
-        * ********************/
-
-       /* mac80211 allocates memory for this device instance, including
-        *   space for this driver's private structure */
-       hw = iwl_legacy_alloc_all(cfg);
-       if (hw == NULL) {
-               pr_err("Can not allocate network device\n");
-               err = -ENOMEM;
-               goto out;
-       }
-       priv = hw->priv;
-       SET_IEEE80211_DEV(hw, &pdev->dev);
-
-       priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
-       /* 3945 has only one valid context */
-       priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
-       for (i = 0; i < NUM_IWL_RXON_CTX; i++)
-               priv->contexts[i].ctxid = i;
-
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
-       priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
-       priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
-       priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
-       priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
-               BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC);
-       priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
-       priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
-       priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
-       /*
-        * Disabling hardware scan means that mac80211 will perform scans
-        * "the hard way", rather than using the device's scan.
-        */
-       if (iwl3945_mod_params.disable_hw_scan) {
-               IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
-               iwl3945_hw_ops.hw_scan = NULL;
-       }
-
-       IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-       priv->cfg = cfg;
-       priv->pci_dev = pdev;
-       priv->inta_mask = CSR_INI_SET_MASK;
-
-       if (iwl_legacy_alloc_traffic_mem(priv))
-               IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
-       /***************************
-        * 2. Initializing PCI bus
-        * *************************/
-       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-                               PCIE_LINK_STATE_CLKPM);
-
-       if (pci_enable_device(pdev)) {
-               err = -ENODEV;
-               goto out_ieee80211_free_hw;
-       }
-
-       pci_set_master(pdev);
-
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (err) {
-               IWL_WARN(priv, "No suitable DMA available.\n");
-               goto out_pci_disable_device;
-       }
-
-       pci_set_drvdata(pdev, priv);
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err)
-               goto out_pci_disable_device;
-
-       /***********************
-        * 3. Read REV Register
-        * ********************/
-       priv->hw_base = pci_iomap(pdev, 0, 0);
-       if (!priv->hw_base) {
-               err = -ENODEV;
-               goto out_pci_release_regions;
-       }
-
-       IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
-                       (unsigned long long) pci_resource_len(pdev, 0));
-       IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-       /* We disable the RETRY_TIMEOUT register (0x41) to keep
-        * PCI Tx retries from interfering with C3 CPU state */
-       pci_write_config_byte(pdev, 0x41, 0x00);
-
-       /* these spin locks will be used in apm_ops.init and EEPROM access
-        * we should init now
-        */
-       spin_lock_init(&priv->reg_lock);
-       spin_lock_init(&priv->lock);
-
-       /*
-        * stop and reset the on-board processor just in case it is in a
-        * strange state ... like being left stranded by a primary kernel
-        * and this is now the kdump kernel trying to start up
-        */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-       /***********************
-        * 4. Read EEPROM
-        * ********************/
-
-       /* Read the EEPROM */
-       err = iwl_legacy_eeprom_init(priv);
-       if (err) {
-               IWL_ERR(priv, "Unable to init EEPROM\n");
-               goto out_iounmap;
-       }
-       /* MAC Address location in EEPROM same for 3945/4965 */
-       eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-       IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
-       SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
-       /***********************
-        * 5. Setup HW Constants
-        * ********************/
-       /* Device-specific setup */
-       if (iwl3945_hw_set_hw_params(priv)) {
-               IWL_ERR(priv, "failed to set hw settings\n");
-               goto out_eeprom_free;
-       }
-
-       /***********************
-        * 6. Setup priv
-        * ********************/
-
-       err = iwl3945_init_drv(priv);
-       if (err) {
-               IWL_ERR(priv, "initializing driver failed\n");
-               goto out_unset_hw_params;
-       }
-
-       IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
-               priv->cfg->name);
-
-       /***********************
-        * 7. Setup Services
-        * ********************/
-
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_legacy_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       pci_enable_msi(priv->pci_dev);
-
-       err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
-                         IRQF_SHARED, DRV_NAME, priv);
-       if (err) {
-               IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
-               goto out_disable_msi;
-       }
-
-       err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-       if (err) {
-               IWL_ERR(priv, "failed to create sysfs device attributes\n");
-               goto out_release_irq;
-       }
-
-       iwl_legacy_set_rxon_channel(priv,
-                            &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
-                            &priv->contexts[IWL_RXON_CTX_BSS]);
-       iwl3945_setup_deferred_work(priv);
-       iwl3945_setup_rx_handlers(priv);
-       iwl_legacy_power_initialize(priv);
-
-       /*********************************
-        * 8. Setup and Register mac80211
-        * *******************************/
-
-       iwl_legacy_enable_interrupts(priv);
-
-       err = iwl3945_setup_mac(priv);
-       if (err)
-               goto  out_remove_sysfs;
-
-       err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
-       if (err)
-               IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
-       /* Start monitoring the killswitch */
-       queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
-                          2 * HZ);
-
-       return 0;
-
- out_remove_sysfs:
-       destroy_workqueue(priv->workqueue);
-       priv->workqueue = NULL;
-       sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
-       free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
-       pci_disable_msi(priv->pci_dev);
-       iwl_legacy_free_geos(priv);
-       iwl_legacy_free_channel_map(priv);
- out_unset_hw_params:
-       iwl3945_unset_hw_params(priv);
- out_eeprom_free:
-       iwl_legacy_eeprom_free(priv);
- out_iounmap:
-       pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
-       pci_release_regions(pdev);
- out_pci_disable_device:
-       pci_set_drvdata(pdev, NULL);
-       pci_disable_device(pdev);
- out_ieee80211_free_hw:
-       iwl_legacy_free_traffic_mem(priv);
-       ieee80211_free_hw(priv->hw);
- out:
-       return err;
-}
-
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
-       struct iwl_priv *priv = pci_get_drvdata(pdev);
-       unsigned long flags;
-
-       if (!priv)
-               return;
-
-       IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
-       iwl_legacy_dbgfs_unregister(priv);
-
-       set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       iwl_legacy_leds_exit(priv);
-
-       if (priv->mac80211_registered) {
-               ieee80211_unregister_hw(priv->hw);
-               priv->mac80211_registered = 0;
-       } else {
-               iwl3945_down(priv);
-       }
-
-       /*
-        * Make sure device is reset to low power before unloading driver.
-        * This may be redundant with iwl_down(), but there are paths to
-        * run iwl_down() without calling apm_ops.stop(), and there are
-        * paths to avoid running iwl_down() at all before leaving driver.
-        * This (inexpensive) call *makes sure* device is reset.
-        */
-       iwl_legacy_apm_stop(priv);
-
-       /* make sure we flush any pending irq or
-        * tasklet for the driver
-        */
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_legacy_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl3945_synchronize_irq(priv);
-
-       sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
-       cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
-       iwl3945_dealloc_ucode_pci(priv);
-
-       if (priv->rxq.bd)
-               iwl3945_rx_queue_free(priv, &priv->rxq);
-       iwl3945_hw_txq_ctx_free(priv);
-
-       iwl3945_unset_hw_params(priv);
-
-       /*netif_stop_queue(dev); */
-       flush_workqueue(priv->workqueue);
-
-       /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
-        * priv->workqueue... so we can't take down the workqueue
-        * until now... */
-       destroy_workqueue(priv->workqueue);
-       priv->workqueue = NULL;
-       iwl_legacy_free_traffic_mem(priv);
-
-       free_irq(pdev->irq, priv);
-       pci_disable_msi(pdev);
-
-       pci_iounmap(pdev, priv->hw_base);
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-
-       iwl_legacy_free_channel_map(priv);
-       iwl_legacy_free_geos(priv);
-       kfree(priv->scan_cmd);
-       if (priv->beacon_skb)
-               dev_kfree_skb(priv->beacon_skb);
-
-       ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
-       .name = DRV_NAME,
-       .id_table = iwl3945_hw_card_ids,
-       .probe = iwl3945_pci_probe,
-       .remove = __devexit_p(iwl3945_pci_remove),
-       .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl3945_init(void)
-{
-
-       int ret;
-       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
-       pr_info(DRV_COPYRIGHT "\n");
-
-       ret = iwl3945_rate_control_register();
-       if (ret) {
-               pr_err("Unable to register rate control algorithm: %d\n", ret);
-               return ret;
-       }
-
-       ret = pci_register_driver(&iwl3945_driver);
-       if (ret) {
-               pr_err("Unable to initialize PCI module\n");
-               goto error_register;
-       }
-
-       return ret;
-
-error_register:
-       iwl3945_rate_control_unregister();
-       return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
-       pci_unregister_driver(&iwl3945_driver);
-       iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
-               "using software crypto (default 1 [software])");
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
-               int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644 (file)
index d2fba9e..0000000
+++ /dev/null
@@ -1,3281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME        "iwl4965"
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
-#include "iwl-4965-calib.h"
-#include "iwl-4965.h"
-#include "iwl-4965-led.h"
-
-
-/******************************************************************************
- *
- * module boiler plate
- *
- ******************************************************************************/
-
-/*
- * module name, copyright, version, etc.
- */
-#define DRV_DESCRIPTION        "Intel(R) Wireless WiFi 4965 driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION     IWLWIFI_VERSION VD
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
-
-void iwl4965_update_chain_flags(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx;
-
-       if (priv->cfg->ops->hcmd->set_rxon_chain) {
-               for_each_context(priv, ctx) {
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-                       if (ctx->active.rx_chain != ctx->staging.rx_chain)
-                               iwl_legacy_commit_rxon(priv, ctx);
-               }
-       }
-}
-
-static void iwl4965_clear_free_frames(struct iwl_priv *priv)
-{
-       struct list_head *element;
-
-       IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
-                      priv->frames_count);
-
-       while (!list_empty(&priv->free_frames)) {
-               element = priv->free_frames.next;
-               list_del(element);
-               kfree(list_entry(element, struct iwl_frame, list));
-               priv->frames_count--;
-       }
-
-       if (priv->frames_count) {
-               IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
-                           priv->frames_count);
-               priv->frames_count = 0;
-       }
-}
-
-static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
-{
-       struct iwl_frame *frame;
-       struct list_head *element;
-       if (list_empty(&priv->free_frames)) {
-               frame = kzalloc(sizeof(*frame), GFP_KERNEL);
-               if (!frame) {
-                       IWL_ERR(priv, "Could not allocate frame!\n");
-                       return NULL;
-               }
-
-               priv->frames_count++;
-               return frame;
-       }
-
-       element = priv->free_frames.next;
-       list_del(element);
-       return list_entry(element, struct iwl_frame, list);
-}
-
-static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
-{
-       memset(frame, 0, sizeof(*frame));
-       list_add(&frame->list, &priv->free_frames);
-}
-
-static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
-                                struct ieee80211_hdr *hdr,
-                                int left)
-{
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->beacon_skb)
-               return 0;
-
-       if (priv->beacon_skb->len > left)
-               return 0;
-
-       memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
-       return priv->beacon_skb->len;
-}
-
-/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
-static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
-                              struct iwl_tx_beacon_cmd *tx_beacon_cmd,
-                              u8 *beacon, u32 frame_size)
-{
-       u16 tim_idx;
-       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
-
-       /*
-        * The index is relative to frame start but we start looking at the
-        * variable-length part of the beacon.
-        */
-       tim_idx = mgmt->u.beacon.variable - beacon;
-
-       /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
-       while ((tim_idx < (frame_size - 2)) &&
-                       (beacon[tim_idx] != WLAN_EID_TIM))
-               tim_idx += beacon[tim_idx+1] + 2;
-
-       /* If TIM field was found, set variables */
-       if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
-               tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
-               tx_beacon_cmd->tim_size = beacon[tim_idx+1];
-       } else
-               IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
-}
-
-static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
-                                      struct iwl_frame *frame)
-{
-       struct iwl_tx_beacon_cmd *tx_beacon_cmd;
-       u32 frame_size;
-       u32 rate_flags;
-       u32 rate;
-       /*
-        * We have to set up the TX command, the TX Beacon command, and the
-        * beacon contents.
-        */
-
-       lockdep_assert_held(&priv->mutex);
-
-       if (!priv->beacon_ctx) {
-               IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
-               return 0;
-       }
-
-       /* Initialize memory */
-       tx_beacon_cmd = &frame->u.beacon;
-       memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
-       /* Set up TX beacon contents */
-       frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
-                               sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-       if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
-               return 0;
-       if (!frame_size)
-               return 0;
-
-       /* Set up TX command fields */
-       tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-       tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
-       tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-       tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
-               TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
-
-       /* Set up TX beacon command fields */
-       iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
-                          frame_size);
-
-       /* Set up packet rate and flags */
-       rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
-       priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
-                                             priv->hw_params.valid_tx_ant);
-       rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-       if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
-               rate_flags |= RATE_MCS_CCK_MSK;
-       tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
-                       rate_flags);
-
-       return sizeof(*tx_beacon_cmd) + frame_size;
-}
-
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
-{
-       struct iwl_frame *frame;
-       unsigned int frame_size;
-       int rc;
-
-       frame = iwl4965_get_free_frame(priv);
-       if (!frame) {
-               IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
-                         "command.\n");
-               return -ENOMEM;
-       }
-
-       frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
-       if (!frame_size) {
-               IWL_ERR(priv, "Error configuring the beacon command\n");
-               iwl4965_free_frame(priv, frame);
-               return -EINVAL;
-       }
-
-       rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
-                             &frame->u.cmd[0]);
-
-       iwl4965_free_frame(priv, frame);
-
-       return rc;
-}
-
-static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-       dma_addr_t addr = get_unaligned_le32(&tb->lo);
-       if (sizeof(dma_addr_t) > sizeof(u32))
-               addr |=
-               ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
-       return addr;
-}
-
-static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-       return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-                                 dma_addr_t addr, u16 len)
-{
-       struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-       u16 hi_n_len = len << 4;
-
-       put_unaligned_le32(addr, &tb->lo);
-       if (sizeof(dma_addr_t) > sizeof(u32))
-               hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
-       tb->hi_n_len = cpu_to_le16(hi_n_len);
-
-       tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
-       return tfd->num_tbs & 0x1f;
-}
-
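-/*
- * For illustration (values chosen arbitrarily): the tb accessors above pack
- * a 36-bit DMA address and a 12-bit length into the two little-endian fields
- * of an iwl_tfd_tb.  For addr = 0x987654320 and len = 0x40, the setter stores
- * tb->lo = 0x87654320 and tb->hi_n_len = (0x40 << 4) | 0x9 = 0x409, and
- * iwl4965_tfd_tb_get_addr()/iwl4965_tfd_tb_get_len() recover the original
- * address and length from those two fields.
- */
-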
-/**
- * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-       struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
-       struct iwl_tfd *tfd;
-       struct pci_dev *dev = priv->pci_dev;
-       int index = txq->q.read_ptr;
-       int i;
-       int num_tbs;
-
-       tfd = &tfd_tmp[index];
-
-       /* Sanity check on number of chunks */
-       num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-       if (num_tbs >= IWL_NUM_OF_TBS) {
-               IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-               /* @todo issue fatal error, this is quite a serious situation */
-               return;
-       }
-
-       /* Unmap tx_cmd */
-       if (num_tbs)
-               pci_unmap_single(dev,
-                               dma_unmap_addr(&txq->meta[index], mapping),
-                               dma_unmap_len(&txq->meta[index], len),
-                               PCI_DMA_BIDIRECTIONAL);
-
-       /* Unmap chunks, if any. */
-       for (i = 1; i < num_tbs; i++)
-               pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
-                               iwl4965_tfd_tb_get_len(tfd, i),
-                               PCI_DMA_TODEVICE);
-
-       /* free SKB */
-       if (txq->txb) {
-               struct sk_buff *skb;
-
-               skb = txq->txb[txq->q.read_ptr].skb;
-
-               /* can be called from irqs-disabled context */
-               if (skb) {
-                       dev_kfree_skb_any(skb);
-                       txq->txb[txq->q.read_ptr].skb = NULL;
-               }
-       }
-}
-
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-                                struct iwl_tx_queue *txq,
-                                dma_addr_t addr, u16 len,
-                                u8 reset, u8 pad)
-{
-       struct iwl_queue *q;
-       struct iwl_tfd *tfd, *tfd_tmp;
-       u32 num_tbs;
-
-       q = &txq->q;
-       tfd_tmp = (struct iwl_tfd *)txq->tfds;
-       tfd = &tfd_tmp[q->write_ptr];
-
-       if (reset)
-               memset(tfd, 0, sizeof(*tfd));
-
-       num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-       /* Each TFD can point to a maximum of 20 Tx buffers */
-       if (num_tbs >= IWL_NUM_OF_TBS) {
-               IWL_ERR(priv, "Error can not send more than %d chunks\n",
-                         IWL_NUM_OF_TBS);
-               return -EINVAL;
-       }
-
-       BUG_ON(addr & ~DMA_BIT_MASK(36));
-       if (unlikely(addr & ~IWL_TX_DMA_MASK))
-               IWL_ERR(priv, "Unaligned address = %llx\n",
-                         (unsigned long long)addr);
-
-       iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
-
-       return 0;
-}
-
-/*
- * Tell the NIC where to find the circular buffer of Tx Frame Descriptors
- * for the given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
-                        struct iwl_tx_queue *txq)
-{
-       int txq_id = txq->q.id;
-
-       /* Circular buffer (TFD queue in DRAM) physical base address */
-       iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
-                            txq->q.dma_addr >> 8);
-
-       return 0;
-}
-
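-/*
- * FH_MEM_CBBC_QUEUE(txq_id) above is written with the TFD circular buffer
- * base address shifted right by 8, i.e. in 256-byte units, so the queue's
- * DMA region is presumably expected to be at least 256-byte aligned.
- */
-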
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_alive_resp *palive;
-       struct delayed_work *pwork;
-
-       palive = &pkt->u.alive_frame;
-
-       IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
-                      "0x%01X 0x%01X\n",
-                      palive->is_valid, palive->ver_type,
-                      palive->ver_subtype);
-
-       if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
-               IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
-               memcpy(&priv->card_alive_init,
-                      &pkt->u.alive_frame,
-                      sizeof(struct iwl_init_alive_resp));
-               pwork = &priv->init_alive_start;
-       } else {
-               IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-               memcpy(&priv->card_alive, &pkt->u.alive_frame,
-                      sizeof(struct iwl_alive_resp));
-               pwork = &priv->alive_start;
-       }
-
-       /* We delay the ALIVE response by 5ms to
-        * give the HW RF Kill time to activate... */
-       if (palive->is_valid == UCODE_VALID_OK)
-               queue_delayed_work(priv->workqueue, pwork,
-                                  msecs_to_jiffies(5));
-       else
-               IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-/**
- * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
- *
- * This callback is provided in order to send a statistics request.
- *
- * This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
- * was received.  We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER.
- */
-static void iwl4965_bg_statistics_periodic(unsigned long data)
-{
-       struct iwl_priv *priv = (struct iwl_priv *)data;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       /* don't send host commands if rf-kill is on */
-       if (!iwl_legacy_is_ready_rf(priv))
-               return;
-
-       iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl4965_beacon_notif *beacon =
-               (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
-       IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
-               "tsf %d %d rate %d\n",
-               le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
-               beacon->beacon_notify_hdr.failure_frame,
-               le32_to_cpu(beacon->ibss_mgr_status),
-               le32_to_cpu(beacon->high_tsf),
-               le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
-       priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
-{
-       unsigned long flags;
-
-       IWL_DEBUG_POWER(priv, "Stop all queues\n");
-
-       if (priv->mac80211_registered)
-               ieee80211_stop_queues(priv->hw);
-
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-                       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
-       iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-       spin_lock_irqsave(&priv->reg_lock, flags);
-       if (!iwl_grab_nic_access(priv))
-               iwl_release_nic_access(priv);
-       spin_unlock_irqrestore(&priv->reg_lock, flags);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_mem_buffer *rxb)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
-       unsigned long status = priv->status;
-
-       IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
-                         (flags & HW_CARD_DISABLED) ? "Kill" : "On",
-                         (flags & SW_CARD_DISABLED) ? "Kill" : "On",
-                         (flags & CT_CARD_DISABLED) ?
-                         "Reached" : "Not reached");
-
-       if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
-                    CT_CARD_DISABLED)) {
-
-               iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
-                           CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-               iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
-                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
-               if (!(flags & RXON_CARD_DISABLED)) {
-                       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-                                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-                       iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
-                                       HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-               }
-       }
-
-       if (flags & CT_CARD_DISABLED)
-               iwl4965_perform_ct_kill_task(priv);
-
-       if (flags & HW_CARD_DISABLED)
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-       if (!(flags & RXON_CARD_DISABLED))
-               iwl_legacy_scan_cancel(priv);
-
-       if ((test_bit(STATUS_RF_KILL_HW, &status) !=
-            test_bit(STATUS_RF_KILL_HW, &priv->status)))
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-                       test_bit(STATUS_RF_KILL_HW, &priv->status));
-       else
-               wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
-{
-       priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
-       priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
-       priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
-       priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
-                       iwl_legacy_rx_spectrum_measure_notif;
-       priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
-       priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
-           iwl_legacy_rx_pm_debug_statistics_notif;
-       priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-
-       /*
-        * The same handler is used for both the REPLY to a discrete
-        * statistics request from the host as well as for the periodic
-        * statistics notifications (after received beacons) from the uCode.
-        */
-       priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
-       priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
-
-       iwl_legacy_setup_rx_scan_handlers(priv);
-
-       /* status change handler */
-       priv->rx_handlers[CARD_STATE_NOTIFICATION] =
-                                       iwl4965_rx_card_state_notif;
-
-       priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
-           iwl4965_rx_missed_beacon_notif;
-       /* Rx handlers */
-       priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
-       priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
-       /* block ack */
-       priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
-       /* Set up hardware specific Rx handlers */
-       priv->cfg->ops->lib->rx_handler_setup(priv);
-}
-
-/**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-void iwl4965_rx_handle(struct iwl_priv *priv)
-{
-       struct iwl_rx_mem_buffer *rxb;
-       struct iwl_rx_packet *pkt;
-       struct iwl_rx_queue *rxq = &priv->rxq;
-       u32 r, i;
-       int reclaim;
-       unsigned long flags;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty;
-
-       /* uCode's read index (stored in shared DRAM) indicates the last Rx
-        * buffer that the driver may process (last buffer filled by ucode). */
-       r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
-       i = rxq->read;
-
-       /* Rx interrupt, but nothing sent from uCode */
-       if (i == r)
-               IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
-       /* calculate how many frames need to be restocked after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
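-       /*
-        * Worked example (illustrative values): with RX_QUEUE_SIZE of 256,
-        * write_actual = 250 and r = 4 give total_empty = 4 - 250 + 256 = 10.
-        * fill_rx is only set once more than half the queue is empty, which
-        * forces a replenish at least every 8 handled frames in the loop below.
-        */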
-
-       while (i != r) {
-               int len;
-
-               rxb = rxq->queue[i];
-
-               /* If an RXB doesn't have a Rx queue slot associated with it,
-                * then a bug has been introduced in the queue refilling
-                * routines -- catch it here */
-               BUG_ON(rxb == NULL);
-
-               rxq->queue[i] = NULL;
-
-               pci_unmap_page(priv->pci_dev, rxb->page_dma,
-                              PAGE_SIZE << priv->hw_params.rx_page_order,
-                              PCI_DMA_FROMDEVICE);
-               pkt = rxb_addr(rxb);
-
-               len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-               len += sizeof(u32); /* account for status word */
-               trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
-               /* Reclaim a command buffer only if this packet is a response
-                *   to a (driver-originated) command.
-                * If the packet (e.g. Rx frame) originated from uCode,
-                *   there is no command buffer to reclaim.
-                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-                *   but apparently a few don't get set; catch them here. */
-               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-                       (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
-                       (pkt->hdr.cmd != REPLY_RX) &&
-                       (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
-                       (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
-                       (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
-                       (pkt->hdr.cmd != REPLY_TX);
-
-               /* Based on type of command response or notification,
-                *   handle those that need handling via function in
-                *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
-               if (priv->rx_handlers[pkt->hdr.cmd]) {
-                       IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
-                               i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
-                               pkt->hdr.cmd);
-                       priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
-                       priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
-               } else {
-                       /* No handling needed */
-                       IWL_DEBUG_RX(priv,
-                               "r %d i %d No handler needed for %s, 0x%02x\n",
-                               r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
-                               pkt->hdr.cmd);
-               }
-
-               /*
-                * XXX: After here, we should always check rxb->page
-                * against NULL before touching it or its virtual
-                * memory (pkt). Because some rx_handler might have
-                * already taken or freed the pages.
-                */
-
-               if (reclaim) {
-                       /* Invoke any callbacks, transfer the buffer to caller,
-                        * and fire off the (possibly) blocking iwl_legacy_send_cmd()
-                        * as we reclaim the driver command queue */
-                       if (rxb->page)
-                               iwl_legacy_tx_cmd_complete(priv, rxb);
-                       else
-                               IWL_WARN(priv, "Claim null rxb?\n");
-               }
-
-               /* Reuse the page if possible. For notification packets and
-                * SKBs that fail to Rx correctly, add them back into the
-                * rx_free list for reuse later. */
-               spin_lock_irqsave(&rxq->lock, flags);
-               if (rxb->page != NULL) {
-                       rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
-                               0, PAGE_SIZE << priv->hw_params.rx_page_order,
-                               PCI_DMA_FROMDEVICE);
-                       list_add_tail(&rxb->list, &rxq->rx_free);
-                       rxq->free_count++;
-               } else
-                       list_add_tail(&rxb->list, &rxq->rx_used);
-
-               spin_unlock_irqrestore(&rxq->lock, flags);
-
-               i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode won't assert. */
-               if (fill_rx) {
-                       count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               iwl4965_rx_replenish_now(priv);
-                               count = 0;
-                       }
-               }
-       }
-
-       /* Backtrack one entry */
-       rxq->read = i;
-       if (fill_rx)
-               iwl4965_rx_replenish_now(priv);
-       else
-               iwl4965_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
-{
-       /* wait to make sure we flush any pending tasklet */
-       synchronize_irq(priv->pci_dev->irq);
-       tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl4965_irq_tasklet(struct iwl_priv *priv)
-{
-       u32 inta, handled = 0;
-       u32 inta_fh;
-       unsigned long flags;
-       u32 i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       u32 inta_mask;
-#endif
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Ack/clear/reset pending uCode interrupts.
-        * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
-        *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
-       inta = iwl_read32(priv, CSR_INT);
-       iwl_write32(priv, CSR_INT, inta);
-
-       /* Ack/clear/reset pending flow-handler (DMA) interrupts.
-        * Any new interrupts that happen after this, either while we're
-        * in this tasklet, or later, will show up in next ISR/tasklet. */
-       inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-       iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
-               /* just for debug */
-               inta_mask = iwl_read32(priv, CSR_INT_MASK);
-               IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
-                             inta, inta_mask, inta_fh);
-       }
-#endif
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
-        * atomic, make sure that inta covers all the interrupts that
-        * we've discovered, even if FH interrupt came in just after
-        * reading CSR_INT. */
-       if (inta_fh & CSR49_FH_INT_RX_MASK)
-               inta |= CSR_INT_BIT_FH_RX;
-       if (inta_fh & CSR49_FH_INT_TX_MASK)
-               inta |= CSR_INT_BIT_FH_TX;
-
-       /* Now service all interrupt bits discovered above. */
-       if (inta & CSR_INT_BIT_HW_ERR) {
-               IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
-
-               /* Tell the device to stop sending interrupts */
-               iwl_legacy_disable_interrupts(priv);
-
-               priv->isr_stats.hw++;
-               iwl_legacy_irq_handle_error(priv);
-
-               handled |= CSR_INT_BIT_HW_ERR;
-
-               return;
-       }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-               /* NIC fires this, but we don't use it, redundant with WAKEUP */
-               if (inta & CSR_INT_BIT_SCD) {
-                       IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
-                                     "the frame(s).\n");
-                       priv->isr_stats.sch++;
-               }
-
-               /* Alive notification via Rx interrupt will do the real work */
-               if (inta & CSR_INT_BIT_ALIVE) {
-                       IWL_DEBUG_ISR(priv, "Alive interrupt\n");
-                       priv->isr_stats.alive++;
-               }
-       }
-#endif
-       /* Safely ignore these bits for debug checks below */
-       inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
-       /* HW RF KILL switch toggled */
-       if (inta & CSR_INT_BIT_RF_KILL) {
-               int hw_rf_kill = 0;
-               if (!(iwl_read32(priv, CSR_GP_CNTRL) &
-                               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
-                       hw_rf_kill = 1;
-
-               IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
-                               hw_rf_kill ? "disable radio" : "enable radio");
-
-               priv->isr_stats.rfkill++;
-
-               /* The driver only loads ucode when setting the interface up.
-                * The driver allows loading the ucode even if the radio
-                * is killed. Hence update the killswitch state here. The
-                * rfkill handler will take care of restarting if needed.
-                */
-               if (!test_bit(STATUS_ALIVE, &priv->status)) {
-                       if (hw_rf_kill)
-                               set_bit(STATUS_RF_KILL_HW, &priv->status);
-                       else
-                               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-                       wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
-               }
-
-               handled |= CSR_INT_BIT_RF_KILL;
-       }
-
-       /* Chip got too hot and stopped itself */
-       if (inta & CSR_INT_BIT_CT_KILL) {
-               IWL_ERR(priv, "Microcode CT kill error detected.\n");
-               priv->isr_stats.ctkill++;
-               handled |= CSR_INT_BIT_CT_KILL;
-       }
-
-       /* Error detected by uCode */
-       if (inta & CSR_INT_BIT_SW_ERR) {
-               IWL_ERR(priv, "Microcode SW error detected. "
-                       " Restarting 0x%X.\n", inta);
-               priv->isr_stats.sw++;
-               iwl_legacy_irq_handle_error(priv);
-               handled |= CSR_INT_BIT_SW_ERR;
-       }
-
-       /*
-        * uCode wakes up after power-down sleep.
-        * Tell device about any new tx or host commands enqueued,
-        * and about any Rx buffers made available while asleep.
-        */
-       if (inta & CSR_INT_BIT_WAKEUP) {
-               IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-               iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
-               for (i = 0; i < priv->hw_params.max_txq_num; i++)
-                       iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
-               priv->isr_stats.wakeup++;
-               handled |= CSR_INT_BIT_WAKEUP;
-       }
-
-       /* All uCode command responses, including Tx command responses,
-        * Rx "responses" (frame-received notification), and other
-        * notifications from uCode come through here */
-       if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
-               iwl4965_rx_handle(priv);
-               priv->isr_stats.rx++;
-               handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
-       }
-
-       /* This "Tx" DMA channel is used only for loading uCode */
-       if (inta & CSR_INT_BIT_FH_TX) {
-               IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
-               priv->isr_stats.tx++;
-               handled |= CSR_INT_BIT_FH_TX;
-               /* Wake up uCode load routine, now that load is complete */
-               priv->ucode_write_complete = 1;
-               wake_up(&priv->wait_command_queue);
-       }
-
-       if (inta & ~handled) {
-               IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
-               priv->isr_stats.unhandled++;
-       }
-
-       if (inta & ~(priv->inta_mask)) {
-               IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
-                        inta & ~priv->inta_mask);
-               IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
-       }
-
-       /* Re-enable all interrupts */
-       /* only Re-enable if disabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &priv->status))
-               iwl_legacy_enable_interrupts(priv);
-       /* Re-enable RF_KILL if it occurred */
-       else if (handled & CSR_INT_BIT_RF_KILL)
-               iwl_legacy_enable_rfkill_int(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
-               inta = iwl_read32(priv, CSR_INT);
-               inta_mask = iwl_read32(priv, CSR_INT_MASK);
-               inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-               IWL_DEBUG_ISR(priv,
-                       "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
-                       "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
-       }
-#endif
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
-static ssize_t iwl4965_show_debug_level(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl4965_store_debug_level(struct device *d,
-                               struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       unsigned long val;
-       int ret;
-
-       ret = strict_strtoul(buf, 0, &val);
-       if (ret)
-               IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
-       else {
-               priv->debug_level = val;
-               if (iwl_legacy_alloc_traffic_mem(priv))
-                       IWL_ERR(priv,
-                               "Not enough memory to generate traffic log\n");
-       }
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-                       iwl4965_show_debug_level, iwl4965_store_debug_level);
-
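-/*
- * Example usage from userspace (interface name assumed to be wlan0, as in
- * the comment above; 0x47 is an arbitrary bitmask of debug flags):
- *
- *   cat /sys/class/net/wlan0/device/debug_level
- *   echo 0x47 > /sys/class/net/wlan0/device/debug_level
- *
- * strict_strtoul() is called with base 0, so both decimal and 0x-prefixed
- * hexadecimal values are accepted.
- */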
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-
-static ssize_t iwl4965_show_temperature(struct device *d,
-                               struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       if (!iwl_legacy_is_alive(priv))
-               return -EAGAIN;
-
-       return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
-
-static ssize_t iwl4965_show_tx_power(struct device *d,
-                            struct device_attribute *attr, char *buf)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-
-       if (!iwl_legacy_is_ready_rf(priv))
-               return sprintf(buf, "off\n");
-       else
-               return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl4965_store_tx_power(struct device *d,
-                             struct device_attribute *attr,
-                             const char *buf, size_t count)
-{
-       struct iwl_priv *priv = dev_get_drvdata(d);
-       unsigned long val;
-       int ret;
-
-       ret = strict_strtoul(buf, 10, &val);
-       if (ret)
-               IWL_INFO(priv, "%s is not in decimal form.\n", buf);
-       else {
-               ret = iwl_legacy_set_tx_power(priv, val, false);
-               if (ret)
-                       IWL_ERR(priv, "failed setting tx power (%d).\n",
-                               ret);
-               else
-                       ret = count;
-       }
-       return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
-                       iwl4965_show_tx_power, iwl4965_store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
-       &dev_attr_temperature.attr,
-       &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-       &dev_attr_debug_level.attr,
-#endif
-       NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
-       .name = NULL,           /* put in device directory */
-       .attrs = iwl_sysfs_entries,
-};
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
-{
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-       iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-static void iwl4965_nic_start(struct iwl_priv *priv)
-{
-       /* Remove all resets to allow NIC to operate */
-       iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
-                                       void *context);
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
-                                               u32 max_probe_length);
-
-static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
-{
-       const char *name_pre = priv->cfg->fw_name_pre;
-       char tag[8];
-
-       if (first) {
-               priv->fw_index = priv->cfg->ucode_api_max;
-               sprintf(tag, "%d", priv->fw_index);
-       } else {
-               priv->fw_index--;
-               sprintf(tag, "%d", priv->fw_index);
-       }
-
-       if (priv->fw_index < priv->cfg->ucode_api_min) {
-               IWL_ERR(priv, "no suitable firmware found!\n");
-               return -ENOENT;
-       }
-
-       sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
-       IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
-                      priv->firmware_name);
-
-       return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
-                                      &priv->pci_dev->dev, GFP_KERNEL, priv,
-                                      iwl4965_ucode_callback);
-}
-
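-/*
- * Illustrative example: assuming fw_name_pre were "iwlwifi-4965-" with
- * ucode_api_max = 2 and ucode_api_min = 2, the first call above would
- * request "iwlwifi-4965-2.ucode"; a retry with first == false would then
- * decrement fw_index below ucode_api_min and give up with -ENOENT.
- */
-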
-struct iwl4965_firmware_pieces {
-       const void *inst, *data, *init, *init_data, *boot;
-       size_t inst_size, data_size, init_size, init_data_size, boot_size;
-};
-
-static int iwl4965_load_firmware(struct iwl_priv *priv,
-                                      const struct firmware *ucode_raw,
-                                      struct iwl4965_firmware_pieces *pieces)
-{
-       struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
-       u32 api_ver, hdr_size;
-       const u8 *src;
-
-       priv->ucode_ver = le32_to_cpu(ucode->ver);
-       api_ver = IWL_UCODE_API(priv->ucode_ver);
-
-       switch (api_ver) {
-       default:
-       case 0:
-       case 1:
-       case 2:
-               hdr_size = 24;
-               if (ucode_raw->size < hdr_size) {
-                       IWL_ERR(priv, "File size too small!\n");
-                       return -EINVAL;
-               }
-               pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
-               pieces->data_size = le32_to_cpu(ucode->v1.data_size);
-               pieces->init_size = le32_to_cpu(ucode->v1.init_size);
-               pieces->init_data_size =
-                               le32_to_cpu(ucode->v1.init_data_size);
-               pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
-               src = ucode->v1.data;
-               break;
-       }
-
-       /* Verify size of file vs. image size info in file's header */
-       if (ucode_raw->size != hdr_size + pieces->inst_size +
-                               pieces->data_size + pieces->init_size +
-                               pieces->init_data_size + pieces->boot_size) {
-
-               IWL_ERR(priv,
-                       "uCode file size %d does not match expected size\n",
-                       (int)ucode_raw->size);
-               return -EINVAL;
-       }
-
-       pieces->inst = src;
-       src += pieces->inst_size;
-       pieces->data = src;
-       src += pieces->data_size;
-       pieces->init = src;
-       src += pieces->init_size;
-       pieces->init_data = src;
-       src += pieces->init_data_size;
-       pieces->boot = src;
-       src += pieces->boot_size;
-
-       return 0;
-}
-
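-/*
- * Sketch of the v0/v1/v2 ucode file layout parsed above (all header fields
- * are little-endian u32s):
- *
- *   offset  0: ver
- *   offset  4: inst_size
- *   offset  8: data_size
- *   offset 12: init_size
- *   offset 16: init_data_size
- *   offset 20: boot_size
- *   offset 24: inst image, then data, init, init_data and boot images,
- *              back to back in that order
- *
- * The file is rejected unless its total size equals the 24-byte header
- * plus the five image sizes.
- */
-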
-/**
- * iwl4965_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void
-iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
-       struct iwl_priv *priv = context;
-       struct iwl_ucode_header *ucode;
-       int err;
-       struct iwl4965_firmware_pieces pieces;
-       const unsigned int api_max = priv->cfg->ucode_api_max;
-       const unsigned int api_min = priv->cfg->ucode_api_min;
-       u32 api_ver;
-
-       u32 max_probe_length = 200;
-       u32 standard_phy_calibration_size =
-                       IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
-       memset(&pieces, 0, sizeof(pieces));
-
-       if (!ucode_raw) {
-               if (priv->fw_index <= priv->cfg->ucode_api_max)
-                       IWL_ERR(priv,
-                               "request for firmware file '%s' failed.\n",
-                               priv->firmware_name);
-               goto try_again;
-       }
-
-       IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
-                      priv->firmware_name, ucode_raw->size);
-
-       /* Make sure that we got at least the API version number */
-       if (ucode_raw->size < 4) {
-               IWL_ERR(priv, "File size way too small!\n");
-               goto try_again;
-       }
-
-       /* Data from ucode file:  header followed by uCode images */
-       ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
-       err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
-
-       if (err)
-               goto try_again;
-
-       api_ver = IWL_UCODE_API(priv->ucode_ver);
-
-       /*
-        * api_ver should match the api version forming part of the
-        * firmware filename ... but we don't check for that and only rely
-        * on the API version read from firmware header from here on forward
-        */
-       if (api_ver < api_min || api_ver > api_max) {
-               IWL_ERR(priv,
-                       "Driver unable to support your firmware API. "
-                       "Driver supports v%u, firmware is v%u.\n",
-                       api_max, api_ver);
-               goto try_again;
-       }
-
-       if (api_ver != api_max)
-               IWL_ERR(priv,
-                       "Firmware has old API version. Expected v%u, "
-                       "got v%u. New firmware can be obtained "
-                       "from http://www.intellinuxwireless.org.\n",
-                       api_max, api_ver);
-
-       IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
-                IWL_UCODE_MAJOR(priv->ucode_ver),
-                IWL_UCODE_MINOR(priv->ucode_ver),
-                IWL_UCODE_API(priv->ucode_ver),
-                IWL_UCODE_SERIAL(priv->ucode_ver));
-
-       snprintf(priv->hw->wiphy->fw_version,
-                sizeof(priv->hw->wiphy->fw_version),
-                "%u.%u.%u.%u",
-                IWL_UCODE_MAJOR(priv->ucode_ver),
-                IWL_UCODE_MINOR(priv->ucode_ver),
-                IWL_UCODE_API(priv->ucode_ver),
-                IWL_UCODE_SERIAL(priv->ucode_ver));
-
-       /*
-        * For any of the failures below (before allocating pci memory)
-        * we will try to load a version with a smaller API -- maybe the
-        * user just got a corrupted version of the latest API.
-        */
-
-       IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
-                      priv->ucode_ver);
-       IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
-                      pieces.inst_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
-                      pieces.data_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
-                      pieces.init_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
-                      pieces.init_data_size);
-       IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
-                      pieces.boot_size);
-
-       /* Verify that uCode images will fit in card's SRAM */
-       if (pieces.inst_size > priv->hw_params.max_inst_size) {
-               IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
-                       pieces.inst_size);
-               goto try_again;
-       }
-
-       if (pieces.data_size > priv->hw_params.max_data_size) {
-               IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
-                       pieces.data_size);
-               goto try_again;
-       }
-
-       if (pieces.init_size > priv->hw_params.max_inst_size) {
-               IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
-                       pieces.init_size);
-               goto try_again;
-       }
-
-       if (pieces.init_data_size > priv->hw_params.max_data_size) {
-               IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
-                       pieces.init_data_size);
-               goto try_again;
-       }
-
-       if (pieces.boot_size > priv->hw_params.max_bsm_size) {
-               IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
-                       pieces.boot_size);
-               goto try_again;
-       }
-
-       /* Allocate ucode buffers for card's bus-master loading ... */
-
-       /* Runtime instructions and 2 copies of data:
-        * 1) unmodified from disk
-        * 2) backup cache for save/restore during power-downs */
-       priv->ucode_code.len = pieces.inst_size;
-       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
-       priv->ucode_data.len = pieces.data_size;
-       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
-       priv->ucode_data_backup.len = pieces.data_size;
-       iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
-       if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
-           !priv->ucode_data_backup.v_addr)
-               goto err_pci_alloc;
-
-       /* Initialization instructions and data */
-       if (pieces.init_size && pieces.init_data_size) {
-               priv->ucode_init.len = pieces.init_size;
-               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
-               priv->ucode_init_data.len = pieces.init_data_size;
-               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
-               if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
-                       goto err_pci_alloc;
-       }
-
-       /* Bootstrap (instructions only, no data) */
-       if (pieces.boot_size) {
-               priv->ucode_boot.len = pieces.boot_size;
-               iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
-               if (!priv->ucode_boot.v_addr)
-                       goto err_pci_alloc;
-       }
-
-       /* Now that we can no longer fail, copy information */
-
-       priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
-       /* Copy images into buffers for card's bus-master reads ... */
-
-       /* Runtime instructions (first block of data in file) */
-       IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
-                       pieces.inst_size);
-       memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
-
-       IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
-               priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
-       /*
-        * Runtime data
-        * NOTE:  Copy into backup buffer will be done in iwl_up()
-        */
-       IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
-                       pieces.data_size);
-       memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
-       memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
-
-       /* Initialization instructions */
-       if (pieces.init_size) {
-               IWL_DEBUG_INFO(priv,
-                               "Copying (but not loading) init instr len %Zd\n",
-                               pieces.init_size);
-               memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
-       }
-
-       /* Initialization data */
-       if (pieces.init_data_size) {
-               IWL_DEBUG_INFO(priv,
-                               "Copying (but not loading) init data len %Zd\n",
-                              pieces.init_data_size);
-               memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
-                      pieces.init_data_size);
-       }
-
-       /* Bootstrap instructions */
-       IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
-                       pieces.boot_size);
-       memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
-
-       /*
-        * figure out the offsets of the chain noise reset and gain commands
-        * based on the size of the standard phy calibration commands table
-        */
-       priv->_4965.phy_calib_chain_noise_reset_cmd =
-               standard_phy_calibration_size;
-       priv->_4965.phy_calib_chain_noise_gain_cmd =
-               standard_phy_calibration_size + 1;
-
-       /**************************************************
-        * This is still part of probe() in a sense...
-        *
-        * 9. Setup and register with mac80211 and debugfs
-        **************************************************/
-       err = iwl4965_mac_setup_register(priv, max_probe_length);
-       if (err)
-               goto out_unbind;
-
-       err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
-       if (err)
-               IWL_ERR(priv,
-               "failed to create debugfs files. Ignoring error: %d\n", err);
-
-       err = sysfs_create_group(&priv->pci_dev->dev.kobj,
-                                       &iwl_attribute_group);
-       if (err) {
-               IWL_ERR(priv, "failed to create sysfs device attributes\n");
-               goto out_unbind;
-       }
-
-       /* We have our copies now, allow the OS to release its copy */
-       release_firmware(ucode_raw);
-       complete(&priv->_4965.firmware_loading_complete);
-       return;
-
- try_again:
-       /* try next, if any */
-       if (iwl4965_request_firmware(priv, false))
-               goto out_unbind;
-       release_firmware(ucode_raw);
-       return;
-
- err_pci_alloc:
-       IWL_ERR(priv, "failed to allocate pci memory\n");
-       iwl4965_dealloc_ucode_pci(priv);
- out_unbind:
-       complete(&priv->_4965.firmware_loading_complete);
-       device_release_driver(&priv->pci_dev->dev);
-       release_firmware(ucode_raw);
-}
-
-static const char * const desc_lookup_text[] = {
-       "OK",
-       "FAIL",
-       "BAD_PARAM",
-       "BAD_CHECKSUM",
-       "NMI_INTERRUPT_WDG",
-       "SYSASSERT",
-       "FATAL_ERROR",
-       "BAD_COMMAND",
-       "HW_ERROR_TUNE_LOCK",
-       "HW_ERROR_TEMPERATURE",
-       "ILLEGAL_CHAN_FREQ",
-       "VCC_NOT_STABLE",
-       "FH_ERROR",
-       "NMI_INTERRUPT_HOST",
-       "NMI_INTERRUPT_ACTION_PT",
-       "NMI_INTERRUPT_UNKNOWN",
-       "UCODE_VERSION_MISMATCH",
-       "HW_ERROR_ABS_LOCK",
-       "HW_ERROR_CAL_LOCK_FAIL",
-       "NMI_INTERRUPT_INST_ACTION_PT",
-       "NMI_INTERRUPT_DATA_ACTION_PT",
-       "NMI_TRM_HW_ER",
-       "NMI_INTERRUPT_TRM",
-       "NMI_INTERRUPT_BREAK_POINT",
-       "DEBUG_0",
-       "DEBUG_1",
-       "DEBUG_2",
-       "DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
-       { "NMI_INTERRUPT_WDG", 0x34 },
-       { "SYSASSERT", 0x35 },
-       { "UCODE_VERSION_MISMATCH", 0x37 },
-       { "BAD_COMMAND", 0x38 },
-       { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
-       { "FATAL_ERROR", 0x3D },
-       { "NMI_TRM_HW_ERR", 0x46 },
-       { "NMI_INTERRUPT_TRM", 0x4C },
-       { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
-       { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
-       { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
-       { "NMI_INTERRUPT_HOST", 0x66 },
-       { "NMI_INTERRUPT_ACTION_PT", 0x7C },
-       { "NMI_INTERRUPT_UNKNOWN", 0x84 },
-       { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
-       { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *iwl4965_desc_lookup(u32 num)
-{
-       int i;
-       int max = ARRAY_SIZE(desc_lookup_text);
-
-       if (num < max)
-               return desc_lookup_text[num];
-
-       max = ARRAY_SIZE(advanced_lookup) - 1;
-       for (i = 0; i < max; i++) {
-               if (advanced_lookup[i].num == num)
-                       break;
-       }
-       return advanced_lookup[i].name;
-}
-
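-/*
- * Example: iwl4965_desc_lookup(0x38) falls past the 28-entry
- * desc_lookup_text[] table and matches { "BAD_COMMAND", 0x38 } in
- * advanced_lookup[], so "BAD_COMMAND" is returned; any value not found in
- * either table falls through to the final "ADVANCED_SYSASSERT" entry.
- */
-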
-#define ERROR_START_OFFSET  (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
-
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
-{
-       u32 data2, line;
-       u32 desc, time, count, base, data1;
-       u32 blink1, blink2, ilink1, ilink2;
-       u32 pc, hcmd;
-
-       if (priv->ucode_type == UCODE_INIT) {
-               base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
-       } else {
-               base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-       }
-
-       if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
-               IWL_ERR(priv,
-                       "Not valid error log pointer 0x%08X for %s uCode\n",
-                       base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
-               return;
-       }
-
-       count = iwl_legacy_read_targ_mem(priv, base);
-
-       if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
-               IWL_ERR(priv, "Start IWL Error Log Dump:\n");
-               IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
-                       priv->status, count);
-       }
-
-       desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
-       priv->isr_stats.err_code = desc;
-       pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
-       blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
-       blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
-       ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
-       ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
-       data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
-       data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
-       line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
-       time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
-       hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
-
-       trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
-                                       time, data1, data2, line,
-                                     blink1, blink2, ilink1, ilink2);
-
-       IWL_ERR(priv, "Desc                                  Time       "
-               "data1      data2      line\n");
-       IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
-               iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
-       IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
-       IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
-               pc, blink1, blink2, ilink1, ilink2, hcmd);
-}
-
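-/*
- * Layout of the uCode error_event_table as read above, in 32-bit words
- * from 'base': 0 = valid entry count, 1 = desc, 2 = pc, 3 = blink1,
- * 4 = blink2, 5 = ilink1, 6 = ilink2, 7 = data1, 8 = data2, 9 = line,
- * 11 = time, 22 = hcmd (last host command).
- */
-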
-static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
-{
-       struct iwl_ct_kill_config cmd;
-       unsigned long flags;
-       int ret = 0;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-                   CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       cmd.critical_temperature_R =
-               cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
-       ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
-                              sizeof(cmd), &cmd);
-       if (ret)
-               IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
-       else
-               IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
-                               "succeeded, "
-                               "critical temperature is %d\n",
-                               priv->hw_params.ct_kill_threshold);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
-       IWL_TX_FIFO_VO,
-       IWL_TX_FIFO_VI,
-       IWL_TX_FIFO_BE,
-       IWL_TX_FIFO_BK,
-       IWL49_CMD_FIFO_NUM,
-       IWL_TX_FIFO_UNUSED,
-       IWL_TX_FIFO_UNUSED,
-};
-
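-/*
- * The table above maps the first five Tx queues to hardware FIFOs:
- * queues 0-3 feed the VO, VI, BE and BK AC FIFOs, queue 4 feeds the host
- * command FIFO (IWL49_CMD_FIFO_NUM), and queues 5-6 are marked
- * IWL_TX_FIFO_UNUSED so they get no FIFO assignment.
- * iwl4965_alive_notify() below walks this table to activate each queue.
- */
-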
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
-       u32 a;
-       unsigned long flags;
-       int i, chan;
-       u32 reg_val;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Clear 4965's internal Tx Scheduler data base */
-       priv->scd_base_addr = iwl_legacy_read_prph(priv,
-                                       IWL49_SCD_SRAM_BASE_ADDR);
-       a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
-       for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
-               iwl_legacy_write_targ_mem(priv, a, 0);
-       for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
-               iwl_legacy_write_targ_mem(priv, a, 0);
-       for (; a < priv->scd_base_addr +
-              IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
-               iwl_legacy_write_targ_mem(priv, a, 0);
-
-       /* Tell 4965 where to find Tx byte count tables */
-       iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
-                       priv->scd_bc_tbls.dma >> 10);
-
-       /* Enable DMA channel */
-       for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
-               iwl_legacy_write_direct32(priv,
-                               FH_TCSR_CHNL_TX_CONFIG_REG(chan),
-                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
-                               FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
-       /* Update FH chicken bits */
-       reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
-       iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
-                          reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
-       /* Disable chain mode for all queues */
-       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
-       /* Initialize each Tx queue (including the command queue) */
-       for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
-               /* TFD circular buffer read/write indexes */
-               iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
-               iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
-               /* Max Tx Window size for Scheduler-ACK mode */
-               iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
-                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
-                               (SCD_WIN_SIZE <<
-                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
-                               IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
-               /* Frame limit */
-               iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
-                               IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
-                               sizeof(u32),
-                               (SCD_FRAME_LIMIT <<
-                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-                               IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
-       }
-       iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
-                                (1 << priv->hw_params.max_txq_num) - 1);
-
-       /* Activate all Tx DMA/FIFO channels */
-       iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
-
-       iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-       /* make sure all queues are not stopped */
-       memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
-       for (i = 0; i < 4; i++)
-               atomic_set(&priv->queue_stop_count[i], 0);
-
-       /* reset to 0 to enable all queues first */
-       priv->txq_ctx_active_msk = 0;
-       /* Map each Tx/cmd queue to its corresponding fifo */
-       BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
-       for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
-               int ac = default_queue_to_tx_fifo[i];
-
-               iwl_txq_ctx_activate(priv, i);
-
-               if (ac == IWL_TX_FIFO_UNUSED)
-                       continue;
-
-               iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
-       }
-
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-/**
- * iwl4965_alive_start - called after REPLY_ALIVE notification received
- *                   from protocol/runtime uCode (initialization uCode's
- *                   Alive gets handled by iwl_init_alive_start()).
- */
-static void iwl4965_alive_start(struct iwl_priv *priv)
-{
-       int ret = 0;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-       IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
-       if (priv->card_alive.is_valid != UCODE_VALID_OK) {
-               /* We had an error bringing up the hardware, so take it
-                * all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Alive failed.\n");
-               goto restart;
-       }
-
-       /* The initialization uCode has loaded the runtime uCode ... verify inst image.
-        * This is a paranoid check, because we would not have gotten the
-        * "runtime" alive if code weren't properly loaded.  */
-       if (iwl4965_verify_ucode(priv)) {
-               /* Runtime instruction load was bad;
-                * take it all the way back down so we can try again */
-               IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
-               goto restart;
-       }
-
-       ret = iwl4965_alive_notify(priv);
-       if (ret) {
-               IWL_WARN(priv,
-                       "Could not complete ALIVE transition [ntf]: %d\n", ret);
-               goto restart;
-       }
-
-
-       /* After the ALIVE response, we can send host commands to the uCode */
-       set_bit(STATUS_ALIVE, &priv->status);
-
-       /* Enable watchdog to monitor the driver tx queues */
-       iwl_legacy_setup_watchdog(priv);
-
-       if (iwl_legacy_is_rfkill(priv))
-               return;
-
-       ieee80211_wake_queues(priv->hw);
-
-       priv->active_rate = IWL_RATES_MASK;
-
-       if (iwl_legacy_is_associated_ctx(ctx)) {
-               struct iwl_legacy_rxon_cmd *active_rxon =
-                               (struct iwl_legacy_rxon_cmd *)&ctx->active;
-               /* apply any changes in staging */
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-               active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-       } else {
-               struct iwl_rxon_context *tmp;
-               /* Initialize our rx_config data */
-               for_each_context(priv, tmp)
-                       iwl_legacy_connection_init_rx_config(priv, tmp);
-
-               if (priv->cfg->ops->hcmd->set_rxon_chain)
-                       priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-       }
-
-       /* Configure bluetooth coexistence if enabled */
-       iwl_legacy_send_bt_config(priv);
-
-       iwl4965_reset_run_time_calib(priv);
-
-       set_bit(STATUS_READY, &priv->status);
-
-       /* Configure the adapter for unassociated operation */
-       iwl_legacy_commit_rxon(priv, ctx);
-
-       /* At this point, the NIC is initialized and operational */
-       iwl4965_rf_kill_ct_config(priv);
-
-       IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
-       wake_up(&priv->wait_command_queue);
-
-       iwl_legacy_power_update_mode(priv, true);
-       IWL_DEBUG_INFO(priv, "Updated power mode\n");
-
-       return;
-
- restart:
-       queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl4965_down(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       int exit_pending;
-
-       IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
-       iwl_legacy_scan_cancel_timeout(priv, 200);
-
-       exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* Stop the TX-queue watchdog. The STATUS_EXIT_PENDING bit must be set
-        * to prevent the timer from being rearmed */
-       del_timer_sync(&priv->watchdog);
-
-       iwl_legacy_clear_ucode_stations(priv, NULL);
-       iwl_legacy_dealloc_bcast_stations(priv);
-       iwl_legacy_clear_driver_stations(priv);
-
-       /* Unblock any waiting calls */
-       wake_up_all(&priv->wait_command_queue);
-
-       /* Wipe out the EXIT_PENDING status bit if we are not actually
-        * exiting the module */
-       if (!exit_pending)
-               clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* stop and reset the on-board processor */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-       /* tell the device to stop sending interrupts */
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_legacy_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-       iwl4965_synchronize_irq(priv);
-
-       if (priv->mac80211_registered)
-               ieee80211_stop_queues(priv->hw);
-
-       /* If we have not previously called iwl_init() then
-        * clear all bits but the RF Kill bit and return */
-       if (!iwl_legacy_is_init(priv)) {
-               priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-                                       STATUS_RF_KILL_HW |
-                              test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-                                       STATUS_GEO_CONFIGURED |
-                              test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-                                       STATUS_EXIT_PENDING;
-               goto exit;
-       }
-
-       /* ...otherwise clear out all the status bits but the RF Kill
-        * bit and continue taking the NIC down. */
-       priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
-                               STATUS_RF_KILL_HW |
-                       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
-                               STATUS_GEO_CONFIGURED |
-                       test_bit(STATUS_FW_ERROR, &priv->status) <<
-                               STATUS_FW_ERROR |
-                      test_bit(STATUS_EXIT_PENDING, &priv->status) <<
-                               STATUS_EXIT_PENDING;
-
-       iwl4965_txq_ctx_stop(priv);
-       iwl4965_rxq_stop(priv);
-
-       /* Power-down device's busmaster DMA clocks */
-       iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-       udelay(5);
-
-       /* Make sure (redundant) we've released our request to stay awake */
-       iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
-                               CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-       /* Stop the device, and put it in low power state */
-       iwl_legacy_apm_stop(priv);
-
- exit:
-       memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
-       dev_kfree_skb(priv->beacon_skb);
-       priv->beacon_skb = NULL;
-
-       /* clear out any free frames */
-       iwl4965_clear_free_frames(priv);
-}
-
-static void iwl4965_down(struct iwl_priv *priv)
-{
-       mutex_lock(&priv->mutex);
-       __iwl4965_down(priv);
-       mutex_unlock(&priv->mutex);
-
-       iwl4965_cancel_deferred_work(priv);
-}
-
-#define HW_READY_TIMEOUT (50)
-
-static int iwl4965_set_hw_ready(struct iwl_priv *priv)
-{
-       int ret = 0;
-
-       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
-       /* See if we got it */
-       ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
-                               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-                               CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
-                               HW_READY_TIMEOUT);
-       if (ret != -ETIMEDOUT)
-               priv->hw_ready = true;
-       else
-               priv->hw_ready = false;
-
-       IWL_DEBUG_INFO(priv, "hardware %s\n",
-                     priv->hw_ready ? "ready" : "not ready");
-       return ret;
-}
-
-static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
-{
-       int ret = 0;
-
-       IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
-
-       ret = iwl4965_set_hw_ready(priv);
-       if (priv->hw_ready)
-               return ret;
-
-       /* If HW is not ready, prepare the conditions to check again */
-       iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
-                       CSR_HW_IF_CONFIG_REG_PREPARE);
-
-       ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
-                       ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
-                       CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
-       /* HW should be ready by now, check again. */
-       if (ret != -ETIMEDOUT)
-               iwl4965_set_hw_ready(priv);
-
-       return ret;
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int __iwl4965_up(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx;
-       int i;
-       int ret;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-               return -EIO;
-       }
-
-       if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
-               IWL_ERR(priv, "ucode not available for device bringup\n");
-               return -EIO;
-       }
-
-       for_each_context(priv, ctx) {
-               ret = iwl4965_alloc_bcast_station(priv, ctx);
-               if (ret) {
-                       iwl_legacy_dealloc_bcast_stations(priv);
-                       return ret;
-               }
-       }
-
-       iwl4965_prepare_card_hw(priv);
-
-       if (!priv->hw_ready) {
-               IWL_WARN(priv, "Exit HW not ready\n");
-               return -EIO;
-       }
-
-       /* If platform's RF_KILL switch is NOT set to KILL */
-       if (iwl_read32(priv,
-               CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-       if (iwl_legacy_is_rfkill(priv)) {
-               wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-
-               iwl_legacy_enable_interrupts(priv);
-               IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
-               return 0;
-       }
-
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
-       /* must be initialized before iwl4965_hw_nic_init */
-       priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
-       ret = iwl4965_hw_nic_init(priv);
-       if (ret) {
-               IWL_ERR(priv, "Unable to init nic\n");
-               return ret;
-       }
-
-       /* make sure rfkill handshake bits are cleared */
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-                   CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
-       /* clear (again), then enable host interrupts */
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-       iwl_legacy_enable_interrupts(priv);
-
-       /* really make sure rfkill handshake bits are cleared */
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-       iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
-       /* Copy original ucode data image from disk into backup cache.
-        * This will be used to initialize the on-board processor's
-        * data SRAM for a clean start when the runtime program first loads. */
-       memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
-              priv->ucode_data.len);
-
-       for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
-               /* load bootstrap state machine,
-                * load bootstrap program into processor's memory,
-                * prepare to load the "initialize" uCode */
-               ret = priv->cfg->ops->lib->load_ucode(priv);
-
-               if (ret) {
-                       IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
-                               ret);
-                       continue;
-               }
-
-               /* start card; "initialize" will load runtime ucode */
-               iwl4965_nic_start(priv);
-
-               IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
-               return 0;
-       }
-
-       set_bit(STATUS_EXIT_PENDING, &priv->status);
-       __iwl4965_down(priv);
-       clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       /* tried to restart and configure the device for as long as our
-        * patience could withstand */
-       IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
-       return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl4965_bg_init_alive_start(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, init_alive_start.work);
-
-       mutex_lock(&priv->mutex);
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               goto out;
-
-       priv->cfg->ops->lib->init_alive_start(priv);
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_alive_start(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, alive_start.work);
-
-       mutex_lock(&priv->mutex);
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               goto out;
-
-       iwl4965_alive_start(priv);
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                       run_time_calib_work);
-
-       mutex_lock(&priv->mutex);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status)) {
-               mutex_unlock(&priv->mutex);
-               return;
-       }
-
-       if (priv->start_calib) {
-               iwl4965_chain_noise_calibration(priv,
-                               (void *)&priv->_4965.statistics);
-               iwl4965_sensitivity_calibration(priv,
-                               (void *)&priv->_4965.statistics);
-       }
-
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_restart(struct work_struct *data)
-{
-       struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
-               struct iwl_rxon_context *ctx;
-
-               mutex_lock(&priv->mutex);
-               for_each_context(priv, ctx)
-                       ctx->vif = NULL;
-               priv->is_open = 0;
-
-               __iwl4965_down(priv);
-
-               mutex_unlock(&priv->mutex);
-               iwl4965_cancel_deferred_work(priv);
-               ieee80211_restart_hw(priv->hw);
-       } else {
-               iwl4965_down(priv);
-
-               mutex_lock(&priv->mutex);
-               if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-                       mutex_unlock(&priv->mutex);
-                       return;
-               }
-
-               __iwl4965_up(priv);
-               mutex_unlock(&priv->mutex);
-       }
-}
-
-static void iwl4965_bg_rx_replenish(struct work_struct *data)
-{
-       struct iwl_priv *priv =
-           container_of(data, struct iwl_priv, rx_replenish);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
-       iwl4965_rx_replenish(priv);
-       mutex_unlock(&priv->mutex);
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT    (4 * HZ)
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
-                                 u32 max_probe_length)
-{
-       int ret;
-       struct ieee80211_hw *hw = priv->hw;
-       struct iwl_rxon_context *ctx;
-
-       hw->rate_control_algorithm = "iwl-4965-rs";
-
-       /* Tell mac80211 our characteristics */
-       hw->flags = IEEE80211_HW_SIGNAL_DBM |
-                   IEEE80211_HW_AMPDU_AGGREGATION |
-                   IEEE80211_HW_NEED_DTIM_PERIOD |
-                   IEEE80211_HW_SPECTRUM_MGMT |
-                   IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
-       if (priv->cfg->sku & IWL_SKU_N)
-               hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-                            IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
-       hw->sta_data_size = sizeof(struct iwl_station_priv);
-       hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-       for_each_context(priv, ctx) {
-               hw->wiphy->interface_modes |= ctx->interface_modes;
-               hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
-       }
-
-       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
-       /*
-        * For now, disable PS by default because it affects
-        * RX performance significantly.
-        */
-       hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
-       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
-       /* we create the 802.11 header and a zero-length SSID element */
-       hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
-
-       /* Default value; 4 EDCA QOS priorities */
-       hw->queues = 4;
-
-       hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
-       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &priv->bands[IEEE80211_BAND_2GHZ];
-       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &priv->bands[IEEE80211_BAND_5GHZ];
-
-       iwl_legacy_leds_init(priv);
-
-       ret = ieee80211_register_hw(priv->hw);
-       if (ret) {
-               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-               return ret;
-       }
-       priv->mac80211_registered = 1;
-
-       return 0;
-}
-
-
-int iwl4965_mac_start(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       /* we should be verifying the device is ready to be opened */
-       mutex_lock(&priv->mutex);
-       ret = __iwl4965_up(priv);
-       mutex_unlock(&priv->mutex);
-
-       if (ret)
-               return ret;
-
-       if (iwl_legacy_is_rfkill(priv))
-               goto out;
-
-       IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
-       /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
-        * mac80211 will not be run successfully. */
-       ret = wait_event_timeout(priv->wait_command_queue,
-                       test_bit(STATUS_READY, &priv->status),
-                       UCODE_READY_TIMEOUT);
-       if (!ret) {
-               if (!test_bit(STATUS_READY, &priv->status)) {
-                       IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
-                               jiffies_to_msecs(UCODE_READY_TIMEOUT));
-                       return -ETIMEDOUT;
-               }
-       }
-
-       iwl4965_led_enable(priv);
-
-out:
-       priv->is_open = 1;
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
-}
-
-void iwl4965_mac_stop(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (!priv->is_open)
-               return;
-
-       priv->is_open = 0;
-
-       iwl4965_down(priv);
-
-       flush_workqueue(priv->workqueue);
-
-       /* User space software may expect to get rfkill changes
-        * even if the interface is down */
-       iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-       iwl_legacy_enable_rfkill_int(priv);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MACDUMP(priv, "enter\n");
-
-       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-       if (iwl4965_tx_skb(priv, skb))
-               dev_kfree_skb_any(skb);
-
-       IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
-                               struct ieee80211_vif *vif,
-                               struct ieee80211_key_conf *keyconf,
-                               struct ieee80211_sta *sta,
-                               u32 iv32, u16 *phase1key)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
-                           iv32, phase1key);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-                      struct ieee80211_key_conf *key)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       struct iwl_rxon_context *ctx = vif_priv->ctx;
-       int ret;
-       u8 sta_id;
-       bool is_default_wep_key = false;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (priv->cfg->mod_params->sw_crypto) {
-               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-               return -EOPNOTSUPP;
-       }
-
-       sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
-       if (sta_id == IWL_INVALID_STATION)
-               return -EINVAL;
-
-       mutex_lock(&priv->mutex);
-       iwl_legacy_scan_cancel_timeout(priv, 100);
-
-       /*
-        * If we are getting a WEP group key and we didn't receive any key
-        * mapping so far, we are in legacy WEP mode (group key only);
-        * otherwise we are in 1X mode.
-        * In legacy WEP mode, we use a different host command to the uCode.
-        */
-       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-            key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
-           !sta) {
-               if (cmd == SET_KEY)
-                       is_default_wep_key = !ctx->key_mapping_keys;
-               else
-                       is_default_wep_key =
-                                       (key->hw_key_idx == HW_KEY_DEFAULT);
-       }
-
-       switch (cmd) {
-       case SET_KEY:
-               if (is_default_wep_key)
-                       ret = iwl4965_set_default_wep_key(priv,
-                                                       vif_priv->ctx, key);
-               else
-                       ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
-                                                 key, sta_id);
-
-               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-               break;
-       case DISABLE_KEY:
-               if (is_default_wep_key)
-                       ret = iwl4965_remove_default_wep_key(priv, ctx, key);
-               else
-                       ret = iwl4965_remove_dynamic_key(priv, ctx,
-                                                       key, sta_id);
-
-               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       mutex_unlock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
-                           struct ieee80211_vif *vif,
-                           enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                           u8 buf_size)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret = -EINVAL;
-
-       IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
-                    sta->addr, tid);
-
-       if (!(priv->cfg->sku & IWL_SKU_N))
-               return -EACCES;
-
-       mutex_lock(&priv->mutex);
-
-       switch (action) {
-       case IEEE80211_AMPDU_RX_START:
-               IWL_DEBUG_HT(priv, "start Rx\n");
-               ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
-               break;
-       case IEEE80211_AMPDU_RX_STOP:
-               IWL_DEBUG_HT(priv, "stop Rx\n");
-               ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
-               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-                       ret = 0;
-               break;
-       case IEEE80211_AMPDU_TX_START:
-               IWL_DEBUG_HT(priv, "start Tx\n");
-               ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
-               break;
-       case IEEE80211_AMPDU_TX_STOP:
-               IWL_DEBUG_HT(priv, "stop Tx\n");
-               ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
-               if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-                       ret = 0;
-               break;
-       case IEEE80211_AMPDU_TX_OPERATIONAL:
-               ret = 0;
-               break;
-       }
-       mutex_unlock(&priv->mutex);
-
-       return ret;
-}
-
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
-                      struct ieee80211_vif *vif,
-                      struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-       int ret;
-       u8 sta_id;
-
-       IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
-                       sta->addr);
-       mutex_lock(&priv->mutex);
-       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-                       sta->addr);
-       sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-       atomic_set(&sta_priv->pending_frames, 0);
-
-       ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
-                                    is_ap, sta, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-                       sta->addr, ret);
-               /* Should we return success if return code is EEXIST ? */
-               mutex_unlock(&priv->mutex);
-               return ret;
-       }
-
-       sta_priv->common.sta_id = sta_id;
-
-       /* Initialize rate scaling */
-       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-                      sta->addr);
-       iwl4965_rs_rate_init(priv, sta, sta_id);
-       mutex_unlock(&priv->mutex);
-
-       return 0;
-}
-
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
-                              struct ieee80211_channel_switch *ch_switch)
-{
-       struct iwl_priv *priv = hw->priv;
-       const struct iwl_channel_info *ch_info;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_channel *channel = ch_switch->channel;
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       u16 ch;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       mutex_lock(&priv->mutex);
-
-       if (iwl_legacy_is_rfkill(priv))
-               goto out;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status) ||
-           test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-               goto out;
-
-       if (!iwl_legacy_is_associated_ctx(ctx))
-               goto out;
-
-       if (!priv->cfg->ops->lib->set_channel_switch)
-               goto out;
-
-       ch = channel->hw_value;
-       if (le16_to_cpu(ctx->active.channel) == ch)
-               goto out;
-
-       ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
-       if (!iwl_legacy_is_channel_valid(ch_info)) {
-               IWL_DEBUG_MAC80211(priv, "invalid channel\n");
-               goto out;
-       }
-
-       spin_lock_irq(&priv->lock);
-
-       priv->current_ht_config.smps = conf->smps_mode;
-
-       /* Configure HT40 channels */
-       ctx->ht.enabled = conf_is_ht(conf);
-       if (ctx->ht.enabled) {
-               if (conf_is_ht40_minus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                       ctx->ht.is_40mhz = true;
-               } else if (conf_is_ht40_plus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                       ctx->ht.is_40mhz = true;
-               } else {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                       ctx->ht.is_40mhz = false;
-               }
-       } else
-               ctx->ht.is_40mhz = false;
-
-       if ((le16_to_cpu(ctx->staging.channel) != ch))
-               ctx->staging.flags = 0;
-
-       iwl_legacy_set_rxon_channel(priv, channel, ctx);
-       iwl_legacy_set_rxon_ht(priv, ht_conf);
-       iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
-       spin_unlock_irq(&priv->lock);
-
-       iwl_legacy_set_rate(priv);
-       /*
-        * at this point, staging_rxon has the
-        * configuration for channel switch
-        */
-       set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
-       priv->switch_channel = cpu_to_le16(ch);
-       if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
-               clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
-               priv->switch_channel = 0;
-               ieee80211_chswitch_done(ctx->vif, false);
-       }
-
-out:
-       mutex_unlock(&priv->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
-                            unsigned int changed_flags,
-                            unsigned int *total_flags,
-                            u64 multicast)
-{
-       struct iwl_priv *priv = hw->priv;
-       __le32 filter_or = 0, filter_nand = 0;
-       struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag)        do { \
-       if (*total_flags & (test))              \
-               filter_or |= (flag);            \
-       else                                    \
-               filter_nand |= (flag);          \
-       } while (0)
-
-       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-                       changed_flags, *total_flags);
-
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-       /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
-       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
-       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-       mutex_lock(&priv->mutex);
-
-       for_each_context(priv, ctx) {
-               ctx->staging.filter_flags &= ~filter_nand;
-               ctx->staging.filter_flags |= filter_or;
-
-               /*
-                * Not committing directly because hardware can perform a scan,
-                * but we'll eventually commit the filter flags change anyway.
-                */
-       }
-
-       mutex_unlock(&priv->mutex);
-
-       /*
-        * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_legacy_connection_init_rx_config()
-        * since we currently do not support programming multicast
-        * filters into the device.
-        */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                       txpower_work);
-
-       mutex_lock(&priv->mutex);
-
-       /* If a scan happened to start before we got here
-        * then just return; the statistics notification will
-        * kick off another scheduled work to compensate for
-        * any temperature delta we missed here. */
-       if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
-           test_bit(STATUS_SCANNING, &priv->status))
-               goto out;
-
-       /* Regardless of whether we are associated, we must reconfigure the
-        * TX power since frames can be sent on non-radar channels while
-        * not associated */
-       priv->cfg->ops->lib->send_tx_power(priv);
-
-       /* Update last_temperature to keep is_calib_needed from running
-        * when it isn't needed... */
-       priv->last_temperature = priv->temperature;
-out:
-       mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
-       priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
-       init_waitqueue_head(&priv->wait_command_queue);
-
-       INIT_WORK(&priv->restart, iwl4965_bg_restart);
-       INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
-       INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
-       INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
-       INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
-
-       iwl_legacy_setup_scan_deferred_work(priv);
-
-       INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-
-       init_timer(&priv->statistics_periodic);
-       priv->statistics_periodic.data = (unsigned long)priv;
-       priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
-
-       init_timer(&priv->watchdog);
-       priv->watchdog.data = (unsigned long)priv;
-       priv->watchdog.function = iwl_legacy_bg_watchdog;
-
-       tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
-               iwl4965_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
-       cancel_work_sync(&priv->txpower_work);
-       cancel_delayed_work_sync(&priv->init_alive_start);
-       cancel_delayed_work(&priv->alive_start);
-       cancel_work_sync(&priv->run_time_calib_work);
-
-       iwl_legacy_cancel_scan_deferred_work(priv);
-
-       del_timer_sync(&priv->statistics_periodic);
-}
-
-static void iwl4965_init_hw_rates(struct iwl_priv *priv,
-                             struct ieee80211_rate *rates)
-{
-       int i;
-
-       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
-               rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
-               rates[i].hw_value = i; /* Rate scaling will work on indexes */
-               rates[i].hw_value_short = i;
-               rates[i].flags = 0;
-               if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
-                       /*
-                        * If CCK != 1M then set short preamble rate flag.
-                        */
-                       rates[i].flags |=
-                               (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
-                                       0 : IEEE80211_RATE_SHORT_PREAMBLE;
-               }
-       }
-}
-/*
- * Acquire priv->lock before calling this function!
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
-       iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
-                            (index & 0xff) | (txq_id << 8));
-       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
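
As the comment above notes, callers must already hold priv->lock. A minimal usage sketch, mirroring the command-queue reset in the scheduler setup code removed at the top of this diff (the locking is shown only to illustrate the caller's responsibility):

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* point the command queue's write/read pointers at index 0 */
	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
	spin_unlock_irqrestore(&priv->lock, flags);
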
-
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
-                                       struct iwl_tx_queue *txq,
-                                       int tx_fifo_id, int scd_retry)
-{
-       int txq_id = txq->q.id;
-
-       /* Find out whether to activate Tx queue */
-       int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
-       /* Set up and activate */
-       iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
-                        (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
-                        (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
-                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
-                        (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
-                        IWL49_SCD_QUEUE_STTS_REG_MSK);
-
-       txq->sched_retry = scd_retry;
-
-       IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
-                      active ? "Activate" : "Deactivate",
-                      scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-
-static int iwl4965_init_drv(struct iwl_priv *priv)
-{
-       int ret;
-
-       spin_lock_init(&priv->sta_lock);
-       spin_lock_init(&priv->hcmd_lock);
-
-       INIT_LIST_HEAD(&priv->free_frames);
-
-       mutex_init(&priv->mutex);
-
-       priv->ieee_channels = NULL;
-       priv->ieee_rates = NULL;
-       priv->band = IEEE80211_BAND_2GHZ;
-
-       priv->iw_mode = NL80211_IFTYPE_STATION;
-       priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
-       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
-       /* initialize force reset */
-       priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
-       /* Choose which receivers/antennas to use */
-       if (priv->cfg->ops->hcmd->set_rxon_chain)
-               priv->cfg->ops->hcmd->set_rxon_chain(priv,
-                                       &priv->contexts[IWL_RXON_CTX_BSS]);
-
-       iwl_legacy_init_scan_params(priv);
-
-       ret = iwl_legacy_init_channel_map(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-               goto err;
-       }
-
-       ret = iwl_legacy_init_geos(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-               goto err_free_channel_map;
-       }
-       iwl4965_init_hw_rates(priv, priv->ieee_rates);
-
-       return 0;
-
-err_free_channel_map:
-       iwl_legacy_free_channel_map(priv);
-err:
-       return ret;
-}
-
-static void iwl4965_uninit_drv(struct iwl_priv *priv)
-{
-       iwl4965_calib_free_results(priv);
-       iwl_legacy_free_geos(priv);
-       iwl_legacy_free_channel_map(priv);
-       kfree(priv->scan_cmd);
-}
-
-static void iwl4965_hw_detect(struct iwl_priv *priv)
-{
-       priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
-       priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
-       priv->rev_id = priv->pci_dev->revision;
-       IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
-}
-
-static int iwl4965_set_hw_params(struct iwl_priv *priv)
-{
-       priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-       priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-       if (priv->cfg->mod_params->amsdu_size_8K)
-               priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
-       else
-               priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
-       priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
-       if (priv->cfg->mod_params->disable_11n)
-               priv->cfg->sku &= ~IWL_SKU_N;
-
-       /* Device-specific setup */
-       return priv->cfg->ops->lib->set_hw_params(priv);
-}
-
-static const u8 iwl4965_bss_ac_to_fifo[] = {
-       IWL_TX_FIFO_VO,
-       IWL_TX_FIFO_VI,
-       IWL_TX_FIFO_BE,
-       IWL_TX_FIFO_BK,
-};
-
-static const u8 iwl4965_bss_ac_to_queue[] = {
-       0, 1, 2, 3,
-};
-
-static int
-iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int err = 0, i;
-       struct iwl_priv *priv;
-       struct ieee80211_hw *hw;
-       struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
-       unsigned long flags;
-       u16 pci_cmd;
-
-       /************************
-        * 1. Allocating HW data
-        ************************/
-
-       hw = iwl_legacy_alloc_all(cfg);
-       if (!hw) {
-               err = -ENOMEM;
-               goto out;
-       }
-       priv = hw->priv;
-       /* At this point both hw and priv are allocated. */
-
-       /*
-        * The default context is always valid,
-        * more may be discovered when firmware
-        * is loaded.
-        */
-       priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
-       for (i = 0; i < NUM_IWL_RXON_CTX; i++)
-               priv->contexts[i].ctxid = i;
-
-       priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
-       priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
-       priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
-       priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
-       priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
-       priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
-       priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
-       priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
-       priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
-               BIT(NL80211_IFTYPE_ADHOC);
-       priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
-               BIT(NL80211_IFTYPE_STATION);
-       priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
-       priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
-       priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
-       priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
-       BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
-
-       SET_IEEE80211_DEV(hw, &pdev->dev);
-
-       IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
-       priv->cfg = cfg;
-       priv->pci_dev = pdev;
-       priv->inta_mask = CSR_INI_SET_MASK;
-
-       if (iwl_legacy_alloc_traffic_mem(priv))
-               IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
-       /**************************
-        * 2. Initializing PCI bus
-        **************************/
-       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-                               PCIE_LINK_STATE_CLKPM);
-
-       if (pci_enable_device(pdev)) {
-               err = -ENODEV;
-               goto out_ieee80211_free_hw;
-       }
-
-       pci_set_master(pdev);
-
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (err) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (!err)
-                       err = pci_set_consistent_dma_mask(pdev,
-                                                       DMA_BIT_MASK(32));
-               /* both attempts failed: */
-               if (err) {
-                       IWL_WARN(priv, "No suitable DMA available.\n");
-                       goto out_pci_disable_device;
-               }
-       }
-
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err)
-               goto out_pci_disable_device;
-
-       pci_set_drvdata(pdev, priv);
-
-
-       /***********************
-        * 3. Read REV register
-        ***********************/
-       priv->hw_base = pci_iomap(pdev, 0, 0);
-       if (!priv->hw_base) {
-               err = -ENODEV;
-               goto out_pci_release_regions;
-       }
-
-       IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
-               (unsigned long long) pci_resource_len(pdev, 0));
-       IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
-       /* these spin locks will be used in apm_ops.init and EEPROM access
-        * we should init now
-        */
-       spin_lock_init(&priv->reg_lock);
-       spin_lock_init(&priv->lock);
-
-       /*
-        * stop and reset the on-board processor just in case it is in a
-        * strange state ... like being left stranded by a primary kernel
-        * and this is now the kdump kernel trying to start up
-        */
-       iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
-       iwl4965_hw_detect(priv);
-       IWL_INFO(priv, "Detected %s, REV=0x%X\n",
-               priv->cfg->name, priv->hw_rev);
-
-       /* We disable the RETRY_TIMEOUT register (0x41) to keep
-        * PCI Tx retries from interfering with C3 CPU state */
-       pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
-       iwl4965_prepare_card_hw(priv);
-       if (!priv->hw_ready) {
-               IWL_WARN(priv, "Failed, HW not ready\n");
-               goto out_iounmap;
-       }
-
-       /*****************
-        * 4. Read EEPROM
-        *****************/
-       /* Read the EEPROM */
-       err = iwl_legacy_eeprom_init(priv);
-       if (err) {
-               IWL_ERR(priv, "Unable to init EEPROM\n");
-               goto out_iounmap;
-       }
-       err = iwl4965_eeprom_check_version(priv);
-       if (err)
-               goto out_free_eeprom;
-
-       /* extract MAC Address */
-       iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
-       IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
-       priv->hw->wiphy->addresses = priv->addresses;
-       priv->hw->wiphy->n_addresses = 1;
-
-       /************************
-        * 5. Setup HW constants
-        ************************/
-       if (iwl4965_set_hw_params(priv)) {
-               IWL_ERR(priv, "failed to set hw parameters\n");
-               goto out_free_eeprom;
-       }
-
-       /*******************
-        * 6. Setup priv
-        *******************/
-
-       err = iwl4965_init_drv(priv);
-       if (err)
-               goto out_free_eeprom;
-       /* At this point both hw and priv are initialized. */
-
-       /********************
-        * 7. Setup services
-        ********************/
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_legacy_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       pci_enable_msi(priv->pci_dev);
-
-       err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
-                         IRQF_SHARED, DRV_NAME, priv);
-       if (err) {
-               IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
-               goto out_disable_msi;
-       }
-
-       iwl4965_setup_deferred_work(priv);
-       iwl4965_setup_rx_handlers(priv);
-
-       /*********************************************
-        * 8. Enable interrupts and read RFKILL state
-        *********************************************/
-
-       /* enable rfkill interrupt: hw bug w/a */
-       pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
-       if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
-               pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
-               pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
-       }
-
-       iwl_legacy_enable_rfkill_int(priv);
-
-       /* If platform's RF_KILL switch is NOT set to KILL */
-       if (iwl_read32(priv, CSR_GP_CNTRL) &
-               CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
-               clear_bit(STATUS_RF_KILL_HW, &priv->status);
-       else
-               set_bit(STATUS_RF_KILL_HW, &priv->status);
-
-       wiphy_rfkill_set_hw_state(priv->hw->wiphy,
-               test_bit(STATUS_RF_KILL_HW, &priv->status));
-
-       iwl_legacy_power_initialize(priv);
-
-       init_completion(&priv->_4965.firmware_loading_complete);
-
-       err = iwl4965_request_firmware(priv, true);
-       if (err)
-               goto out_destroy_workqueue;
-
-       return 0;
-
- out_destroy_workqueue:
-       destroy_workqueue(priv->workqueue);
-       priv->workqueue = NULL;
-       free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
-       pci_disable_msi(priv->pci_dev);
-       iwl4965_uninit_drv(priv);
- out_free_eeprom:
-       iwl_legacy_eeprom_free(priv);
- out_iounmap:
-       pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
-       pci_set_drvdata(pdev, NULL);
-       pci_release_regions(pdev);
- out_pci_disable_device:
-       pci_disable_device(pdev);
- out_ieee80211_free_hw:
-       iwl_legacy_free_traffic_mem(priv);
-       ieee80211_free_hw(priv->hw);
- out:
-       return err;
-}
-
-static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
-{
-       struct iwl_priv *priv = pci_get_drvdata(pdev);
-       unsigned long flags;
-
-       if (!priv)
-               return;
-
-       wait_for_completion(&priv->_4965.firmware_loading_complete);
-
-       IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
-       iwl_legacy_dbgfs_unregister(priv);
-       sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
-
-       /* The ieee80211_unregister_hw call will cause iwl_mac_stop to be
-        * called and, in turn, iwl4965_down. Since we are removing the
-        * device we need to set the STATUS_EXIT_PENDING bit.
-        */
-       set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-       iwl_legacy_leds_exit(priv);
-
-       if (priv->mac80211_registered) {
-               ieee80211_unregister_hw(priv->hw);
-               priv->mac80211_registered = 0;
-       } else {
-               iwl4965_down(priv);
-       }
-
-       /*
-        * Make sure device is reset to low power before unloading driver.
-        * This may be redundant with iwl4965_down(), but there are paths to
-        * run iwl4965_down() without calling apm_ops.stop(), and there are
-        * paths to avoid running iwl4965_down() at all before leaving driver.
-        * This (inexpensive) call *makes sure* device is reset.
-        */
-       iwl_legacy_apm_stop(priv);
-
-       /* make sure we flush any pending irq or
-        * tasklet for the driver
-        */
-       spin_lock_irqsave(&priv->lock, flags);
-       iwl_legacy_disable_interrupts(priv);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       iwl4965_synchronize_irq(priv);
-
-       iwl4965_dealloc_ucode_pci(priv);
-
-       if (priv->rxq.bd)
-               iwl4965_rx_queue_free(priv, &priv->rxq);
-       iwl4965_hw_txq_ctx_free(priv);
-
-       iwl_legacy_eeprom_free(priv);
-
-
-       /*netif_stop_queue(dev); */
-       flush_workqueue(priv->workqueue);
-
-       /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
-        * priv->workqueue... so we can't take down the workqueue
-        * until now... */
-       destroy_workqueue(priv->workqueue);
-       priv->workqueue = NULL;
-       iwl_legacy_free_traffic_mem(priv);
-
-       free_irq(priv->pci_dev->irq, priv);
-       pci_disable_msi(priv->pci_dev);
-       pci_iounmap(pdev, priv->hw_base);
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-
-       iwl4965_uninit_drv(priv);
-
-       dev_kfree_skb(priv->beacon_skb);
-
-       ieee80211_free_hw(priv->hw);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask;
- * must be called under priv->lock and with MAC access held
- */
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
-       iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
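
For reference, the scheduler bring-up code removed at the top of this diff calls this helper, with priv->lock held, to enable the seven Tx DMA/FIFO channels covered by the default queue-to-FIFO map:

	/* enable Tx DMA/FIFO channels 0..6 */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
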
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
-#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
-       {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
-       {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
-#endif /* CONFIG_IWL4965 */
-
-       {0}
-};
-MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
-
-static struct pci_driver iwl4965_driver = {
-       .name = DRV_NAME,
-       .id_table = iwl4965_hw_card_ids,
-       .probe = iwl4965_pci_probe,
-       .remove = __devexit_p(iwl4965_pci_remove),
-       .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl4965_init(void)
-{
-
-       int ret;
-       pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
-       pr_info(DRV_COPYRIGHT "\n");
-
-       ret = iwl4965_rate_control_register();
-       if (ret) {
-               pr_err("Unable to register rate control algorithm: %d\n", ret);
-               return ret;
-       }
-
-       ret = pci_register_driver(&iwl4965_driver);
-       if (ret) {
-               pr_err("Unable to initialize PCI module\n");
-               goto error_register;
-       }
-
-       return ret;
-
-error_register:
-       iwl4965_rate_control_unregister();
-       return ret;
-}
-
-static void __exit iwl4965_exit(void)
-{
-       pci_unregister_driver(&iwl4965_driver);
-       iwl4965_rate_control_unregister();
-}
-
-module_exit(iwl4965_exit);
-module_init(iwl4965_init);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
-                  int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
similarity index 83%
rename from drivers/net/wireless/iwlegacy/iwl-prph.h
rename to drivers/net/wireless/iwlegacy/prph.h
index 30a4930..ffec4b4 100644 (file)
@@ -60,8 +60,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
 
-#ifndef        __iwl_legacy_prph_h__
-#define __iwl_legacy_prph_h__
+#ifndef        __il_prph_h__
+#define __il_prph_h__
 
 /*
  * Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
 #define APMG_PS_CTRL_VAL_RESET_REQ             (0x04000000)
 #define APMG_PS_CTRL_MSK_PWR_SRC               (0x03000000)
 #define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN         (0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_MAX           (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX           (0x01000000)    /* 3945 only */
 #define APMG_PS_CTRL_VAL_PWR_SRC_VAUX          (0x02000000)
-#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK        (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK        (0x000001E0)    /* bit 8:5 */
 #define APMG_SVR_DIGITAL_VOLTAGE_1_32          (0x00000060)
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
  *
  * 1)  Initialization -- performs hardware calibration and sets up some
  *     internal data, then notifies host via "initialize alive" notification
- *     (struct iwl_init_alive_resp) that it has completed all of its work.
+ *     (struct il_init_alive_resp) that it has completed all of its work.
  *     After signal from host, it then loads and starts the runtime program.
  *     The initialization program must be used when initially setting up the
  *     NIC after loading the driver.
  *
  * 2)  Runtime/Protocol -- performs all normal runtime operations.  This
- *     notifies host via "alive" notification (struct iwl_alive_resp) that it
+ *     notifies host via "alive" notification (struct il_alive_resp) that it
  *     is ready to be used.
  *
  * When initializing the NIC, the host driver does the following procedure:
  *        procedure.
  *
  * This save/restore method is mostly for autonomous power management during
- * normal operation (result of POWER_TABLE_CMD).  Platform suspend/resume and
+ * normal operation (result of C_POWER_TBL).  Platform suspend/resume and
  * RFKILL should use complete restarts (with total re-initialization) of uCode,
  * allowing total shutdown (including BSM memory).
  *
  */
 
 /* BSM bit fields */
-#define BSM_WR_CTRL_REG_BIT_START     (0x80000000) /* start boot load now */
-#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000) /* enable boot after pwrup*/
-#define BSM_DRAM_INST_LOAD            (0x80000000) /* start program load now */
+#define BSM_WR_CTRL_REG_BIT_START     (0x80000000)     /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000)     /* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD            (0x80000000)     /* start program load now */
 
 /* BSM addresses */
 #define BSM_BASE                     (PRPH_BASE + 0x3400)
 #define BSM_END                      (PRPH_BASE + 0x3800)
 
-#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000) /* ctl and status */
-#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004) /* source in BSM mem */
-#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008) /* dest in SRAM mem */
-#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C) /* bytes */
-#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010) /* bit 0:  1 == done */
+#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000)        /* ctl and status */
+#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004)        /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008)        /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C)        /* bytes */
+#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010)        /* bit 0:  1 == done */
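
Taken together, these registers drive a bootstrap transfer: program the source, destination and length, then set the START bit and wait for the transfer to finish. A rough sketch only — bsm_src, sram_dst and xfer_len are placeholder variables, the count register's units follow the comment above, and the iwl_legacy_write_prph() accessor is borrowed from the 4965 code elsewhere in this patch:

	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, bsm_src);    /* source within BSM memory */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, sram_dst);   /* destination in device SRAM */
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, xfer_len);   /* transfer length */

	/* kick off the load; completion is reported via BSM_WR_STATUS_REG bit 0 */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
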
 
 /*
  * Pointers and size regs for bootstrap load and data SRAM save/restore.
  * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
  */
 #define BSM_SRAM_LOWER_BOUND         (PRPH_BASE + 0x3800)
-#define BSM_SRAM_SIZE                  (1024) /* bytes */
-
+#define BSM_SRAM_SIZE                  (1024)  /* bytes */
 
 /* 3945 Tx scheduler registers */
 #define ALM_SCD_BASE                        (PRPH_BASE + 0x2E00)
  * but one DMA channel may take input from several queues.
  *
  * Tx DMA FIFOs have dedicated purposes.  For 4965, they are used as follows
- * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ * (cf. default_queue_to_tx_fifo in 4965.c):
  *
  * 0 -- EDCA BK (background) frames, lowest priority
  * 1 -- EDCA BE (best effort) frames, normal priority
  * The driver sets up each queue to work in one of two modes:
  *
  * 1)  Scheduler-Ack, in which the scheduler automatically supports a
- *     block-ack (BA) window of up to 64 TFDs.  In this mode, each queue
+ *     block-ack (BA) win of up to 64 TFDs.  In this mode, each queue
  *     contains TFDs for a unique combination of Recipient Address (RA)
  *     and Traffic Identifier (TID), that is, traffic of a given
  *     Quality-Of-Service (QOS) priority, destined for a single station.
  *
  *     In scheduler-ack mode, the scheduler keeps track of the Tx status of
- *     each frame within the BA window, including whether it's been transmitted,
+ *     each frame within the BA win, including whether it's been transmitted,
  *     and whether it's been acknowledged by the receiving station.  The device
  *     automatically processes block-acks received from the receiving STA,
  *     and reschedules un-acked frames to be retransmitted (successful
  *     Tx completion may end up being out-of-order).
  *
  *     The driver must maintain the queue's Byte Count table in host DRAM
- *     (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ *     (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
  *     This mode does not support fragmentation.
  *
  * 2)  FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
  */
 
 /**
- * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
  * can keep track of at one time when creating block-ack chains of frames.
  * Note that "64" matches the number of ack bits in a block-ack packet.
  * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
- * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
  */
 #define SCD_WIN_SIZE                           64
 #define SCD_FRAME_LIMIT                                64
 
 /* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
-#define IWL49_SCD_START_OFFSET         0xa02c00
+#define IL49_SCD_START_OFFSET          0xa02c00
 
 /*
  * 4965 tells driver SRAM address for internal scheduler structs via this reg.
  * Value is valid only after "Alive" response from uCode.
  */
-#define IWL49_SCD_SRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x0)
+#define IL49_SCD_SRAM_BASE_ADDR           (IL49_SCD_START_OFFSET + 0x0)
 
 /*
  * Driver may need to update queue-empty bits after changing queue's
- * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
  * scheduler is not tracking what's happening).
  * Bit fields:
  * 31-16:  Write mask -- 1: update empty bit, 0: don't change empty bit
  * 15-00:  Empty state, one for each queue -- 1: empty, 0: non-empty
  * NOTE:  This register is not used by Linux driver.
  */
-#define IWL49_SCD_EMPTY_BITS               (IWL49_SCD_START_OFFSET + 0x4)
+#define IL49_SCD_EMPTY_BITS               (IL49_SCD_START_OFFSET + 0x4)
 
 /*
  * Physical base address of array of byte count (BC) circular buffers (CBs).
  * This register points to BC CB for queue 0, must be on 1024-byte boundary.
  * Others are spaced by 1024 bytes.
  * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
- * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
  * Bit fields:
  * 25-00:  Byte Count CB physical address [35:10], must be 1024-byte aligned.
  */
-#define IWL49_SCD_DRAM_BASE_ADDR           (IWL49_SCD_START_OFFSET + 0x10)
+#define IL49_SCD_DRAM_BASE_ADDR           (IL49_SCD_START_OFFSET + 0x10)
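
Going only by the layout described above (2-byte entries for 256 TFD slots plus a 64-entry duplicate region, indexed by SSN & 0xff), a host-DRAM byte count CB might be maintained as in the following sketch; the struct and helper names are illustrative, not taken from the driver.

#include <linux/types.h>
#include <asm/byteorder.h>

#define BC_CB_ENTRIES           256     /* one entry per TFD index */
#define BC_CB_DUP_ENTRIES       64      /* the extra 64 entries noted above */

/* Illustrative layout only: 2 bytes * (256 + 64) entries, padded out to the
 * 1024-byte spacing between queues. */
struct sched_queue_byte_cnt_tbl {
        __le16 tfd_offset[BC_CB_ENTRIES + BC_CB_DUP_ENTRIES];
        u8 pad[1024 - 2 * (BC_CB_ENTRIES + BC_CB_DUP_ENTRIES)];
};

static void bc_tbl_set(struct sched_queue_byte_cnt_tbl *tbl, u16 ssn, u16 byte_cnt)
{
        int idx = ssn & 0xff;   /* (idx into BC CB) = (idx into TFD CB) */

        tbl->tfd_offset[idx] = cpu_to_le16(byte_cnt);
        /* Presumably the first 64 entries are mirrored after entry 255 so a
         * read window that wraps still sees contiguous values. */
        if (idx < BC_CB_DUP_ENTRIES)
                tbl->tfd_offset[BC_CB_ENTRIES + idx] = cpu_to_le16(byte_cnt);
}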
 
 /*
  * Enables any/all Tx DMA/FIFO channels.
  * Bit fields:
  *  7- 0:  Enable (1), disable (0), one bit for each channel 0-7
  */
-#define IWL49_SCD_TXFACT                   (IWL49_SCD_START_OFFSET + 0x1c)
+#define IL49_SCD_TXFACT                   (IL49_SCD_START_OFFSET + 0x1c)
 /*
- * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
  * Initialized and updated by driver as new TFDs are added to queue.
- * NOTE:  If using Block Ack, index must correspond to frame's
- *        Start Sequence Number; index = (SSN & 0xff)
+ * NOTE:  If using Block Ack, idx must correspond to frame's
+ *        Start Sequence Number; idx = (SSN & 0xff)
  * NOTE:  Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
  */
-#define IWL49_SCD_QUEUE_WRPTR(x)  (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+#define IL49_SCD_QUEUE_WRPTR(x)  (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
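
A one-line illustration of the note above, with prph_write() again a hypothetical accessor: when block ack is in use the write pointer is derived from the frame's Start Sequence Number.

#include <linux/types.h>

extern void prph_write(u32 reg, u32 val);       /* hypothetical accessor */

/* After appending TFDs for the frame whose Start Sequence Number is ssn,
 * tell the scheduler where this queue's write pointer now sits. */
static void scd_set_wrptr(int txq_id, u16 ssn)
{
        prph_write(IL49_SCD_QUEUE_WRPTR(txq_id), ssn & 0xff);
}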
 
 /*
- * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
- * For FIFO mode, index indicates next frame to transmit.
- * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
  * Initialized by driver, updated by scheduler.
  */
-#define IWL49_SCD_QUEUE_RDPTR(x)  (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+#define IL49_SCD_QUEUE_RDPTR(x)  (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
 
 /*
  * Select which queues work in chain mode (1) vs. not (0).
  * NOTE:  If driver sets up a queue for chain mode, it should also set it up
  *        for Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
  */
-#define IWL49_SCD_QUEUECHAIN_SEL  (IWL49_SCD_START_OFFSET + 0xd0)
+#define IL49_SCD_QUEUECHAIN_SEL  (IL49_SCD_START_OFFSET + 0xd0)
 
 /*
  * Select which queues interrupt driver when scheduler increments
- * a queue's read pointer (index).
+ * a queue's read pointer (idx).
  * Bit fields:
  * 31-16:  Reserved
  * 15-00:  Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
  * NOTE:  This functionality is apparently a no-op; driver relies on interrupts
  *        from Rx queue to read Tx command responses and update Tx queues.
  */
-#define IWL49_SCD_INTERRUPT_MASK  (IWL49_SCD_START_OFFSET + 0xe4)
+#define IL49_SCD_INTERRUPT_MASK  (IL49_SCD_START_OFFSET + 0xe4)
 
 /*
  * Queue search status registers.  One for each queue.
  *        Driver should init to "1" for aggregation mode, or "0" otherwise.
  *   7-6: Driver should init to "0"
  *     5: Window Size Left; indicates whether scheduler can request
- *        another TFD, based on window size, etc.  Driver should init
+ *        another TFD, based on win size, etc.  Driver should init
  *        this bit to "1" for aggregation mode, or "0" for non-agg.
  *   4-1: Tx FIFO to use (range 0-7).
  *     0: Queue is active (1), not active (0).
  * NOTE:  If enabling Scheduler-ACK mode, chain mode should also be enabled
  *        via SCD_QUEUECHAIN_SEL.
  */
-#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
-       (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+       (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
 
 /* Bit field positions */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE    (0)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF       (1)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL       (5)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK   (8)
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE     (0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF        (1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL        (5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK    (8)
 
 /* Write masks */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN        (10)
-#define IWL49_SCD_QUEUE_STTS_REG_MSK           (0x0007FC00)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK            (0x0007FC00)
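
Combining the bit positions and the write mask, activating a queue on a given Tx FIFO could look like the sketch below; prph_write() is a hypothetical accessor, and driving both WSL and SCD_ACK from one aggregation flag follows the field descriptions above.

#include <linux/types.h>

extern void prph_write(u32 reg, u32 val);       /* hypothetical accessor */

static void scd_queue_set_status(int txq_id, int tx_fifo, bool aggregation)
{
        u32 val = (1 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                  (tx_fifo << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
                  (aggregation << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
                  (aggregation << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
                  IL49_SCD_QUEUE_STTS_REG_MSK;  /* write mask, per the define above */

        prph_write(IL49_SCD_QUEUE_STATUS_BITS(txq_id), val);
}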
 
 /**
  * 4965 internal SRAM structures for scheduler, shared with driver ...
  * each queue's entry as follows:
  *
  * LS Dword bit fields:
- *  0-06:  Max Tx window size for Scheduler-ACK.  Driver should init to 64.
+ *  0-06:  Max Tx win size for Scheduler-ACK.  Driver should init to 64.
  *
  * MS Dword bit fields:
  * 16-22:  Frame limit.  Driver should init to 10 (0xa).
  * Init must be done after driver receives "Alive" response from 4965 uCode,
  * and when setting up queue for aggregation.
  */
-#define IWL49_SCD_CONTEXT_DATA_OFFSET                  0x380
-#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
-                       (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+#define IL49_SCD_CONTEXT_DATA_OFFSET                   0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+                       (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
 
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS          (0)
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK          (0x0000007F)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS       (16)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK       (0x007F0000)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS           (0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK           (0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS        (16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK        (0x007F0000)
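
A sketch of initializing one queue's context entry with SCD_WIN_SIZE and SCD_FRAME_LIMIT, assuming scd_base_addr was read back from IL49_SCD_SRAM_BASE_ADDR after the "Alive" response; targ_mem_write() is a hypothetical device-SRAM writer.

#include <linux/types.h>

extern void targ_mem_write(u32 addr, u32 val);  /* hypothetical SRAM writer */

static void scd_queue_init_context(u32 scd_base_addr, int txq_id)
{
        u32 entry = scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);

        /* LS dword: max Tx window size for Scheduler-ACK */
        targ_mem_write(entry,
                       (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
                       IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

        /* MS dword: frame limit */
        targ_mem_write(entry + sizeof(u32),
                       (SCD_FRAME_LIMIT << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                       IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}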
 
 /*
  * Tx Status Bitmap
  * "Alive" notification from uCode.  Area is used only by device itself;
  * no other support (besides clearing) is required from driver.
  */
-#define IWL49_SCD_TX_STTS_BITMAP_OFFSET                0x400
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET         0x400
 
 /*
  * RAxTID to queue translation mapping.
  * When a queue is in Scheduler-ACK mode, frames placed in that queue must be
  * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
  * one QOS priority level destined for one station (for this wireless link,
- * not final destination).  The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * not final destination).  The SCD_TRANSLATE_TBL area provides 16 16-bit
  * mappings, one for each of the 16 queues.  If queue is not in Scheduler-ACK
  * mode, the device ignores the mapping value.
  *
  * must read a dword-aligned value from device SRAM, replace the 16-bit map
  * value of interest, and write the dword value back into device SRAM.
  */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET         0x500
+#define IL49_SCD_TRANSLATE_TBL_OFFSET          0x500
 
 /* Find translation table dword to read/write for given queue */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
-       ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+       ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
 
-#define IWL_SCD_TXFIFO_POS_TID                 (0)
-#define IWL_SCD_TXFIFO_POS_RA                  (4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK     (0x01FF)
+#define IL_SCD_TXFIFO_POS_TID                  (0)
+#define IL_SCD_TXFIFO_POS_RA                   (4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK      (0x01FF)
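
Tying the RA/TID positions to the translation table above, an update is a compose-then-read-modify-write of the containing dword. A sketch, with targ_mem_read()/targ_mem_write() as hypothetical device-SRAM accessors:

#include <linux/types.h>

extern u32 targ_mem_read(u32 addr);             /* hypothetical SRAM accessors */
extern void targ_mem_write(u32 addr, u32 val);

static void scd_set_queue_ra_tid(u32 scd_base_addr, int txq_id, u8 ra, u8 tid)
{
        u32 addr = scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
        u16 map = ((ra << IL_SCD_TXFIFO_POS_RA) | (tid << IL_SCD_TXFIFO_POS_TID)) &
                  IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
        u32 dw = targ_mem_read(addr);

        /* Each dword holds two 16-bit mappings; assume the odd queue of each
         * pair occupies the upper half. */
        if (txq_id & 0x1)
                dw = (map << 16) | (dw & 0x0000ffff);
        else
                dw = map | (dw & 0xffff0000);

        targ_mem_write(addr, dw);
}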
 
 /*********************** END TX SCHEDULER *************************************/
 
-#endif                         /* __iwl_legacy_prph_h__ */
+#endif /* __il_prph_h__ */
index c73e5ed..86344ce 100644 (file)
@@ -1,7 +1,7 @@
 # WIFI
 obj-$(CONFIG_IWLWIFI)  += iwlwifi.o
-iwlwifi-objs           := iwl-agn.o iwl-agn-rs.o
-iwlwifi-objs           += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs           := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
+iwlwifi-objs           += iwl-ucode.o iwl-agn-tx.o
 iwlwifi-objs           += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
 iwlwifi-objs           += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
 
@@ -18,7 +18,7 @@ iwlwifi-objs          += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
 
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-testmode.o
 
 CFLAGS_iwl-devtrace.o := -I$(src)
 
index dd008b0..8d3bad7 100644 (file)
@@ -147,16 +147,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
        iwl1000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl1000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-                       BIT(IWL_CALIB_XTAL)             |
-                       BIT(IWL_CALIB_LO)               |
-                       BIT(IWL_CALIB_TX_IQ)            |
-                       BIT(IWL_CALIB_TX_IQ_PERD)       |
-                       BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
 
        return 0;
 }
index 7943197..0c4688d 100644 (file)
@@ -143,17 +143,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
        iwl2000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl2000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_XTAL)             |
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
-       if (priv->cfg->need_temp_offset_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
 
        return 0;
 }
@@ -258,7 +248,6 @@ static struct iwl_bt_params iwl2030_bt_params = {
        .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
        .lib = &iwl2000_lib,                                    \
        .base_params = &iwl2000_base_params,                    \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -270,11 +259,6 @@ struct iwl_cfg iwl2000_2bgn_cfg = {
        .ht_params = &iwl2000_ht_params,
 };
 
-struct iwl_cfg iwl2000_2bg_cfg = {
-       .name = "2000 Series 2x2 BG",
-       IWL_DEVICE_2000,
-};
-
 struct iwl_cfg iwl2000_2bgn_d_cfg = {
        .name = "2000D Series 2x2 BGN",
        IWL_DEVICE_2000,
@@ -291,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
        .lib = &iwl2030_lib,                                    \
        .base_params = &iwl2030_base_params,                    \
        .bt_params = &iwl2030_bt_params,                        \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -304,11 +287,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
        .ht_params = &iwl2000_ht_params,
 };
 
-struct iwl_cfg iwl2030_2bg_cfg = {
-       .name = "2000 Series 2x2 BG/BT",
-       IWL_DEVICE_2030,
-};
-
 #define IWL_DEVICE_105                                         \
        .fw_name_pre = IWL105_FW_PRE,                           \
        .ucode_api_max = IWL105_UCODE_API_MAX,                  \
@@ -318,7 +296,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
        .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION,       \
        .lib = &iwl2000_lib,                                    \
        .base_params = &iwl2000_base_params,                    \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -326,11 +303,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
        .rx_with_siso_diversity = true,                         \
        .iq_invert = true                                       \
 
-struct iwl_cfg iwl105_bg_cfg = {
-       .name = "105 Series 1x1 BG",
-       IWL_DEVICE_105,
-};
-
 struct iwl_cfg iwl105_bgn_cfg = {
        .name = "105 Series 1x1 BGN",
        IWL_DEVICE_105,
@@ -353,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
        .lib = &iwl2030_lib,                                    \
        .base_params = &iwl2030_base_params,                    \
        .bt_params = &iwl2030_bt_params,                        \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
@@ -361,11 +332,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
        .rx_with_siso_diversity = true,                         \
        .iq_invert = true                                       \
 
-struct iwl_cfg iwl135_bg_cfg = {
-       .name = "135 Series 1x1 BG/BT",
-       IWL_DEVICE_135,
-};
-
 struct iwl_cfg iwl135_bgn_cfg = {
        .name = "135 Series 1x1 BGN/BT",
        IWL_DEVICE_135,
index f55fb2d..cf2fb47 100644 (file)
@@ -186,14 +186,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
        iwl5000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl5000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_XTAL)             |
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_TX_IQ_PERD)       |
-               BIT(IWL_CALIB_BASE_BAND);
 
        return 0;
 }
@@ -222,14 +215,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
        iwl5150_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl5150_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
 
        return 0;
 }
@@ -434,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
        .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,       \
        .lib = &iwl5150_lib,                                    \
        .base_params = &iwl5000_base_params,                    \
-       .need_dc_calib = true,                                  \
+       .no_xtal_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
index c840c78..617ad1c 100644 (file)
 #include "iwl-cfg.h"
 
 /* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
 #define IWL6050_UCODE_API_MAX 5
 #define IWL6000G2_UCODE_API_MAX 6
 
 /* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
 #define IWL6000G2_UCODE_API_OK 5
 
 /* Lowest firmware API version supported */
@@ -164,17 +165,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
        iwl6000_set_ct_threshold(priv);
 
        /* Set initial sensitivity parameters */
-       /* Set initial calibration set */
        hw_params(priv).sens = &iwl6000_sensitivity;
-       hw_params(priv).calib_init_cfg =
-               BIT(IWL_CALIB_XTAL)             |
-               BIT(IWL_CALIB_LO)               |
-               BIT(IWL_CALIB_TX_IQ)            |
-               BIT(IWL_CALIB_BASE_BAND);
-       if (priv->cfg->need_dc_calib)
-               hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
-       if (priv->cfg->need_temp_offset_calib)
-               hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
 
        return 0;
 }
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
        .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,       \
        .lib = &iwl6000_lib,                                    \
        .base_params = &iwl6000_g2_base_params,                 \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .led_mode = IWL_LED_RF_STATE
 
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
        .lib = &iwl6030_lib,                                    \
        .base_params = &iwl6000_g2_base_params,                 \
        .bt_params = &iwl6000_bt_params,                        \
-       .need_dc_calib = true,                                  \
        .need_temp_offset_calib = true,                         \
        .led_mode = IWL_LED_RF_STATE,                           \
        .adv_pm = true                                          \
@@ -439,16 +428,6 @@ struct iwl_cfg iwl6035_2agn_cfg = {
        .ht_params = &iwl6000_ht_params,
 };
 
-struct iwl_cfg iwl6035_2abg_cfg = {
-       .name = "6035 Series 2x2 ABG/BT",
-       IWL_DEVICE_6030,
-};
-
-struct iwl_cfg iwl6035_2bg_cfg = {
-       .name = "6035 Series 2x2 BG/BT",
-       IWL_DEVICE_6030,
-};
-
 struct iwl_cfg iwl1030_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
        IWL_DEVICE_6030,
@@ -479,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
 #define IWL_DEVICE_6000i                                       \
        .fw_name_pre = IWL6000_FW_PRE,                          \
        .ucode_api_max = IWL6000_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL6000_UCODE_API_OK,                   \
        .ucode_api_min = IWL6000_UCODE_API_MIN,                 \
        .valid_tx_ant = ANT_BC,         /* .cfg overwrite */    \
        .valid_rx_ant = ANT_BC,         /* .cfg overwrite */    \
@@ -516,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
        .eeprom_ver = EEPROM_6050_EEPROM_VERSION,               \
        .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,       \
        .base_params = &iwl6050_base_params,                    \
-       .need_dc_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
@@ -540,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
        .eeprom_ver = EEPROM_6150_EEPROM_VERSION,               \
        .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,       \
        .base_params = &iwl6050_base_params,                    \
-       .need_dc_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
@@ -559,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
        .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
        .fw_name_pre = IWL6000_FW_PRE,
        .ucode_api_max = IWL6000_UCODE_API_MAX,
+       .ucode_api_ok = IWL6000_UCODE_API_OK,
        .ucode_api_min = IWL6000_UCODE_API_MIN,
        .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
        .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
        .lib = &iwl6000_lib,
        .base_params = &iwl6000_base_params,
        .ht_params = &iwl6000_ht_params,
-       .need_dc_calib = true,
        .led_mode = IWL_LED_BLINK,
 };
 
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
index 03bac48..4d02105 100644 (file)
@@ -84,54 +84,62 @@ struct statistics_general_data {
 
 int iwl_send_calib_results(struct iwl_priv *priv)
 {
-       int ret = 0;
-       int i = 0;
-
        struct iwl_host_cmd hcmd = {
                .id = REPLY_PHY_CALIBRATION_CMD,
                .flags = CMD_SYNC,
        };
-
-       for (i = 0; i < IWL_CALIB_MAX; i++) {
-               if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
-                   priv->calib_results[i].buf) {
-                       hcmd.len[0] = priv->calib_results[i].buf_len;
-                       hcmd.data[0] = priv->calib_results[i].buf;
-                       hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
-                       ret = iwl_trans_send_cmd(trans(priv), &hcmd);
-                       if (ret) {
-                               IWL_ERR(priv, "Error %d iteration %d\n",
-                                       ret, i);
-                               break;
-                       }
+       struct iwl_calib_result *res;
+
+       list_for_each_entry(res, &priv->calib_results, list) {
+               int ret;
+
+               hcmd.len[0] = res->cmd_len;
+               hcmd.data[0] = &res->hdr;
+               hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+               ret = iwl_trans_send_cmd(trans(priv), &hcmd);
+               if (ret) {
+                       IWL_ERR(priv, "Error %d on calib cmd %d\n",
+                               ret, res->hdr.op_code);
+                       return ret;
                }
        }
 
-       return ret;
+       return 0;
 }
 
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+int iwl_calib_set(struct iwl_priv *priv,
+                 const struct iwl_calib_hdr *cmd, int len)
 {
-       if (res->buf_len != len) {
-               kfree(res->buf);
-               res->buf = kzalloc(len, GFP_ATOMIC);
-       }
-       if (unlikely(res->buf == NULL))
+       struct iwl_calib_result *res, *tmp;
+
+       res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+                     GFP_ATOMIC);
+       if (!res)
                return -ENOMEM;
+       memcpy(&res->hdr, cmd, len);
+       res->cmd_len = len;
+
+       list_for_each_entry(tmp, &priv->calib_results, list) {
+               if (tmp->hdr.op_code == res->hdr.op_code) {
+                       list_replace(&tmp->list, &res->list);
+                       kfree(tmp);
+                       return 0;
+               }
+       }
+
+       /* wasn't in list already */
+       list_add_tail(&res->list, &priv->calib_results);
 
-       res->buf_len = len;
-       memcpy(res->buf, buf, len);
        return 0;
 }
 
 void iwl_calib_free_results(struct iwl_priv *priv)
 {
-       int i;
+       struct iwl_calib_result *res, *tmp;
 
-       for (i = 0; i < IWL_CALIB_MAX; i++) {
-               kfree(priv->calib_results[i].buf);
-               priv->calib_results[i].buf = NULL;
-               priv->calib_results[i].buf_len = 0;
+       list_for_each_entry_safe(res, tmp, &priv->calib_results, list) {
+               list_del(&res->list);
+               kfree(res);
        }
 }
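
The hunks above replace the fixed calibration-results array with a linked list whose nodes carry the command header and payload in a single allocation (note the sizeof(*res) + len - sizeof(struct iwl_calib_hdr) kmalloc and the memcpy into &res->hdr). The node layout this implies is roughly the sketch below; field order is an assumption, and struct iwl_calib_hdr is defined elsewhere in the driver.

#include <linux/list.h>
#include <linux/types.h>

/* Node layout implied by the allocation and memcpy above. */
struct iwl_calib_result {
        struct list_head list;          /* linked into priv->calib_results */
        size_t cmd_len;                 /* length of hdr plus trailing payload */
        struct iwl_calib_hdr hdr;       /* calibration data follows in the
                                         * same allocation */
};

With this layout, iwl_calib_set() either replaces an existing node carrying the same op_code or appends a new one, and iwl_calib_free_results() walks the list with list_for_each_entry_safe() to free everything.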
 
index a869fc9..6ed806c 100644 (file)
@@ -73,7 +73,8 @@ void iwl_init_sensitivity(struct iwl_priv *priv);
 void iwl_reset_run_time_calib(struct iwl_priv *priv);
 
 int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
+int iwl_calib_set(struct iwl_priv *priv,
+                 const struct iwl_calib_hdr *cmd, int len);
 void iwl_calib_free_results(struct iwl_priv *priv);
 
 #endif /* __iwl_calib_h__ */
index 1a52ed2..575d1bb 100644 (file)
@@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
        case IEEE80211_SMPS_STATIC:
        case IEEE80211_SMPS_DYNAMIC:
                return IWL_NUM_IDLE_CHAINS_SINGLE;
+       case IEEE80211_SMPS_AUTOMATIC:
        case IEEE80211_SMPS_OFF:
                return active_cnt;
        default:
@@ -933,53 +934,359 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
        return ant;
 }
 
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
-                                  struct iwl_notification_wait *wait_entry,
-                                  u8 cmd,
-                                  void (*fn)(struct iwl_priv *priv,
-                                             struct iwl_rx_packet *pkt,
-                                             void *data),
-                                  void *fn_data)
+#ifdef CONFIG_PM_SLEEP
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
 {
-       wait_entry->fn = fn;
-       wait_entry->fn_data = fn_data;
-       wait_entry->cmd = cmd;
-       wait_entry->triggered = false;
-       wait_entry->aborted = false;
-
-       spin_lock_bh(&priv->notif_wait_lock);
-       list_add(&wait_entry->list, &priv->notif_waits);
-       spin_unlock_bh(&priv->notif_wait_lock);
+       int i;
+
+       for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+               out[i] = cpu_to_le16(p1k[i]);
 }
 
-int iwlagn_wait_notification(struct iwl_priv *priv,
-                            struct iwl_notification_wait *wait_entry,
-                            unsigned long timeout)
+struct wowlan_key_data {
+       struct iwl_rxon_context *ctx;
+       struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+       struct iwlagn_wowlan_tkip_params_cmd *tkip;
+       const u8 *bssid;
+       bool error, use_rsc_tsc, use_tkip;
+};
+
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_sta *sta,
+                              struct ieee80211_key_conf *key,
+                              void *_data)
 {
-       int ret;
+       struct iwl_priv *priv = hw->priv;
+       struct wowlan_key_data *data = _data;
+       struct iwl_rxon_context *ctx = data->ctx;
+       struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+       struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+       struct iwlagn_p1k_cache *rx_p1ks;
+       u8 *rx_mic_key;
+       struct ieee80211_key_seq seq;
+       u32 cur_rx_iv32 = 0;
+       u16 p1k[IWLAGN_P1K_SIZE];
+       int ret, i;
+
+       mutex_lock(&priv->shrd->mutex);
 
-       ret = wait_event_timeout(priv->notif_waitq,
-                                wait_entry->triggered || wait_entry->aborted,
-                                timeout);
+       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+            key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+            !sta && !ctx->key_mapping_keys)
+               ret = iwl_set_default_wep_key(priv, ctx, key);
+       else
+               ret = iwl_set_dynamic_key(priv, ctx, key, sta);
 
-       spin_lock_bh(&priv->notif_wait_lock);
-       list_del(&wait_entry->list);
-       spin_unlock_bh(&priv->notif_wait_lock);
+       if (ret) {
+               IWL_ERR(priv, "Error setting key during suspend!\n");
+               data->error = true;
+       }
 
-       if (wait_entry->aborted)
-               return -EIO;
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               if (sta) {
+                       tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+                       tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
 
-       /* return value is always >= 0 */
-       if (ret <= 0)
-               return -ETIMEDOUT;
-       return 0;
+                       rx_p1ks = data->tkip->rx_uni;
+
+                       ieee80211_get_key_tx_seq(key, &seq);
+                       tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+                       tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+                       ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+                       iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+                       memcpy(data->tkip->mic_keys.tx,
+                              &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+                              IWLAGN_MIC_KEY_SIZE);
+
+                       rx_mic_key = data->tkip->mic_keys.rx_unicast;
+               } else {
+                       tkip_sc =
+                               data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+                       rx_p1ks = data->tkip->rx_multi;
+                       rx_mic_key = data->tkip->mic_keys.rx_mcast;
+               }
+
+               /*
+                * For non-QoS this relies on the fact that both the uCode and
+                * mac80211 use TID 0 (as they need to, to avoid replay attacks)
+                * for checking the IV in the frames.
+                */
+               for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+                       ieee80211_get_key_rx_seq(key, i, &seq);
+                       tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+                       tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+                       /* wrapping isn't allowed, AP must rekey */
+                       if (seq.tkip.iv32 > cur_rx_iv32)
+                               cur_rx_iv32 = seq.tkip.iv32;
+               }
+
+               ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+               iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+               ieee80211_get_tkip_rx_p1k(key, data->bssid,
+                                         cur_rx_iv32 + 1, p1k);
+               iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+               memcpy(rx_mic_key,
+                      &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+                      IWLAGN_MIC_KEY_SIZE);
+
+               data->use_tkip = true;
+               data->use_rsc_tsc = true;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               if (sta) {
+                       u8 *pn = seq.ccmp.pn;
+
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+                       aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+                       ieee80211_get_key_tx_seq(key, &seq);
+                       aes_tx_sc->pn = cpu_to_le64(
+                                       (u64)pn[5] |
+                                       ((u64)pn[4] << 8) |
+                                       ((u64)pn[3] << 16) |
+                                       ((u64)pn[2] << 24) |
+                                       ((u64)pn[1] << 32) |
+                                       ((u64)pn[0] << 40));
+               } else
+                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+               /*
+                * For non-QoS this relies on the fact that both the uCode and
+                * mac80211 use TID 0 for checking the IV in the frames.
+                */
+               for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+                       u8 *pn = seq.ccmp.pn;
+
+                       ieee80211_get_key_rx_seq(key, i, &seq);
+                       aes_sc->pn = cpu_to_le64(
+                                       (u64)pn[5] |
+                                       ((u64)pn[4] << 8) |
+                                       ((u64)pn[3] << 16) |
+                                       ((u64)pn[2] << 24) |
+                                       ((u64)pn[1] << 32) |
+                                       ((u64)pn[0] << 40));
+               }
+               data->use_rsc_tsc = true;
+               break;
+       }
+
+       mutex_unlock(&priv->shrd->mutex);
+}
+
+int iwlagn_send_patterns(struct iwl_priv *priv,
+                       struct cfg80211_wowlan *wowlan)
+{
+       struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_WOWLAN_PATTERNS,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+               .flags = CMD_SYNC,
+       };
+       int i, err;
+
+       if (!wowlan->n_patterns)
+               return 0;
+
+       cmd.len[0] = sizeof(*pattern_cmd) +
+               wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+       pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+       if (!pattern_cmd)
+               return -ENOMEM;
+
+       pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+       for (i = 0; i < wowlan->n_patterns; i++) {
+               int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+               memcpy(&pattern_cmd->patterns[i].mask,
+                       wowlan->patterns[i].mask, mask_len);
+               memcpy(&pattern_cmd->patterns[i].pattern,
+                       wowlan->patterns[i].pattern,
+                       wowlan->patterns[i].pattern_len);
+               pattern_cmd->patterns[i].mask_size = mask_len;
+               pattern_cmd->patterns[i].pattern_size =
+                       wowlan->patterns[i].pattern_len;
+       }
+
+       cmd.data[0] = pattern_cmd;
+       err = iwl_trans_send_cmd(trans(priv), &cmd);
+       kfree(pattern_cmd);
+       return err;
 }
 
-void iwlagn_remove_notification(struct iwl_priv *priv,
-                               struct iwl_notification_wait *wait_entry)
+int iwlagn_suspend(struct iwl_priv *priv,
+               struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
-       spin_lock_bh(&priv->notif_wait_lock);
-       list_del(&wait_entry->list);
-       spin_unlock_bh(&priv->notif_wait_lock);
+       struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+       struct iwl_rxon_cmd rxon;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+       struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+       struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
+       struct wowlan_key_data key_data = {
+               .ctx = ctx,
+               .bssid = ctx->active.bssid_addr,
+               .use_rsc_tsc = false,
+               .tkip = &tkip_cmd,
+               .use_tkip = false,
+       };
+       int ret, i;
+       u16 seq;
+
+       key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+       if (!key_data.rsc_tsc)
+               return -ENOMEM;
+
+       memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+       /*
+        * We know the last used seqno, and the uCode expects to know that
+        * one, it will increment before TX.
+        */
+       seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+       wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+       /*
+        * For QoS counters, we store the one to use next, so subtract 0x10
+        * since the uCode will add 0x10 before using the value.
+        */
+       for (i = 0; i < 8; i++) {
+               seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
+               seq -= 0x10;
+               wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+       }
+
+       if (wowlan->disconnect)
+               wakeup_filter_cmd.enabled |=
+                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+                                   IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+       if (wowlan->magic_pkt)
+               wakeup_filter_cmd.enabled |=
+                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+       if (wowlan->gtk_rekey_failure)
+               wakeup_filter_cmd.enabled |=
+                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+       if (wowlan->eap_identity_req)
+               wakeup_filter_cmd.enabled |=
+                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+       if (wowlan->four_way_handshake)
+               wakeup_filter_cmd.enabled |=
+                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+       if (wowlan->n_patterns)
+               wakeup_filter_cmd.enabled |=
+                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+       if (wowlan->rfkill_release)
+               d3_cfg_cmd.wakeup_flags |=
+                       cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
+
+       iwl_scan_cancel_timeout(priv, 200);
+
+       memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+       iwl_trans_stop_device(trans(priv));
+
+       priv->shrd->wowlan = true;
+
+       ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+       if (ret)
+               goto out;
+
+       /* now configure WoWLAN ucode */
+       ret = iwl_alive_start(priv);
+       if (ret)
+               goto out;
+
+       memcpy(&ctx->staging, &rxon, sizeof(rxon));
+       ret = iwlagn_commit_rxon(priv, ctx);
+       if (ret)
+               goto out;
+
+       ret = iwl_power_update_mode(priv, true);
+       if (ret)
+               goto out;
+
+       if (!iwlagn_mod_params.sw_crypto) {
+               /* mark all keys clear */
+               priv->ucode_key_table = 0;
+               ctx->key_mapping_keys = 0;
+
+               /*
+                * This needs to be unlocked due to lock ordering
+                * constraints. Since we're in the suspend path
+                * that isn't really a problem though.
+                */
+               mutex_unlock(&priv->shrd->mutex);
+               ieee80211_iter_keys(priv->hw, ctx->vif,
+                                   iwlagn_wowlan_program_keys,
+                                   &key_data);
+               mutex_lock(&priv->shrd->mutex);
+               if (key_data.error) {
+                       ret = -EIO;
+                       goto out;
+               }
+
+               if (key_data.use_rsc_tsc) {
+                       struct iwl_host_cmd rsc_tsc_cmd = {
+                               .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+                               .flags = CMD_SYNC,
+                               .data[0] = key_data.rsc_tsc,
+                               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+                               .len[0] = sizeof(key_data.rsc_tsc),
+                       };
+
+                       ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+                       if (ret)
+                               goto out;
+               }
+
+               if (key_data.use_tkip) {
+                       ret = iwl_trans_send_cmd_pdu(trans(priv),
+                                                REPLY_WOWLAN_TKIP_PARAMS,
+                                                CMD_SYNC, sizeof(tkip_cmd),
+                                                &tkip_cmd);
+                       if (ret)
+                               goto out;
+               }
+
+               if (priv->have_rekey_data) {
+                       memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+                       memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+                       kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+                       memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+                       kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+                       kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+                       ret = iwl_trans_send_cmd_pdu(trans(priv),
+                                                REPLY_WOWLAN_KEK_KCK_MATERIAL,
+                                                CMD_SYNC, sizeof(kek_kck_cmd),
+                                                &kek_kck_cmd);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+                                    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+       if (ret)
+               goto out;
+
+       ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+                                CMD_SYNC, sizeof(wakeup_filter_cmd),
+                                &wakeup_filter_cmd);
+       if (ret)
+               goto out;
+
+       ret = iwlagn_send_patterns(priv, wowlan);
+ out:
+       kfree(key_data.rsc_tsc);
+       return ret;
 }
+#endif
index 66118ce..359c47a 100644 (file)
@@ -1458,10 +1458,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
                /* avoid antenna B unless MIMO */
-               valid_tx_ant =
-                       first_antenna(hw_params(priv).valid_tx_ant);
                if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
-                       tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+                       tbl->action = IWL_LEGACY_SWITCH_SISO;
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
        case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1636,10 +1634,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
                /* avoid antenna B unless MIMO */
-               valid_tx_ant =
-                       first_antenna(hw_params(priv).valid_tx_ant);
                if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+                       tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
                break;
        case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
        case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
index 5af9e62..90c55ea 100644 (file)
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
                IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
                IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
                IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+               IWL_CMD(REPLY_D3_CONFIG);
        default:
                return "UNKNOWN";
 
@@ -800,7 +801,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
                                               ctx->active.bssid_addr))
                                continue;
                        ctx->last_tx_rejected = false;
-                       iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
+                       iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
+                               "channel got active");
                }
        }
 
@@ -1032,6 +1034,50 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
        return 0;
 }
 
+static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
+                                     struct iwl_rx_mem_buffer *rxb,
+                                     struct iwl_device_cmd *cmd)
+{
+       struct iwl_wipan_noa_data *new_data, *old_data;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+
+       /* no condition -- we're in softirq */
+       old_data = rcu_dereference_protected(priv->noa_data, true);
+
+       if (noa_notif->noa_active) {
+               u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
+               u32 copylen = len;
+
+               /* EID, len, OUI, subtype */
+               len += 1 + 1 + 3 + 1;
+               /* P2P id, P2P length */
+               len += 1 + 2;
+               copylen += 1 + 2;
+
+               new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+               if (new_data) {
+                       new_data->length = len;
+                       new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
+                       new_data->data[1] = len - 2; /* not counting EID, len */
+                       new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+                       new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+                       new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+                       new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
+                       memcpy(&new_data->data[6], &noa_notif->noa_attribute,
+                              copylen);
+               }
+       } else
+               new_data = NULL;
+
+       rcu_assign_pointer(priv->noa_data, new_data);
+
+       if (old_data)
+               kfree_rcu(old_data, rcu_head);
+
+       return 0;
+}
+
 /**
  * iwl_setup_rx_handlers - Initialize Rx handler callbacks
  *
@@ -1055,6 +1101,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
        handlers[BEACON_NOTIFICATION]           = iwlagn_rx_beacon_notif;
        handlers[REPLY_ADD_STA]                 = iwl_add_sta_callback;
 
+       handlers[REPLY_WIPAN_NOA_NOTIFICATION]  = iwlagn_rx_noa_notification;
+
        /*
         * The same handler is used for both the REPLY to a discrete
         * statistics request from the host as well as for the periodic
@@ -1083,9 +1131,9 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
        priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
 
        /* set up notification wait support */
-       spin_lock_init(&priv->notif_wait_lock);
-       INIT_LIST_HEAD(&priv->notif_waits);
-       init_waitqueue_head(&priv->notif_waitq);
+       spin_lock_init(&priv->shrd->notif_wait_lock);
+       INIT_LIST_HEAD(&priv->shrd->notif_waits);
+       init_waitqueue_head(&priv->shrd->notif_waitq);
 
        /* Set up BT Rx handlers */
        if (priv->cfg->lib->bt_rx_handler_setup)
@@ -1104,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
         * even if the RX handler consumes the RXB we have
         * access to it in the notification wait entry.
         */
-       if (!list_empty(&priv->notif_waits)) {
+       if (!list_empty(&priv->shrd->notif_waits)) {
                struct iwl_notification_wait *w;
 
-               spin_lock(&priv->notif_wait_lock);
-               list_for_each_entry(w, &priv->notif_waits, list) {
+               spin_lock(&priv->shrd->notif_wait_lock);
+               list_for_each_entry(w, &priv->shrd->notif_waits, list) {
                        if (w->cmd != pkt->hdr.cmd)
                                continue;
                        IWL_DEBUG_RX(priv,
@@ -1119,9 +1167,9 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
                        if (w->fn)
                                w->fn(priv, pkt, w->fn_data);
                }
-               spin_unlock(&priv->notif_wait_lock);
+               spin_unlock(&priv->shrd->notif_wait_lock);
 
-               wake_up_all(&priv->notif_waitq);
+               wake_up_all(&priv->shrd->notif_waitq);
        }
 
        if (priv->pre_rx_handler)
index a7a6def..466e4ab 100644 (file)
@@ -45,7 +45,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
        send->filter_flags = old_filter;
 
        if (ret)
-               IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+               IWL_DEBUG_QUIET_RFKILL(priv,
+                       "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
 
        return ret;
 }
@@ -59,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        u8 old_dev_type = send->dev_type;
        int ret;
 
-       iwlagn_init_notification_wait(priv, &disable_wait,
+       iwl_init_notification_wait(priv->shrd, &disable_wait,
                                      REPLY_WIPAN_DEACTIVATION_COMPLETE,
                                      NULL, NULL);
 
@@ -73,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
 
        if (ret) {
                IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-               iwlagn_remove_notification(priv, &disable_wait);
+               iwl_remove_notification(priv->shrd, &disable_wait);
        } else {
-               ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+               ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
                if (ret)
                        IWL_ERR(priv, "Timed out waiting for PAN disable\n");
        }
@@ -116,7 +117,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
        if (ctx->ht.enabled)
                ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
 
-       IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+       IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
                      ctx->qos_data.qos_active,
                      ctx->qos_data.def_qos_parm.qos_flags);
 
@@ -124,7 +125,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
                               sizeof(struct iwl_qosparam_cmd),
                               &ctx->qos_data.def_qos_parm);
        if (ret)
-               IWL_ERR(priv, "Failed to update QoS\n");
+               IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
 }
 
 static int iwlagn_update_beacon(struct iwl_priv *priv,
@@ -559,6 +560,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 
        mutex_lock(&priv->shrd->mutex);
 
+       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+               goto out;
+
        if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
                IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
                goto out;
@@ -850,7 +854,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
                        if (ctx->last_tx_rejected) {
                                ctx->last_tx_rejected = false;
                                iwl_trans_wake_any_queue(trans(priv),
-                                                        ctx->ctxid);
+                                                        ctx->ctxid,
+                                                        "Disassoc: flush queue");
                        }
                        ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
index 4b2aa1d..626ed70 100644 (file)
@@ -647,7 +647,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        int ret;
        struct iwl_addsta_cmd sta_cmd;
        struct iwl_link_quality_cmd lq;
-       bool active;
+       bool active, have_lq = false;
 
        spin_lock_irqsave(&priv->shrd->sta_lock, flags);
        if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
@@ -657,7 +657,10 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 
        memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
        sta_cmd.mode = 0;
-       memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+       if (priv->stations[sta_id].lq) {
+               memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+               have_lq = true;
+       }
 
        active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
        priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -679,7 +682,8 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        if (ret)
                IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
                        priv->stations[sta_id].sta.sta.addr, ret);
-       iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+       if (have_lq)
+               iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 }
 
 int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
@@ -825,28 +829,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
        return ret;
 }
 
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
-                      struct ieee80211_vif *vif,
-                      struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
-                          "station %pM\n", sta->addr);
-       mutex_lock(&priv->shrd->mutex);
-       IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
-                       sta->addr);
-       ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
-       if (ret)
-               IWL_ERR(priv, "Error removing station %pM\n",
-                       sta->addr);
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
 
 void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                     u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
@@ -1459,20 +1441,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
        return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 }
 
-static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
-{
-       unsigned long flags;
 
-       spin_lock_irqsave(&priv->shrd->sta_lock, flags);
-       priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
-       priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
-       priv->stations[sta_id].sta.sta.modify_mask = 0;
-       priv->stations[sta_id].sta.sleep_tx_count = 0;
-       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-       iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-       spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-
-}
 
 void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
 {
@@ -1489,36 +1458,3 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
        spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
 
 }
-
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
-                          struct ieee80211_vif *vif,
-                          enum sta_notify_cmd cmd,
-                          struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       int sta_id;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       switch (cmd) {
-       case STA_NOTIFY_SLEEP:
-               WARN_ON(!sta_priv->client);
-               sta_priv->asleep = true;
-               if (atomic_read(&sta_priv->pending_frames) > 0)
-                       ieee80211_sta_block_awake(hw, sta, true);
-               break;
-       case STA_NOTIFY_AWAKE:
-               WARN_ON(!sta_priv->client);
-               if (!sta_priv->asleep)
-                       break;
-               sta_priv->asleep = false;
-               sta_id = iwl_sta_id(sta);
-               if (sta_id != IWL_INVALID_STATION)
-                       iwl_sta_modify_ps_wake(priv, sta_id);
-               break;
-       default:
-               break;
-       }
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
index 35a6b71..a1a95d5 100644
@@ -283,6 +283,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
 #endif
 
+       if (unlikely(ieee80211_is_probe_resp(fc))) {
+               struct iwl_wipan_noa_data *noa_data =
+                       rcu_dereference(priv->noa_data);
+
+               if (noa_data &&
+                   pskb_expand_head(skb, 0, noa_data->length,
+                                    GFP_ATOMIC) == 0) {
+                       memcpy(skb_put(skb, noa_data->length),
+                              noa_data->data, noa_data->length);
+                       hdr = (struct ieee80211_hdr *)skb->data;
+               }
+       }
+
        hdr_len = ieee80211_hdrlen(fc);
 
        /* For management frames use broadcast id to do not break aggregation */
@@ -777,6 +790,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
                iwl_rx_reply_tx_agg(priv, tx_resp);
 
        if (tx_resp->frame_count == 1) {
+               IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn);
                __skb_queue_head_init(&skbs);
                /*we can free until ssn % q.n_bd not inclusive */
                iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
@@ -800,7 +814,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
                            iwl_is_associated_ctx(ctx) && ctx->vif &&
                            ctx->vif->type == NL80211_IFTYPE_STATION) {
                                ctx->last_tx_rejected = true;
-                               iwl_trans_stop_queue(trans(priv), txq_id);
+                               iwl_trans_stop_queue(trans(priv), txq_id,
+                                       "Tx on passive channel");
 
                                IWL_DEBUG_TX_REPLY(priv,
                                           "TXQ %d status %s (0x%08x) "
@@ -906,11 +921,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                           ba_resp->sta_id);
        IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
                           "scd_flow = %d, scd_ssn = %d\n",
-                          ba_resp->tid,
-                          ba_resp->seq_ctl,
+                          ba_resp->tid, ba_resp->seq_ctl,
                           (unsigned long long)le64_to_cpu(ba_resp->bitmap),
-                          ba_resp->scd_flow,
-                          ba_resp->scd_ssn);
+                          scd_flow, ba_resp_scd_ssn);
 
        /* Mark that the expected block-ack response arrived */
        agg->wait_for_ba = false;
index bacc06c..daf010d 100644
@@ -30,7 +30,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/skbuff.h>
@@ -452,52 +451,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
        iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
 }
 
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
-{
-       if (desc->v_addr)
-               dma_free_coherent(bus(priv)->dev, desc->len,
-                                 desc->v_addr, desc->p_addr);
-       desc->v_addr = NULL;
-       desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
-{
-       iwl_free_fw_desc(priv, &img->code);
-       iwl_free_fw_desc(priv, &img->data);
-}
-
-static void iwl_dealloc_ucode(struct iwl_priv *priv)
-{
-       iwl_free_fw_img(priv, &priv->ucode_rt);
-       iwl_free_fw_img(priv, &priv->ucode_init);
-       iwl_free_fw_img(priv, &priv->ucode_wowlan);
-}
-
-static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
-                            const void *data, size_t len)
-{
-       if (!len) {
-               desc->v_addr = NULL;
-               return -EINVAL;
-       }
-
-       desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
-                                         &desc->p_addr, GFP_KERNEL);
-       if (!desc->v_addr)
-               return -ENOMEM;
-
-       desc->len = len;
-       memcpy(desc->v_addr, data, len);
-       return 0;
-}
-
 static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
 {
        int i;
@@ -555,16 +508,7 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
        BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
 }
 
-
-struct iwlagn_ucode_capabilities {
-       u32 max_probe_length;
-       u32 standard_phy_calibration_size;
-       u32 flags;
-};
-
 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
-                                 struct iwlagn_ucode_capabilities *capa);
 
 #define UCODE_EXPERIMENTAL_INDEX       100
 #define UCODE_EXPERIMENTAL_TAG         "exp"
@@ -1040,30 +984,32 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
        /* Runtime instructions and 2 copies of data:
         * 1) unmodified from disk
         * 2) backup cache for save/restore during power-downs */
-       if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
+       if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
                              pieces.inst, pieces.inst_size))
                goto err_pci_alloc;
-       if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
+       if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
                              pieces.data, pieces.data_size))
                goto err_pci_alloc;
 
        /* Initialization instructions and data */
        if (pieces.init_size && pieces.init_data_size) {
-               if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
+               if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
                                      pieces.init, pieces.init_size))
                        goto err_pci_alloc;
-               if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
+               if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
                                      pieces.init_data, pieces.init_data_size))
                        goto err_pci_alloc;
        }
 
        /* WoWLAN instructions and data */
        if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
-               if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
+               if (iwl_alloc_fw_desc(bus(priv),
+                                     &trans(priv)->ucode_wowlan.code,
                                      pieces.wowlan_inst,
                                      pieces.wowlan_inst_size))
                        goto err_pci_alloc;
-               if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
+               if (iwl_alloc_fw_desc(bus(priv),
+                                     &trans(priv)->ucode_wowlan.data,
                                      pieces.wowlan_data,
                                      pieces.wowlan_data_size))
                        goto err_pci_alloc;
@@ -1156,7 +1102,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
 
  err_pci_alloc:
        IWL_ERR(priv, "failed to allocate pci memory\n");
-       iwl_dealloc_ucode(priv);
+       iwl_dealloc_ucode(trans(priv));
  out_unbind:
        complete(&priv->firmware_loading_complete);
        device_release_driver(bus(priv)->dev);
@@ -1286,14 +1232,14 @@ int iwl_alive_start(struct iwl_priv *priv)
                priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
                priv->cur_rssi_ctx = NULL;
 
-               iwlagn_send_prio_tbl(priv);
+               iwl_send_prio_tbl(trans(priv));
 
                /* FIXME: w/a to force change uCode BT state machine */
-               ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+               ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
                                         BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                if (ret)
                        return ret;
-               ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+               ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
                                         BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                if (ret)
                        return ret;
@@ -1352,7 +1298,7 @@ int iwl_alive_start(struct iwl_priv *priv)
 
 static void iwl_cancel_deferred_work(struct iwl_priv *priv);
 
-static void __iwl_down(struct iwl_priv *priv)
+void __iwl_down(struct iwl_priv *priv)
 {
        int exit_pending;
 
@@ -1415,7 +1361,7 @@ static void __iwl_down(struct iwl_priv *priv)
        priv->beacon_skb = NULL;
 }
 
-static void iwl_down(struct iwl_priv *priv)
+void iwl_down(struct iwl_priv *priv)
 {
        mutex_lock(&priv->shrd->mutex);
        __iwl_down(priv);
@@ -1424,57 +1370,6 @@ static void iwl_down(struct iwl_priv *priv)
        iwl_cancel_deferred_work(priv);
 }
 
-#define MAX_HW_RESTARTS 5
-
-static int __iwl_up(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx;
-       int ret;
-
-       lockdep_assert_held(&priv->shrd->mutex);
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
-               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
-               return -EIO;
-       }
-
-       for_each_context(priv, ctx) {
-               ret = iwlagn_alloc_bcast_station(priv, ctx);
-               if (ret) {
-                       iwl_dealloc_bcast_stations(priv);
-                       return ret;
-               }
-       }
-
-       ret = iwlagn_run_init_ucode(priv);
-       if (ret) {
-               IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
-               goto error;
-       }
-
-       ret = iwlagn_load_ucode_wait_alive(priv,
-                                          &priv->ucode_rt,
-                                          IWL_UCODE_REGULAR);
-       if (ret) {
-               IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
-               goto error;
-       }
-
-       ret = iwl_alive_start(priv);
-       if (ret)
-               goto error;
-       return 0;
-
- error:
-       set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-       __iwl_down(priv);
-       clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
-       IWL_ERR(priv, "Unable to initialize device.\n");
-       return ret;
-}
-
-
 /*****************************************************************************
  *
  * Workqueue callbacks
@@ -1502,7 +1397,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
        mutex_unlock(&priv->shrd->mutex);
 }
 
-static void iwlagn_prepare_restart(struct iwl_priv *priv)
+void iwlagn_prepare_restart(struct iwl_priv *priv)
 {
        struct iwl_rxon_context *ctx;
        bool bt_full_concurrent;
@@ -1559,1498 +1454,182 @@ static void iwl_bg_restart(struct work_struct *data)
        }
 }
 
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_AP),
-       },
-};
-
-static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
-       {
-               .max = 2,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_GO) |
-                        BIT(NL80211_IFTYPE_AP),
-       },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
-       {
-               .max = 2,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
-       },
-};
 
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_dualmode[] = {
-       { .num_different_channels = 1,
-         .max_interfaces = 2,
-         .beacon_int_infra_match = true,
-         .limits = iwlagn_sta_ap_limits,
-         .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
-       },
-       { .num_different_channels = 1,
-         .max_interfaces = 2,
-         .limits = iwlagn_2sta_limits,
-         .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
-       },
-};
 
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
-       { .num_different_channels = 1,
-         .max_interfaces = 2,
-         .beacon_int_infra_match = true,
-         .limits = iwlagn_p2p_sta_go_limits,
-         .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
-       },
-       { .num_different_channels = 1,
-         .max_interfaces = 2,
-         .limits = iwlagn_p2p_2sta_limits,
-         .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
-       },
-};
 
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
-                                 struct iwlagn_ucode_capabilities *capa)
+void iwlagn_disable_roc(struct iwl_priv *priv)
 {
-       int ret;
-       struct ieee80211_hw *hw = priv->hw;
-       struct iwl_rxon_context *ctx;
-
-       hw->rate_control_algorithm = "iwl-agn-rs";
-
-       /* Tell mac80211 our characteristics */
-       hw->flags = IEEE80211_HW_SIGNAL_DBM |
-                   IEEE80211_HW_AMPDU_AGGREGATION |
-                   IEEE80211_HW_NEED_DTIM_PERIOD |
-                   IEEE80211_HW_SPECTRUM_MGMT |
-                   IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
-       /*
-        * Including the following line will crash some AP's.  This
-        * workaround removes the stimulus which causes the crash until
-        * the AP software can be fixed.
-       hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-        */
-
-       hw->flags |= IEEE80211_HW_SUPPORTS_PS |
-                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
-       if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
-               hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-                            IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
-       if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
-               hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
-       hw->sta_data_size = sizeof(struct iwl_station_priv);
-       hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
-       for_each_context(priv, ctx) {
-               hw->wiphy->interface_modes |= ctx->interface_modes;
-               hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
-       }
-
-       BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-
-       if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
-               hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
-               hw->wiphy->n_iface_combinations =
-                       ARRAY_SIZE(iwlagn_iface_combinations_p2p);
-       } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
-               hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
-               hw->wiphy->n_iface_combinations =
-                       ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
-       }
-
-       hw->wiphy->max_remain_on_channel_duration = 1000;
-
-       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
-                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
-                           WIPHY_FLAG_IBSS_RSN;
-
-       if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
-               hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
-                                         WIPHY_WOWLAN_DISCONNECT |
-                                         WIPHY_WOWLAN_EAP_IDENTITY_REQ |
-                                         WIPHY_WOWLAN_RFKILL_RELEASE;
-               if (!iwlagn_mod_params.sw_crypto)
-                       hw->wiphy->wowlan.flags |=
-                               WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
-                               WIPHY_WOWLAN_GTK_REKEY_FAILURE;
-
-               hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
-               hw->wiphy->wowlan.pattern_min_len =
-                                       IWLAGN_WOWLAN_MIN_PATTERN_LEN;
-               hw->wiphy->wowlan.pattern_max_len =
-                                       IWLAGN_WOWLAN_MAX_PATTERN_LEN;
-       }
-
-       if (iwlagn_mod_params.power_save)
-               hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
-       else
-               hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
 
-       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
-       /* we create the 802.11 header and a zero-length SSID element */
-       hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+       lockdep_assert_held(&priv->shrd->mutex);
 
-       /* Default value; 4 EDCA QOS priorities */
-       hw->queues = 4;
+       if (!priv->hw_roc_setup)
+               return;
 
-       hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+       ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 
-       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &priv->bands[IEEE80211_BAND_2GHZ];
-       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
-               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &priv->bands[IEEE80211_BAND_5GHZ];
+       priv->hw_roc_channel = NULL;
 
-       iwl_leds_init(priv);
+       memset(ctx->staging.node_addr, 0, ETH_ALEN);
 
-       ret = ieee80211_register_hw(priv->hw);
-       if (ret) {
-               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
-               return ret;
-       }
-       priv->mac80211_registered = 1;
+       iwlagn_commit_rxon(priv, ctx);
 
-       return 0;
+       ctx->is_active = false;
+       priv->hw_roc_setup = false;
 }
 
-
-static int iwlagn_mac_start(struct ieee80211_hw *hw)
+static void iwlagn_disable_roc_work(struct work_struct *work)
 {
-       struct iwl_priv *priv = hw->priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
+       struct iwl_priv *priv = container_of(work, struct iwl_priv,
+                                            hw_roc_disable_work.work);
 
-       /* we should be verifying the device is ready to be opened */
        mutex_lock(&priv->shrd->mutex);
-       ret = __iwl_up(priv);
+       iwlagn_disable_roc(priv);
        mutex_unlock(&priv->shrd->mutex);
-       if (ret)
-               return ret;
-
-       IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
-       /* Now we should be done, and the READY bit should be set. */
-       if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
-               ret = -EIO;
-
-       iwlagn_led_enable(priv);
-
-       priv->is_open = 1;
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
 }
 
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void iwl_setup_deferred_work(struct iwl_priv *priv)
 {
-       struct iwl_priv *priv = hw->priv;
+       priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME);
 
-       IWL_DEBUG_MAC80211(priv, "enter\n");
+       init_waitqueue_head(&priv->shrd->wait_command_queue);
 
-       if (!priv->is_open)
-               return;
+       INIT_WORK(&priv->restart, iwl_bg_restart);
+       INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
+       INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
+       INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
+       INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
+       INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
+       INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
+                         iwlagn_disable_roc_work);
 
-       priv->is_open = 0;
+       iwl_setup_scan_deferred_work(priv);
 
-       iwl_down(priv);
+       if (priv->cfg->lib->bt_setup_deferred_work)
+               priv->cfg->lib->bt_setup_deferred_work(priv);
 
-       flush_workqueue(priv->shrd->workqueue);
+       init_timer(&priv->statistics_periodic);
+       priv->statistics_periodic.data = (unsigned long)priv;
+       priv->statistics_periodic.function = iwl_bg_statistics_periodic;
 
-       /* User space software may expect getting rfkill changes
-        * even if interface is down */
-       iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
-       iwl_enable_rfkill_int(priv);
+       init_timer(&priv->ucode_trace);
+       priv->ucode_trace.data = (unsigned long)priv;
+       priv->ucode_trace.function = iwl_bg_ucode_trace;
 
-       IWL_DEBUG_MAC80211(priv, "leave\n");
+       init_timer(&priv->watchdog);
+       priv->watchdog.data = (unsigned long)priv;
+       priv->watchdog.function = iwl_bg_watchdog;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int iwlagn_send_patterns(struct iwl_priv *priv,
-                               struct cfg80211_wowlan *wowlan)
+static void iwl_cancel_deferred_work(struct iwl_priv *priv)
 {
-       struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = REPLY_WOWLAN_PATTERNS,
-               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-               .flags = CMD_SYNC,
-       };
-       int i, err;
+       if (priv->cfg->lib->cancel_deferred_work)
+               priv->cfg->lib->cancel_deferred_work(priv);
 
-       if (!wowlan->n_patterns)
-               return 0;
+       cancel_work_sync(&priv->run_time_calib_work);
+       cancel_work_sync(&priv->beacon_update);
 
-       cmd.len[0] = sizeof(*pattern_cmd) +
-                       wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+       iwl_cancel_scan_deferred_work(priv);
 
-       pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
-       if (!pattern_cmd)
-               return -ENOMEM;
+       cancel_work_sync(&priv->bt_full_concurrency);
+       cancel_work_sync(&priv->bt_runtime_config);
+       cancel_delayed_work_sync(&priv->hw_roc_disable_work);
 
-       pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+       del_timer_sync(&priv->statistics_periodic);
+       del_timer_sync(&priv->ucode_trace);
+}
 
-       for (i = 0; i < wowlan->n_patterns; i++) {
-               int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+static void iwl_init_hw_rates(struct iwl_priv *priv,
+                             struct ieee80211_rate *rates)
+{
+       int i;
 
-               memcpy(&pattern_cmd->patterns[i].mask,
-                       wowlan->patterns[i].mask, mask_len);
-               memcpy(&pattern_cmd->patterns[i].pattern,
-                       wowlan->patterns[i].pattern,
-                       wowlan->patterns[i].pattern_len);
-               pattern_cmd->patterns[i].mask_size = mask_len;
-               pattern_cmd->patterns[i].pattern_size =
-                       wowlan->patterns[i].pattern_len;
+       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
+               rates[i].bitrate = iwl_rates[i].ieee * 5;
+               rates[i].hw_value = i; /* Rate scaling will work on indexes */
+               rates[i].hw_value_short = i;
+               rates[i].flags = 0;
+               if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
+                       /*
+                        * If CCK != 1M then set short preamble rate flag.
+                        */
+                       rates[i].flags |=
+                               (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
+                                       0 : IEEE80211_RATE_SHORT_PREAMBLE;
+               }
        }
-
-       cmd.data[0] = pattern_cmd;
-       err = iwl_trans_send_cmd(trans(priv), &cmd);
-       kfree(pattern_cmd);
-       return err;
 }
-#endif
 
-static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
-                                     struct ieee80211_vif *vif,
-                                     struct cfg80211_gtk_rekey_data *data)
+static int iwl_init_drv(struct iwl_priv *priv)
 {
-       struct iwl_priv *priv = hw->priv;
-
-       if (iwlagn_mod_params.sw_crypto)
-               return;
+       int ret;
 
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
+       spin_lock_init(&priv->shrd->sta_lock);
 
-       if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
-               goto out;
+       mutex_init(&priv->shrd->mutex);
 
-       memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
-       memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
-       priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
-       priv->have_rekey_data = true;
+       INIT_LIST_HEAD(&priv->calib_results);
 
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
+       priv->ieee_channels = NULL;
+       priv->ieee_rates = NULL;
+       priv->band = IEEE80211_BAND_2GHZ;
 
-struct wowlan_key_data {
-       struct iwl_rxon_context *ctx;
-       struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
-       struct iwlagn_wowlan_tkip_params_cmd *tkip;
-       const u8 *bssid;
-       bool error, use_rsc_tsc, use_tkip;
-};
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+       priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+       priv->agg_tids_count = 0;
 
-#ifdef CONFIG_PM_SLEEP
-static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
-{
-       int i;
+       /* initialize force reset */
+       priv->force_reset[IWL_RF_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_RF_RESET;
+       priv->force_reset[IWL_FW_RESET].reset_duration =
+               IWL_DELAY_NEXT_FORCE_FW_RELOAD;
 
-       for (i = 0; i < IWLAGN_P1K_SIZE; i++)
-               out[i] = cpu_to_le16(p1k[i]);
-}
+       priv->rx_statistics_jiffies = jiffies;
 
-static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif,
-                                      struct ieee80211_sta *sta,
-                                      struct ieee80211_key_conf *key,
-                                      void *_data)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct wowlan_key_data *data = _data;
-       struct iwl_rxon_context *ctx = data->ctx;
-       struct aes_sc *aes_sc, *aes_tx_sc = NULL;
-       struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
-       struct iwlagn_p1k_cache *rx_p1ks;
-       u8 *rx_mic_key;
-       struct ieee80211_key_seq seq;
-       u32 cur_rx_iv32 = 0;
-       u16 p1k[IWLAGN_P1K_SIZE];
-       int ret, i;
+       /* Choose which receivers/antennas to use */
+       iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
 
-       mutex_lock(&priv->shrd->mutex);
+       iwl_init_scan_params(priv);
 
-       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-            key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
-            !sta && !ctx->key_mapping_keys)
-               ret = iwl_set_default_wep_key(priv, ctx, key);
-       else
-               ret = iwl_set_dynamic_key(priv, ctx, key, sta);
+       /* init bt coex */
+       if (priv->cfg->bt_params &&
+           priv->cfg->bt_params->advanced_bt_coexist) {
+               priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
+               priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
+               priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
+               priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
+               priv->bt_duration = BT_DURATION_LIMIT_DEF;
+               priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
+       }
 
+       ret = iwl_init_channel_map(priv);
        if (ret) {
-               IWL_ERR(priv, "Error setting key during suspend!\n");
-               data->error = true;
+               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
+               goto err;
        }
 
-       switch (key->cipher) {
-       case WLAN_CIPHER_SUITE_TKIP:
-               if (sta) {
-                       tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
-                       tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
-
-                       rx_p1ks = data->tkip->rx_uni;
-
-                       ieee80211_get_key_tx_seq(key, &seq);
-                       tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
-                       tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+       ret = iwl_init_geos(priv);
+       if (ret) {
+               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
+               goto err_free_channel_map;
+       }
+       iwl_init_hw_rates(priv, priv->ieee_rates);
 
-                       ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
-                       iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+       return 0;
 
-                       memcpy(data->tkip->mic_keys.tx,
-                              &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
-                              IWLAGN_MIC_KEY_SIZE);
-
-                       rx_mic_key = data->tkip->mic_keys.rx_unicast;
-               } else {
-                       tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
-                       rx_p1ks = data->tkip->rx_multi;
-                       rx_mic_key = data->tkip->mic_keys.rx_mcast;
-               }
-
-               /*
-                * For non-QoS this relies on the fact that both the uCode and
-                * mac80211 use TID 0 (as they need to to avoid replay attacks)
-                * for checking the IV in the frames.
-                */
-               for (i = 0; i < IWLAGN_NUM_RSC; i++) {
-                       ieee80211_get_key_rx_seq(key, i, &seq);
-                       tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
-                       tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
-                       /* wrapping isn't allowed, AP must rekey */
-                       if (seq.tkip.iv32 > cur_rx_iv32)
-                               cur_rx_iv32 = seq.tkip.iv32;
-               }
-
-               ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
-               iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
-               ieee80211_get_tkip_rx_p1k(key, data->bssid,
-                                         cur_rx_iv32 + 1, p1k);
-               iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
-
-               memcpy(rx_mic_key,
-                      &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
-                      IWLAGN_MIC_KEY_SIZE);
-
-               data->use_tkip = true;
-               data->use_rsc_tsc = true;
-               break;
-       case WLAN_CIPHER_SUITE_CCMP:
-               if (sta) {
-                       u8 *pn = seq.ccmp.pn;
-
-                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
-                       aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
-
-                       ieee80211_get_key_tx_seq(key, &seq);
-                       aes_tx_sc->pn = cpu_to_le64(
-                                       (u64)pn[5] |
-                                       ((u64)pn[4] << 8) |
-                                       ((u64)pn[3] << 16) |
-                                       ((u64)pn[2] << 24) |
-                                       ((u64)pn[1] << 32) |
-                                       ((u64)pn[0] << 40));
-               } else
-                       aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
-
-               /*
-                * For non-QoS this relies on the fact that both the uCode and
-                * mac80211 use TID 0 for checking the IV in the frames.
-                */
-               for (i = 0; i < IWLAGN_NUM_RSC; i++) {
-                       u8 *pn = seq.ccmp.pn;
-
-                       ieee80211_get_key_rx_seq(key, i, &seq);
-                       aes_sc->pn = cpu_to_le64(
-                                       (u64)pn[5] |
-                                       ((u64)pn[4] << 8) |
-                                       ((u64)pn[3] << 16) |
-                                       ((u64)pn[2] << 24) |
-                                       ((u64)pn[1] << 32) |
-                                       ((u64)pn[0] << 40));
-               }
-               data->use_rsc_tsc = true;
-               break;
-       }
-
-       mutex_unlock(&priv->shrd->mutex);
-}
-
-static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
-                             struct cfg80211_wowlan *wowlan)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
-       struct iwl_rxon_cmd rxon;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
-       struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
-       struct wowlan_key_data key_data = {
-               .ctx = ctx,
-               .bssid = ctx->active.bssid_addr,
-               .use_rsc_tsc = false,
-               .tkip = &tkip_cmd,
-               .use_tkip = false,
-       };
-       int ret, i;
-       u16 seq;
-
-       if (WARN_ON(!wowlan))
-               return -EINVAL;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       /* Don't attempt WoWLAN when not associated, tear down instead. */
-       if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
-           !iwl_is_associated_ctx(ctx)) {
-               ret = 1;
-               goto out;
-       }
-
-       key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
-       if (!key_data.rsc_tsc) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
-
-       /*
-        * We know the last used seqno, and the uCode expects to know that
-        * one, it will increment before TX.
-        */
-       seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
-       wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
-
-       /*
-        * For QoS counters, we store the one to use next, so subtract 0x10
-        * since the uCode will add 0x10 before using the value.
-        */
-       for (i = 0; i < 8; i++) {
-               seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
-               seq -= 0x10;
-               wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
-       }
-
-       if (wowlan->disconnect)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
-                                   IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
-       if (wowlan->magic_pkt)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
-       if (wowlan->gtk_rekey_failure)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
-       if (wowlan->eap_identity_req)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
-       if (wowlan->four_way_handshake)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
-       if (wowlan->rfkill_release)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
-       if (wowlan->n_patterns)
-               wakeup_filter_cmd.enabled |=
-                       cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
-
-       iwl_scan_cancel_timeout(priv, 200);
-
-       memcpy(&rxon, &ctx->active, sizeof(rxon));
-
-       iwl_trans_stop_device(trans(priv));
-
-       priv->shrd->wowlan = true;
-
-       ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
-                                          IWL_UCODE_WOWLAN);
-       if (ret)
-               goto error;
-
-       /* now configure WoWLAN ucode */
-       ret = iwl_alive_start(priv);
-       if (ret)
-               goto error;
-
-       memcpy(&ctx->staging, &rxon, sizeof(rxon));
-       ret = iwlagn_commit_rxon(priv, ctx);
-       if (ret)
-               goto error;
-
-       ret = iwl_power_update_mode(priv, true);
-       if (ret)
-               goto error;
-
-       if (!iwlagn_mod_params.sw_crypto) {
-               /* mark all keys clear */
-               priv->ucode_key_table = 0;
-               ctx->key_mapping_keys = 0;
-
-               /*
-                * This needs to be unlocked due to lock ordering
-                * constraints. Since we're in the suspend path
-                * that isn't really a problem though.
-                */
-               mutex_unlock(&priv->shrd->mutex);
-               ieee80211_iter_keys(priv->hw, ctx->vif,
-                                   iwlagn_wowlan_program_keys,
-                                   &key_data);
-               mutex_lock(&priv->shrd->mutex);
-               if (key_data.error) {
-                       ret = -EIO;
-                       goto error;
-               }
-
-               if (key_data.use_rsc_tsc) {
-                       struct iwl_host_cmd rsc_tsc_cmd = {
-                               .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
-                               .flags = CMD_SYNC,
-                               .data[0] = key_data.rsc_tsc,
-                               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
-                               .len[0] = sizeof(*key_data.rsc_tsc),
-                       };
-
-                       ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
-                       if (ret)
-                               goto error;
-               }
-
-               if (key_data.use_tkip) {
-                       ret = iwl_trans_send_cmd_pdu(trans(priv),
-                                                REPLY_WOWLAN_TKIP_PARAMS,
-                                                CMD_SYNC, sizeof(tkip_cmd),
-                                                &tkip_cmd);
-                       if (ret)
-                               goto error;
-               }
-
-               if (priv->have_rekey_data) {
-                       memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
-                       memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
-                       kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
-                       memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
-                       kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
-                       kek_kck_cmd.replay_ctr = priv->replay_ctr;
-
-                       ret = iwl_trans_send_cmd_pdu(trans(priv),
-                                                REPLY_WOWLAN_KEK_KCK_MATERIAL,
-                                                CMD_SYNC, sizeof(kek_kck_cmd),
-                                                &kek_kck_cmd);
-                       if (ret)
-                               goto error;
-               }
-       }
-
-       ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
-                                CMD_SYNC, sizeof(wakeup_filter_cmd),
-                                &wakeup_filter_cmd);
-       if (ret)
-               goto error;
-
-       ret = iwlagn_send_patterns(priv, wowlan);
-       if (ret)
-               goto error;
-
-       device_set_wakeup_enable(bus(priv)->dev, true);
-
-       /* Now let the ucode operate on its own */
-       iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
-                         CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
-       goto out;
-
- error:
-       priv->shrd->wowlan = false;
-       iwlagn_prepare_restart(priv);
-       ieee80211_restart_hw(priv->hw);
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       kfree(key_data.rsc_tsc);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-static int iwlagn_mac_resume(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct ieee80211_vif *vif;
-       unsigned long flags;
-       u32 base, status = 0xffffffff;
-       int ret = -EIO;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
-                         CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
-       base = priv->device_pointers.error_event_table;
-       if (iwlagn_hw_valid_rtc_data_addr(base)) {
-               spin_lock_irqsave(&bus(priv)->reg_lock, flags);
-               ret = iwl_grab_nic_access_silent(bus(priv));
-               if (ret == 0) {
-                       iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
-                       status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
-                       iwl_release_nic_access(bus(priv));
-               }
-               spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               if (ret == 0) {
-                       if (!priv->wowlan_sram)
-                               priv->wowlan_sram =
-                                       kzalloc(priv->ucode_wowlan.data.len,
-                                               GFP_KERNEL);
-
-                       if (priv->wowlan_sram)
-                               _iwl_read_targ_mem_words(
-                                       bus(priv), 0x800000, priv->wowlan_sram,
-                                       priv->ucode_wowlan.data.len / 4);
-               }
-#endif
-       }
-
-       /* we'll clear ctx->vif during iwlagn_prepare_restart() */
-       vif = ctx->vif;
-
-       priv->shrd->wowlan = false;
-
-       device_set_wakeup_enable(bus(priv)->dev, false);
-
-       iwlagn_prepare_restart(priv);
-
-       memset((void *)&ctx->active, 0, sizeof(ctx->active));
-       iwl_connection_init_rx_config(priv, ctx);
-       iwlagn_set_rxon_chain(priv, ctx);
-
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       ieee80211_resume_disconnect(vif);
-
-       return 1;
-}
-#endif
-
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MACDUMP(priv, "enter\n");
-
-       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
-                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
-       if (iwlagn_tx_skb(priv, skb))
-               dev_kfree_skb_any(skb);
-
-       IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif,
-                                      struct ieee80211_key_conf *keyconf,
-                                      struct ieee80211_sta *sta,
-                                      u32 iv32, u16 *phase1key)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
-}
-
-static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_sta *sta,
-                             struct ieee80211_key_conf *key)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       struct iwl_rxon_context *ctx = vif_priv->ctx;
-       int ret;
-       bool is_default_wep_key = false;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (iwlagn_mod_params.sw_crypto) {
-               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
-               return -EOPNOTSUPP;
-       }
-
-       switch (key->cipher) {
-       case WLAN_CIPHER_SUITE_TKIP:
-               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               /* fall through */
-       case WLAN_CIPHER_SUITE_CCMP:
-               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-               break;
-       default:
-               break;
-       }
-
-       /*
-        * We could program these keys into the hardware as well, but we
-        * don't expect much multicast traffic in IBSS and having keys
-        * for more stations is probably more useful.
-        *
-        * Mark key TX-only and return 0.
-        */
-       if (vif->type == NL80211_IFTYPE_ADHOC &&
-           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
-               key->hw_key_idx = WEP_INVALID_OFFSET;
-               return 0;
-       }
-
-       /* If they key was TX-only, accept deletion */
-       if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
-               return 0;
-
-       mutex_lock(&priv->shrd->mutex);
-       iwl_scan_cancel_timeout(priv, 100);
-
-       BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
-
-       /*
-        * If we are getting WEP group key and we didn't receive any key mapping
-        * so far, we are in legacy wep mode (group key only), otherwise we are
-        * in 1X mode.
-        * In legacy wep mode, we use another host command to the uCode.
-        */
-       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-            key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
-               if (cmd == SET_KEY)
-                       is_default_wep_key = !ctx->key_mapping_keys;
-               else
-                       is_default_wep_key =
-                               key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
-       }
-
-
-       switch (cmd) {
-       case SET_KEY:
-               if (is_default_wep_key) {
-                       ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
-                       break;
-               }
-               ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
-               if (ret) {
-                       /*
-                        * can't add key for RX, but we don't need it
-                        * in the device for TX so still return 0
-                        */
-                       ret = 0;
-                       key->hw_key_idx = WEP_INVALID_OFFSET;
-               }
-
-               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
-               break;
-       case DISABLE_KEY:
-               if (is_default_wep_key)
-                       ret = iwl_remove_default_wep_key(priv, ctx, key);
-               else
-                       ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
-
-               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
-                                  struct ieee80211_vif *vif,
-                                  enum ieee80211_ampdu_mlme_action action,
-                                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                  u8 buf_size)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret = -EINVAL;
-       struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
-       struct iwl_rxon_context *ctx =  iwl_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
-                    sta->addr, tid);
-
-       if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
-               return -EACCES;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       switch (action) {
-       case IEEE80211_AMPDU_RX_START:
-               IWL_DEBUG_HT(priv, "start Rx\n");
-               ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
-               break;
-       case IEEE80211_AMPDU_RX_STOP:
-               IWL_DEBUG_HT(priv, "stop Rx\n");
-               ret = iwl_sta_rx_agg_stop(priv, sta, tid);
-               if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
-                       ret = 0;
-               break;
-       case IEEE80211_AMPDU_TX_START:
-               IWL_DEBUG_HT(priv, "start Tx\n");
-               ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
-               break;
-       case IEEE80211_AMPDU_TX_STOP:
-               IWL_DEBUG_HT(priv, "stop Tx\n");
-               ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
-               if ((ret == 0) && (priv->agg_tids_count > 0)) {
-                       priv->agg_tids_count--;
-                       IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
-                                    priv->agg_tids_count);
-               }
-               if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
-                       ret = 0;
-               if (!priv->agg_tids_count && priv->cfg->ht_params &&
-                   priv->cfg->ht_params->use_rts_for_aggregation) {
-                       /*
-                        * switch off RTS/CTS if it was previously enabled
-                        */
-                       sta_priv->lq_sta.lq.general_params.flags &=
-                               ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
-                       iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
-                                       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
-               }
-               break;
-       case IEEE80211_AMPDU_TX_OPERATIONAL:
-               buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
-
-               iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
-                               tid, buf_size);
-
-               /*
-                * If the limit is 0, then it wasn't initialised yet,
-                * use the default. We can do that since we take the
-                * minimum below, and we don't want to go above our
-                * default due to hardware restrictions.
-                */
-               if (sta_priv->max_agg_bufsize == 0)
-                       sta_priv->max_agg_bufsize =
-                               LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-
-               /*
-                * Even though in theory the peer could have different
-                * aggregation reorder buffer sizes for different sessions,
-                * our ucode doesn't allow for that and has a global limit
-                * for each station. Therefore, use the minimum of all the
-                * aggregation sessions and our default value.
-                */
-               sta_priv->max_agg_bufsize =
-                       min(sta_priv->max_agg_bufsize, buf_size);
-
-               if (priv->cfg->ht_params &&
-                   priv->cfg->ht_params->use_rts_for_aggregation) {
-                       /*
-                        * switch to RTS/CTS if it is the prefer protection
-                        * method for HT traffic
-                        */
-
-                       sta_priv->lq_sta.lq.general_params.flags |=
-                               LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
-               }
-               priv->agg_tids_count++;
-               IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
-                            priv->agg_tids_count);
-
-               sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
-                       sta_priv->max_agg_bufsize;
-
-               iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
-                               &sta_priv->lq_sta.lq, CMD_ASYNC, false);
-
-               IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
-                        sta->addr, tid);
-               ret = 0;
-               break;
-       }
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return ret;
-}
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_sta *sta)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
-       int ret = 0;
-       u8 sta_id;
-
-       IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
-                       sta->addr);
-       mutex_lock(&priv->shrd->mutex);
-       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
-                       sta->addr);
-       sta_priv->sta_id = IWL_INVALID_STATION;
-
-       atomic_set(&sta_priv->pending_frames, 0);
-       if (vif->type == NL80211_IFTYPE_AP)
-               sta_priv->client = true;
-
-       ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
-                                    is_ap, sta, &sta_id);
-       if (ret) {
-               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
-                       sta->addr, ret);
-               /* Should we return success if return code is EEXIST ? */
-               goto out;
-       }
-
-       sta_priv->sta_id = sta_id;
-
-       /* Initialize rate scaling */
-       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
-                      sta->addr);
-       iwl_rs_rate_init(priv, sta, sta_id);
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
-                               struct ieee80211_channel_switch *ch_switch)
-{
-       struct iwl_priv *priv = hw->priv;
-       const struct iwl_channel_info *ch_info;
-       struct ieee80211_conf *conf = &hw->conf;
-       struct ieee80211_channel *channel = ch_switch->channel;
-       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-       /*
-        * MULTI-FIXME
-        * When we add support for multiple interfaces, we need to
-        * revisit this. The channel switch command in the device
-        * only affects the BSS context, but what does that really
-        * mean? And what if we get a CSA on the second interface?
-        * This needs a lot of work.
-        */
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       u16 ch;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       mutex_lock(&priv->shrd->mutex);
-
-       if (iwl_is_rfkill(priv->shrd))
-               goto out;
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
-           test_bit(STATUS_SCANNING, &priv->shrd->status) ||
-           test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
-               goto out;
-
-       if (!iwl_is_associated_ctx(ctx))
-               goto out;
-
-       if (!priv->cfg->lib->set_channel_switch)
-               goto out;
-
-       ch = channel->hw_value;
-       if (le16_to_cpu(ctx->active.channel) == ch)
-               goto out;
-
-       ch_info = iwl_get_channel_info(priv, channel->band, ch);
-       if (!is_channel_valid(ch_info)) {
-               IWL_DEBUG_MAC80211(priv, "invalid channel\n");
-               goto out;
-       }
-
-       spin_lock_irq(&priv->shrd->lock);
-
-       priv->current_ht_config.smps = conf->smps_mode;
-
-       /* Configure HT40 channels */
-       ctx->ht.enabled = conf_is_ht(conf);
-       if (ctx->ht.enabled)
-               iwlagn_config_ht40(conf, ctx);
-       else
-               ctx->ht.is_40mhz = false;
-
-       if ((le16_to_cpu(ctx->staging.channel) != ch))
-               ctx->staging.flags = 0;
-
-       iwl_set_rxon_channel(priv, channel, ctx);
-       iwl_set_rxon_ht(priv, ht_conf);
-       iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
-       spin_unlock_irq(&priv->shrd->lock);
-
-       iwl_set_rate(priv);
-       /*
-        * at this point, staging_rxon has the
-        * configuration for channel switch
-        */
-       set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
-       priv->switch_channel = cpu_to_le16(ch);
-       if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
-               clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
-               priv->switch_channel = 0;
-               ieee80211_chswitch_done(ctx->vif, false);
-       }
-
-out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
-                                   unsigned int changed_flags,
-                                   unsigned int *total_flags,
-                                   u64 multicast)
-{
-       struct iwl_priv *priv = hw->priv;
-       __le32 filter_or = 0, filter_nand = 0;
-       struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag)        do { \
-       if (*total_flags & (test))              \
-               filter_or |= (flag);            \
-       else                                    \
-               filter_nand |= (flag);          \
-       } while (0)
-
-       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-                       changed_flags, *total_flags);
-
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-       /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
-       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
-       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-       mutex_lock(&priv->shrd->mutex);
-
-       for_each_context(priv, ctx) {
-               ctx->staging.filter_flags &= ~filter_nand;
-               ctx->staging.filter_flags |= filter_or;
-
-               /*
-                * Not committing directly because hardware can perform a scan,
-                * but we'll eventually commit the filter flags change anyway.
-                */
-       }
-
-       mutex_unlock(&priv->shrd->mutex);
-
-       /*
-        * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_connection_init_rx_config()
-        * since we currently do not support programming multicast
-        * filters into the device.
-        */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       mutex_lock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
-               IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
-               goto done;
-       }
-       if (iwl_is_rfkill(priv->shrd)) {
-               IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
-               goto done;
-       }
-
-       /*
-        * mac80211 will not push any more frames for transmit
-        * until the flush is completed
-        */
-       if (drop) {
-               IWL_DEBUG_MAC80211(priv, "send flush command\n");
-               if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
-                       IWL_ERR(priv, "flush request fail\n");
-                       goto done;
-               }
-       }
-       IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
-       iwl_trans_wait_tx_queue_empty(trans(priv));
-done:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwlagn_disable_roc(struct iwl_priv *priv)
-{
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-
-       lockdep_assert_held(&priv->shrd->mutex);
-
-       if (!priv->hw_roc_setup)
-               return;
-
-       ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
-       ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
-       priv->hw_roc_channel = NULL;
-
-       memset(ctx->staging.node_addr, 0, ETH_ALEN);
-
-       iwlagn_commit_rxon(priv, ctx);
-
-       ctx->is_active = false;
-       priv->hw_roc_setup = false;
-}
-
-static void iwlagn_disable_roc_work(struct work_struct *work)
-{
-       struct iwl_priv *priv = container_of(work, struct iwl_priv,
-                                            hw_roc_disable_work.work);
-
-       mutex_lock(&priv->shrd->mutex);
-       iwlagn_disable_roc(priv);
-       mutex_unlock(&priv->shrd->mutex);
-}
-
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
-                                    struct ieee80211_channel *channel,
-                                    enum nl80211_channel_type channel_type,
-                                    int duration)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-       int err = 0;
-
-       if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
-               return -EOPNOTSUPP;
-
-       if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
-               return -EOPNOTSUPP;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       priv->hw_roc_channel = channel;
-       priv->hw_roc_chantype = channel_type;
-       priv->hw_roc_duration = duration;
-       priv->hw_roc_start_notified = false;
-       cancel_delayed_work(&priv->hw_roc_disable_work);
-
-       if (!ctx->is_active) {
-               ctx->is_active = true;
-               ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
-               memcpy(ctx->staging.node_addr,
-                      priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
-                      ETH_ALEN);
-               memcpy(ctx->staging.bssid_addr,
-                      priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
-                      ETH_ALEN);
-               err = iwlagn_commit_rxon(priv, ctx);
-               if (err)
-                       goto out;
-               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
-                                            RXON_FILTER_PROMISC_MSK |
-                                            RXON_FILTER_CTL2HOST_MSK;
-
-               err = iwlagn_commit_rxon(priv, ctx);
-               if (err) {
-                       iwlagn_disable_roc(priv);
-                       goto out;
-               }
-               priv->hw_roc_setup = true;
-       }
-
-       err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
-       if (err)
-               iwlagn_disable_roc(priv);
-
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
-               return -EOPNOTSUPP;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-       iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
-       iwlagn_disable_roc(priv);
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return 0;
-}
-
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif,
-                             const u8 *bssid,
-                             enum ieee80211_tx_sync_type type)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       struct iwl_rxon_context *ctx = vif_priv->ctx;
-       int ret;
-       u8 sta_id;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       if (iwl_is_associated_ctx(ctx)) {
-               ret = 0;
-               goto out;
-       }
-
-       if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
-       if (ret)
-               goto out;
-
-       if (WARN_ON(sta_id != ctx->ap_sta_id)) {
-               ret = -EIO;
-               goto out_remove_sta;
-       }
-
-       memcpy(ctx->bssid, bssid, ETH_ALEN);
-       ctx->preauth_bssid = true;
-
-       ret = iwlagn_commit_rxon(priv, ctx);
-
-       if (ret == 0)
-               goto out;
-
- out_remove_sta:
-       iwl_remove_station(priv, sta_id, bssid);
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
-                                  struct ieee80211_vif *vif,
-                                  const u8 *bssid,
-                                  enum ieee80211_tx_sync_type type)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       struct iwl_rxon_context *ctx = vif_priv->ctx;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       if (iwl_is_associated_ctx(ctx))
-               goto out;
-
-       iwl_remove_station(priv, ctx->ap_sta_id, bssid);
-       ctx->preauth_bssid = false;
-       /* no need to commit */
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl_setup_deferred_work(struct iwl_priv *priv)
-{
-       priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME);
-
-       init_waitqueue_head(&priv->shrd->wait_command_queue);
-
-       INIT_WORK(&priv->restart, iwl_bg_restart);
-       INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
-       INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
-       INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
-       INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
-       INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
-       INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
-                         iwlagn_disable_roc_work);
-
-       iwl_setup_scan_deferred_work(priv);
-
-       if (priv->cfg->lib->bt_setup_deferred_work)
-               priv->cfg->lib->bt_setup_deferred_work(priv);
-
-       init_timer(&priv->statistics_periodic);
-       priv->statistics_periodic.data = (unsigned long)priv;
-       priv->statistics_periodic.function = iwl_bg_statistics_periodic;
-
-       init_timer(&priv->ucode_trace);
-       priv->ucode_trace.data = (unsigned long)priv;
-       priv->ucode_trace.function = iwl_bg_ucode_trace;
-
-       init_timer(&priv->watchdog);
-       priv->watchdog.data = (unsigned long)priv;
-       priv->watchdog.function = iwl_bg_watchdog;
-}
-
-static void iwl_cancel_deferred_work(struct iwl_priv *priv)
-{
-       if (priv->cfg->lib->cancel_deferred_work)
-               priv->cfg->lib->cancel_deferred_work(priv);
-
-       cancel_work_sync(&priv->run_time_calib_work);
-       cancel_work_sync(&priv->beacon_update);
-
-       iwl_cancel_scan_deferred_work(priv);
-
-       cancel_work_sync(&priv->bt_full_concurrency);
-       cancel_work_sync(&priv->bt_runtime_config);
-       cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
-       del_timer_sync(&priv->statistics_periodic);
-       del_timer_sync(&priv->ucode_trace);
-}
-
-static void iwl_init_hw_rates(struct iwl_priv *priv,
-                             struct ieee80211_rate *rates)
-{
-       int i;
-
-       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
-               rates[i].bitrate = iwl_rates[i].ieee * 5;
-               rates[i].hw_value = i; /* Rate scaling will work on indexes */
-               rates[i].hw_value_short = i;
-               rates[i].flags = 0;
-               if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
-                       /*
-                        * If CCK != 1M then set short preamble rate flag.
-                        */
-                       rates[i].flags |=
-                               (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
-                                       0 : IEEE80211_RATE_SHORT_PREAMBLE;
-               }
-       }
-}
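
Note on the rate loop removed above: iwl_rates[i].ieee is the 802.11 supported-rate value in units of 500 kb/s, while mac80211's ieee80211_rate.bitrate field is in units of 100 kb/s, which is why it is multiplied by five. A minimal standalone sketch of that conversion (the rate table below is illustrative, not the driver's own table):

	#include <stdio.h>

	/* 802.11 rate codes in 500 kb/s units: 1, 2, 5.5, 11, 6, 9 and 54 Mb/s */
	static const unsigned char ieee_rates_500kbps[] = { 2, 4, 11, 22, 12, 18, 108 };

	int main(void)
	{
		for (unsigned int i = 0; i < sizeof(ieee_rates_500kbps); i++) {
			unsigned int r = ieee_rates_500kbps[i];
			/* same conversion as rates[i].bitrate = iwl_rates[i].ieee * 5 */
			printf("ieee %3u -> bitrate %4u (100 kb/s) = %u.%u Mb/s\n",
			       r, r * 5, r / 2, (r & 1) * 5);
		}
		return 0;
	}
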
-
-static int iwl_init_drv(struct iwl_priv *priv)
-{
-       int ret;
-
-       spin_lock_init(&priv->shrd->sta_lock);
-
-       mutex_init(&priv->shrd->mutex);
-
-       priv->ieee_channels = NULL;
-       priv->ieee_rates = NULL;
-       priv->band = IEEE80211_BAND_2GHZ;
-
-       priv->iw_mode = NL80211_IFTYPE_STATION;
-       priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
-       priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-       priv->agg_tids_count = 0;
-
-       /* initialize force reset */
-       priv->force_reset[IWL_RF_RESET].reset_duration =
-               IWL_DELAY_NEXT_FORCE_RF_RESET;
-       priv->force_reset[IWL_FW_RESET].reset_duration =
-               IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
-       priv->rx_statistics_jiffies = jiffies;
-
-       /* Choose which receivers/antennas to use */
-       iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
-
-       iwl_init_scan_params(priv);
-
-       /* init bt coex */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
-               priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
-               priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
-               priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
-               priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
-               priv->bt_duration = BT_DURATION_LIMIT_DEF;
-               priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
-       }
-
-       ret = iwl_init_channel_map(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
-               goto err;
-       }
-
-       ret = iwl_init_geos(priv);
-       if (ret) {
-               IWL_ERR(priv, "initializing geos failed: %d\n", ret);
-               goto err_free_channel_map;
-       }
-       iwl_init_hw_rates(priv, priv->ieee_rates);
-
-       return 0;
-
-err_free_channel_map:
-       iwl_free_channel_map(priv);
-err:
-       return ret;
-}
+err_free_channel_map:
+       iwl_free_channel_map(priv);
+err:
+       return ret;
+}
 
 static void iwl_uninit_drv(struct iwl_priv *priv)
 {
@@ -3061,81 +1640,13 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
                kmem_cache_destroy(priv->tx_cmd_pool);
        kfree(priv->scan_cmd);
        kfree(priv->beacon_cmd);
+       kfree(rcu_dereference_raw(priv->noa_data));
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        kfree(priv->wowlan_sram);
 #endif
 }
 
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
-                          enum ieee80211_rssi_event rssi_event)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->shrd->mutex);
-
-       if (priv->cfg->bt_params &&
-                       priv->cfg->bt_params->advanced_bt_coexist) {
-               if (rssi_event == RSSI_EVENT_LOW)
-                       priv->bt_enable_pspoll = true;
-               else if (rssi_event == RSSI_EVENT_HIGH)
-                       priv->bt_enable_pspoll = false;
-
-               iwlagn_send_advance_bt_config(priv);
-       } else {
-               IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
-                               "ignoring RSSI callback\n");
-       }
-
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
-                          struct ieee80211_sta *sta, bool set)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       queue_work(priv->shrd->workqueue, &priv->beacon_update);
 
-       return 0;
-}
-
-struct ieee80211_ops iwlagn_hw_ops = {
-       .tx = iwlagn_mac_tx,
-       .start = iwlagn_mac_start,
-       .stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM_SLEEP
-       .suspend = iwlagn_mac_suspend,
-       .resume = iwlagn_mac_resume,
-#endif
-       .add_interface = iwlagn_mac_add_interface,
-       .remove_interface = iwlagn_mac_remove_interface,
-       .change_interface = iwlagn_mac_change_interface,
-       .config = iwlagn_mac_config,
-       .configure_filter = iwlagn_configure_filter,
-       .set_key = iwlagn_mac_set_key,
-       .update_tkip_key = iwlagn_mac_update_tkip_key,
-       .set_rekey_data = iwlagn_mac_set_rekey_data,
-       .conf_tx = iwlagn_mac_conf_tx,
-       .bss_info_changed = iwlagn_bss_info_changed,
-       .ampdu_action = iwlagn_mac_ampdu_action,
-       .hw_scan = iwlagn_mac_hw_scan,
-       .sta_notify = iwlagn_mac_sta_notify,
-       .sta_add = iwlagn_mac_sta_add,
-       .sta_remove = iwlagn_mac_sta_remove,
-       .channel_switch = iwlagn_mac_channel_switch,
-       .flush = iwlagn_mac_flush,
-       .tx_last_beacon = iwlagn_mac_tx_last_beacon,
-       .remain_on_channel = iwlagn_mac_remain_on_channel,
-       .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
-       .rssi_callback = iwlagn_mac_rssi_callback,
-       CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
-       CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
-       .tx_sync = iwlagn_mac_tx_sync,
-       .finish_tx_sync = iwlagn_mac_finish_tx_sync,
-       .set_tim = iwlagn_mac_set_tim,
-};
 
 static u32 iwl_hw_detect(struct iwl_priv *priv)
 {
@@ -3169,26 +1680,35 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
        return priv->cfg->lib->set_hw_params(priv);
 }
 
-/* This function both allocates and initializes hw and priv. */
-static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
-{
-       struct iwl_priv *priv;
-       /* mac80211 allocates memory for this device instance, including
-        *   space for this driver's private structure */
-       struct ieee80211_hw *hw;
 
-       hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
-       if (hw == NULL) {
-               pr_err("%s: Can not allocate network device\n",
-                      cfg->name);
-               goto out;
-       }
 
-       priv = hw->priv;
-       priv->hw = hw;
+static void iwl_debug_config(struct iwl_priv *priv)
+{
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
 
-out:
-       return hw;
+       dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_SVTOOL "
+#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+               "enabled\n");
+#else
+               "disabled\n");
+#endif
 }
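
Note: the new iwl_debug_config() relies on C string-literal concatenation; each dev_printk() call is split across an #ifdef so the preprocessor supplies either the "enabled\n" or the "disabled\n" literal that completes the argument list. A minimal standalone analogue of the same trick (CONFIG_EXAMPLE_FEATURE is a made-up symbol used only for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* the #ifdef picks which literal (and closing parenthesis) ends the call */
		printf("CONFIG_EXAMPLE_FEATURE "
	#ifdef CONFIG_EXAMPLE_FEATURE
			"enabled\n");
	#else
			"disabled\n");
	#endif
		return 0;
	}
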
 
 int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
@@ -3203,8 +1723,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
        /************************
         * 1. Allocating HW data
         ************************/
-       hw = iwl_alloc_all(cfg);
+       hw = iwl_alloc_all();
        if (!hw) {
+               pr_err("%s: Cannot allocate network device\n", cfg->name);
                err = -ENOMEM;
                goto out;
        }
@@ -3225,6 +1746,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
 
        SET_IEEE80211_DEV(hw, bus(priv)->dev);
 
+       /* what debugging capabilities we have */
+       iwl_debug_config(priv);
+
        IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
        priv->cfg = cfg;
 
@@ -3396,7 +1920,7 @@ void __devexit iwl_remove(struct iwl_priv * priv)

        /* This will stop the queues, move the device to low power state */
        iwl_trans_stop_device(trans(priv));
 
-       iwl_dealloc_ucode(priv);
+       iwl_dealloc_ucode(trans(priv));
 
        iwl_eeprom_free(priv);
 
index 3856aba..f2f1070 100644 (file)
 
 #include "iwl-dev.h"
 
+struct iwlagn_ucode_capabilities {
+       u32 max_probe_length;
+       u32 standard_phy_calibration_size;
+       u32 flags;
+};
+
 extern struct ieee80211_ops iwlagn_hw_ops;
 
 int iwl_reset_ict(struct iwl_trans *trans);
@@ -77,6 +83,15 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
        hdr->data_valid = 1;
 }
 
+void __iwl_down(struct iwl_priv *priv);
+void iwl_down(struct iwl_priv *priv);
+void iwlagn_prepare_restart(struct iwl_priv *priv);
+
+/* MAC80211 */
+struct ieee80211_hw *iwl_alloc_all(void);
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+                             struct iwlagn_ucode_capabilities *capa);
+
 /* RXON */
 int iwlagn_set_pan_params(struct iwl_priv *priv);
 int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -93,12 +108,11 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
 int iwlagn_rx_calib_result(struct iwl_priv *priv,
                            struct iwl_rx_mem_buffer *rxb,
                            struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
 int iwlagn_run_init_ucode(struct iwl_priv *priv);
 int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
-                                struct fw_img *image,
-                                enum iwlagn_ucode_type ucode_type);
+                                enum iwl_ucode_type ucode_type);
 
 /* lib */
 int iwlagn_send_tx_power(struct iwl_priv *priv);
@@ -107,6 +121,12 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
 int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
 int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+int iwlagn_send_patterns(struct iwl_priv *priv,
+                        struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv,
+                  struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+#endif
 
 /* rx */
 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
@@ -198,9 +218,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                           struct ieee80211_sta *sta, u8 *sta_id_r);
 int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
                       const u8 *addr);
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                      struct ieee80211_sta *sta);
-
 u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                    const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
 
@@ -318,10 +335,6 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
 int iwl_update_bcast_station(struct iwl_priv *priv,
                             struct iwl_rxon_context *ctx);
 int iwl_update_bcast_stations(struct iwl_priv *priv);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
-                          struct ieee80211_vif *vif,
-                          enum sta_notify_cmd cmd,
-                          struct ieee80211_sta *sta);
 
 /* rate */
 static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -343,22 +356,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
 void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
 void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
 
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
-                             struct iwl_notification_wait *wait_entry,
-                             u8 cmd,
-                             void (*fn)(struct iwl_priv *priv,
-                                        struct iwl_rx_packet *pkt,
-                                        void *data),
-                             void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
-                        struct iwl_notification_wait *wait_entry,
-                        unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
-                          struct iwl_notification_wait *wait_entry);
 extern int iwlagn_init_alive_start(struct iwl_priv *priv);
 extern int iwl_alive_start(struct iwl_priv *priv);
 /* svtool */
index 2a2dc45..e1d7825 100644 (file)
@@ -101,17 +101,11 @@ extern struct iwl_cfg iwl100_bg_cfg;
 extern struct iwl_cfg iwl130_bgn_cfg;
 extern struct iwl_cfg iwl130_bg_cfg;
 extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
 extern struct iwl_cfg iwl2000_2bgn_d_cfg;
 extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
 extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
 extern struct iwl_cfg iwl105_bgn_cfg;
 extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
 extern struct iwl_cfg iwl135_bgn_cfg;
 
 #endif /* __iwl_pci_h__ */
index 69d5f85..f4eccf5 100644 (file)
@@ -198,6 +198,7 @@ enum {
        REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
        REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
        REPLY_WOWLAN_GET_STATUS = 0xe5,
+       REPLY_D3_CONFIG = 0xd3,
 
        REPLY_MAX = 0xff
 };
@@ -3801,6 +3802,19 @@ struct iwl_bt_coex_prot_env_cmd {
 } __attribute__((packed));
 
 /*
+ * REPLY_D3_CONFIG
+ */
+enum iwlagn_d3_wakeup_filters {
+       IWLAGN_D3_WAKEUP_RFKILL         = BIT(0),
+       IWLAGN_D3_WAKEUP_SYSASSERT      = BIT(1),
+};
+
+struct iwlagn_d3_config_cmd {
+       __le32 min_sleep_time;
+       __le32 wakeup_flags;
+} __packed;
+
+/*
  * REPLY_WOWLAN_PATTERNS
  */
 #define IWLAGN_WOWLAN_MIN_PATTERN_LEN  16
@@ -3830,19 +3844,16 @@ enum iwlagn_wowlan_wakeup_filters {
        IWLAGN_WOWLAN_WAKEUP_BEACON_MISS        = BIT(2),
        IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE        = BIT(3),
        IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL     = BIT(4),
-       IWLAGN_WOWLAN_WAKEUP_RFKILL             = BIT(5),
-       IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR        = BIT(6),
-       IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ      = BIT(7),
-       IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE     = BIT(8),
-       IWLAGN_WOWLAN_WAKEUP_ALWAYS             = BIT(9),
-       IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT  = BIT(10),
+       IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ      = BIT(5),
+       IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE     = BIT(6),
+       IWLAGN_WOWLAN_WAKEUP_ALWAYS             = BIT(7),
+       IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT  = BIT(8),
 };
 
 struct iwlagn_wowlan_wakeup_filter_cmd {
        __le32 enabled;
        __le16 non_qos_seq;
-       u8 min_sleep_seconds;
-       u8 reserved;
+       __le16 reserved;
        __le16 qos_seq[8];
 };
 
index fcf5416..3b6f48b 100644 (file)
@@ -836,19 +836,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
 }
 #endif
 
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
-       unsigned long flags;
-       struct iwl_notification_wait *wait_entry;
-
-       spin_lock_irqsave(&priv->notif_wait_lock, flags);
-       list_for_each_entry(wait_entry, &priv->notif_waits, list)
-               wait_entry->aborted = true;
-       spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
-       wake_up_all(&priv->notif_waitq);
-}
-
 void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
 {
        unsigned int reload_msec;
@@ -860,7 +847,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
        /* Cancel currently queued command. */
        clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
 
-       iwlagn_abort_notification_waits(priv);
+       iwl_abort_notification_waits(priv->shrd);
 
        /* Keep the restart process from trying to send host
         * commands by clearing the ready bit */
@@ -1120,229 +1107,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
                                        &statistics_cmd);
 }
 
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif, u16 queue,
-                   const struct ieee80211_tx_queue_params *params)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx;
-       unsigned long flags;
-       int q;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (!iwl_is_ready_rf(priv->shrd)) {
-               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
-               return -EIO;
-       }
-
-       if (queue >= AC_NUM) {
-               IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
-               return 0;
-       }
-
-       q = AC_NUM - 1 - queue;
-
-       spin_lock_irqsave(&priv->shrd->lock, flags);
-
-       /*
-        * MULTI-FIXME
-        * This may need to be done per interface in nl80211/cfg80211/mac80211.
-        */
-       for_each_context(priv, ctx) {
-               ctx->qos_data.def_qos_parm.ac[q].cw_min =
-                       cpu_to_le16(params->cw_min);
-               ctx->qos_data.def_qos_parm.ac[q].cw_max =
-                       cpu_to_le16(params->cw_max);
-               ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
-               ctx->qos_data.def_qos_parm.ac[q].edca_txop =
-                               cpu_to_le16((params->txop * 32));
-
-               ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
-       }
-
-       spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return 0;
-}
-
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
-       struct iwl_priv *priv = hw->priv;
-
-       return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-
-static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
-       iwl_connection_init_rx_config(priv, ctx);
-
-       iwlagn_set_rxon_chain(priv, ctx);
-
-       return iwlagn_commit_rxon(priv, ctx);
-}
-
-static int iwl_setup_interface(struct iwl_priv *priv,
-                              struct iwl_rxon_context *ctx)
-{
-       struct ieee80211_vif *vif = ctx->vif;
-       int err;
-
-       lockdep_assert_held(&priv->shrd->mutex);
-
-       /*
-        * This variable will be correct only when there's just
-        * a single context, but all code using it is for hardware
-        * that supports only one context.
-        */
-       priv->iw_mode = vif->type;
-
-       ctx->is_active = true;
-
-       err = iwl_set_mode(priv, ctx);
-       if (err) {
-               if (!ctx->always_active)
-                       ctx->is_active = false;
-               return err;
-       }
-
-       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
-           vif->type == NL80211_IFTYPE_ADHOC) {
-               /*
-                * pretend to have high BT traffic as long as we
-                * are operating in IBSS mode, as this will cause
-                * the rate scaling etc. to behave as intended.
-                */
-               priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
-       }
-
-       return 0;
-}
-
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
-                            struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-       struct iwl_rxon_context *tmp, *ctx = NULL;
-       int err;
-       enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
-
-       IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
-                          viftype, vif->addr);
-
-       cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
-       mutex_lock(&priv->shrd->mutex);
-
-       iwlagn_disable_roc(priv);
-
-       if (!iwl_is_ready_rf(priv->shrd)) {
-               IWL_WARN(priv, "Try to add interface when device not ready\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       for_each_context(priv, tmp) {
-               u32 possible_modes =
-                       tmp->interface_modes | tmp->exclusive_interface_modes;
-
-               if (tmp->vif) {
-                       /* check if this busy context is exclusive */
-                       if (tmp->exclusive_interface_modes &
-                                               BIT(tmp->vif->type)) {
-                               err = -EINVAL;
-                               goto out;
-                       }
-                       continue;
-               }
-
-               if (!(possible_modes & BIT(viftype)))
-                       continue;
-
-               /* have maybe usable context w/o interface */
-               ctx = tmp;
-               break;
-       }
-
-       if (!ctx) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
-
-       vif_priv->ctx = ctx;
-       ctx->vif = vif;
-
-       err = iwl_setup_interface(priv, ctx);
-       if (!err)
-               goto out;
-
-       ctx->vif = NULL;
-       priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
-       mutex_unlock(&priv->shrd->mutex);
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-       return err;
-}
-
-static void iwl_teardown_interface(struct iwl_priv *priv,
-                                  struct ieee80211_vif *vif,
-                                  bool mode_change)
-{
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       lockdep_assert_held(&priv->shrd->mutex);
-
-       if (priv->scan_vif == vif) {
-               iwl_scan_cancel_timeout(priv, 200);
-               iwl_force_scan_end(priv);
-       }
-
-       if (!mode_change) {
-               iwl_set_mode(priv, ctx);
-               if (!ctx->always_active)
-                       ctx->is_active = false;
-       }
-
-       /*
-        * When removing the IBSS interface, overwrite the
-        * BT traffic load with the stored one from the last
-        * notification, if any. If this is a device that
-        * doesn't implement this, this has no effect since
-        * both values are the same and zero.
-        */
-       if (vif->type == NL80211_IFTYPE_ADHOC)
-               priv->bt_traffic_load = priv->last_bt_traffic_load;
-}
-
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       mutex_lock(&priv->shrd->mutex);
-
-       if (WARN_ON(ctx->vif != vif)) {
-               struct iwl_rxon_context *tmp;
-               IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
-               for_each_context(priv, tmp)
-                       IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
-                               tmp->ctxid, tmp, tmp->vif);
-       }
-       ctx->vif = NULL;
-
-       iwl_teardown_interface(priv, vif, false);
 
-       mutex_unlock(&priv->shrd->mutex);
 
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 
@@ -1649,97 +1415,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
        return 0;
 }
 
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
-                               struct ieee80211_vif *vif,
-                               enum nl80211_iftype newtype, bool newp2p)
-{
-       struct iwl_priv *priv = hw->priv;
-       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-       struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_rxon_context *tmp;
-       enum nl80211_iftype newviftype = newtype;
-       u32 interface_modes;
-       int err;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
-       mutex_lock(&priv->shrd->mutex);
-
-       if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
-               /*
-                * Huh? But wait ... this can maybe happen when
-                * we're in the middle of a firmware restart!
-                */
-               err = -EBUSY;
-               goto out;
-       }
-
-       interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
-       if (!(interface_modes & BIT(newtype))) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       /*
-        * Refuse a change that should be done by moving from the PAN
-        * context to the BSS context instead, if the BSS context is
-        * available and can support the new interface type.
-        */
-       if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
-           (bss_ctx->interface_modes & BIT(newtype) ||
-            bss_ctx->exclusive_interface_modes & BIT(newtype))) {
-               BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-               err = -EBUSY;
-               goto out;
-       }
-
-       if (ctx->exclusive_interface_modes & BIT(newtype)) {
-               for_each_context(priv, tmp) {
-                       if (ctx == tmp)
-                               continue;
-
-                       if (!tmp->vif)
-                               continue;
-
-                       /*
-                        * The current mode switch would be exclusive, but
-                        * another context is active ... refuse the switch.
-                        */
-                       err = -EBUSY;
-                       goto out;
-               }
-       }
-
-       /* success */
-       iwl_teardown_interface(priv, vif, true);
-       vif->type = newviftype;
-       vif->p2p = newp2p;
-       err = iwl_setup_interface(priv, ctx);
-       WARN_ON(err);
-       /*
-        * We've switched internally, but submitting to the
-        * device may have failed for some reason. Mask this
-        * error, because otherwise mac80211 will not switch
-        * (and set the interface type back) and we'll be
-        * out of sync with it.
-        */
-       err = 0;
-
- out:
-       mutex_unlock(&priv->shrd->mutex);
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       return err;
-}
 
 int iwl_cmd_echo_test(struct iwl_priv *priv)
 {
        int ret;
        struct iwl_host_cmd cmd = {
                .id = REPLY_ECHO,
+               .len = { 0 },
                .flags = CMD_SYNC,
        };
 
index f2fc288..6da53a3 100644 (file)
@@ -186,8 +186,9 @@ struct iwl_ht_params {
  * @ht_params: pointer to ht parameters
  * @bt_params: pointer to bt parameters
  * @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
  * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data;
+ *     don't send it to those devices
  * @scan_antennas: available antenna for scan operation
  * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
  * @adv_pm: advance power management
@@ -224,8 +225,8 @@ struct iwl_cfg {
        struct iwl_ht_params *ht_params;
        struct iwl_bt_params *bt_params;
        enum iwl_pa_type pa_type;         /* if used set to IWL_PA_SYSTEM */
-       const bool need_dc_calib;         /* if used set to true */
        const bool need_temp_offset_calib; /* if used set to true */
+       const bool no_xtal_calib;
        u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
        enum iwl_led_mode led_mode;
        const bool adv_pm;
@@ -239,10 +240,6 @@ struct iwl_cfg {
  *   L i b                 *
  ***************************/
 
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif, u16 queue,
-                   const struct ieee80211_tx_queue_params *params);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                           int hw_decrypt);
 int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -262,13 +259,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
                                   struct iwl_rxon_context *ctx);
 void iwl_set_rate(struct iwl_priv *priv);
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
-                         struct ieee80211_vif *vif);
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
-                             struct ieee80211_vif *vif);
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
-                            struct ieee80211_vif *vif,
-                            enum nl80211_iftype newtype, bool newp2p);
 int iwl_cmd_echo_test(struct iwl_priv *priv);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
@@ -325,9 +315,6 @@ void iwl_init_scan_params(struct iwl_priv *priv);
 int iwl_scan_cancel(struct iwl_priv *priv);
 void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
 void iwl_force_scan_end(struct iwl_priv *priv);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif,
-                   struct cfg80211_scan_request *req);
 void iwl_internal_short_hw_scan(struct iwl_priv *priv);
 int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
 u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
index b9f3267..fbc3095 100644 (file)
 #define CSR_HW_REV_TYPE_6x35          CSR_HW_REV_TYPE_6x05
 #define CSR_HW_REV_TYPE_2x30          (0x00000C0)
 #define CSR_HW_REV_TYPE_2x00          (0x0000100)
-#define CSR_HW_REV_TYPE_200           (0x0000110)
-#define CSR_HW_REV_TYPE_230           (0x0000120)
+#define CSR_HW_REV_TYPE_105           (0x0000110)
+#define CSR_HW_REV_TYPE_135           (0x0000120)
 #define CSR_HW_REV_TYPE_NONE           (0x00001F0)
 
 /* EEPROM REG */
index 69a77e2..f8fc239 100644 (file)
@@ -47,20 +47,21 @@ do {                                                                        \
 } while (0)
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, args...)                              \
+#define IWL_DEBUG(m, level, fmt, ...)                                  \
 do {                                                                   \
        if (iwl_get_debug_level((m)->shrd) & (level))                   \
-               dev_printk(KERN_ERR, bus(m)->dev,                       \
-                        "%c %s " fmt, in_interrupt() ? 'I' : 'U',      \
-                       __func__ , ## args);                            \
+               dev_err(bus(m)->dev, "%c %s " fmt,                      \
+                       in_interrupt() ? 'I' : 'U', __func__,           \
+                       ##__VA_ARGS__);                                 \
 } while (0)
 
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)                                \
+#define IWL_DEBUG_LIMIT(m, level, fmt, ...)                            \
 do {                                                                   \
-       if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
-               dev_printk(KERN_ERR, bus(m)->dev,                       \
-                       "%c %s " fmt, in_interrupt() ? 'I' : 'U',       \
-                        __func__ , ## args);                           \
+       if (iwl_get_debug_level((m)->shrd) & (level) &&                 \
+           net_ratelimit())                                            \
+               dev_err(bus(m)->dev, "%c %s " fmt,                      \
+                       in_interrupt() ? 'I' : 'U', __func__,           \
+                       ##__VA_ARGS__);                                 \
 } while (0)
 
 #define iwl_print_hex_dump(m, level, p, len)                           \
@@ -70,10 +71,29 @@ do {                                                                \
                               DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);   \
 } while (0)
 
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...)                            \
+do {                                                                   \
+       if (!iwl_is_rfkill(p->shrd))                                    \
+               dev_err(bus(p)->dev, "%s%c %s " fmt,                    \
+                       "",                                             \
+                       in_interrupt() ? 'I' : 'U', __func__,           \
+                       ##__VA_ARGS__);                                 \
+       else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO)           \
+               dev_err(bus(p)->dev, "%s%c %s " fmt,                    \
+                       "(RFKILL) ",                                    \
+                       in_interrupt() ? 'I' : 'U', __func__,           \
+                       ##__VA_ARGS__);                                 \
+} while (0)
+
 #else
 #define IWL_DEBUG(m, level, fmt, args...)
 #define IWL_DEBUG_LIMIT(m, level, fmt, args...)
 #define iwl_print_hex_dump(m, level, p, len)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...)        \
+do {                                                   \
+       if (!iwl_is_rfkill(p->shrd))                    \
+               IWL_ERR(p, fmt, ##args);                \
+} while (0)
 #endif                         /* CONFIG_IWLWIFI_DEBUG */
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -114,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
  */
 
 /* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO            (1 << 0)
-#define IWL_DL_MAC80211                (1 << 1)
-#define IWL_DL_HCMD            (1 << 2)
-#define IWL_DL_STATE           (1 << 3)
+#define IWL_DL_INFO            0x00000001
+#define IWL_DL_MAC80211                0x00000002
+#define IWL_DL_HCMD            0x00000004
+#define IWL_DL_STATE           0x00000008
 /* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP         (1 << 4)
-#define IWL_DL_HCMD_DUMP       (1 << 5)
-#define IWL_DL_EEPROM          (1 << 6)
-#define IWL_DL_RADIO           (1 << 7)
+#define IWL_DL_EEPROM          0x00000040
+#define IWL_DL_RADIO           0x00000080
 /* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER           (1 << 8)
-#define IWL_DL_TEMP            (1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN            (1 << 11)
+#define IWL_DL_POWER           0x00000100
+#define IWL_DL_TEMP            0x00000200
+#define IWL_DL_SCAN            0x00000800
 /* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC           (1 << 12)
-#define IWL_DL_DROP            (1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX            (1 << 15)
+#define IWL_DL_ASSOC           0x00001000
+#define IWL_DL_DROP            0x00002000
+#define IWL_DL_COEX            0x00008000
 /* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW              (1 << 16)
-#define IWL_DL_RF_KILL         (1 << 17)
-#define IWL_DL_FW_ERRORS       (1 << 18)
-#define IWL_DL_LED             (1 << 19)
+#define IWL_DL_FW              0x00010000
+#define IWL_DL_RF_KILL         0x00020000
+#define IWL_DL_FW_ERRORS       0x00040000
+#define IWL_DL_LED             0x00080000
 /* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE            (1 << 20)
-#define IWL_DL_CALIB           (1 << 21)
-#define IWL_DL_WEP             (1 << 22)
-#define IWL_DL_TX              (1 << 23)
+#define IWL_DL_RATE            0x00100000
+#define IWL_DL_CALIB           0x00200000
+#define IWL_DL_WEP             0x00400000
+#define IWL_DL_TX              0x00800000
 /* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX              (1 << 24)
-#define IWL_DL_ISR             (1 << 25)
-#define IWL_DL_HT              (1 << 26)
+#define IWL_DL_RX              0x01000000
+#define IWL_DL_ISR             0x02000000
+#define IWL_DL_HT              0x04000000
 /* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H             (1 << 28)
-#define IWL_DL_STATS           (1 << 29)
-#define IWL_DL_TX_REPLY                (1 << 30)
-#define IWL_DL_QOS             (1 << 31)
+#define IWL_DL_11H             0x10000000
+#define IWL_DL_STATS           0x20000000
+#define IWL_DL_TX_REPLY                0x40000000
+#define IWL_DL_TX_QUEUES       0x80000000
 
 #define IWL_DEBUG_INFO(p, f, a...)     IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
 #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
 #define IWL_DEBUG_TEMP(p, f, a...)     IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
 #define IWL_DEBUG_SCAN(p, f, a...)     IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
 #define IWL_DEBUG_RX(p, f, a...)       IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -164,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
 #define IWL_DEBUG_LED(p, f, a...)      IWL_DEBUG(p, IWL_DL_LED, f, ## a)
 #define IWL_DEBUG_WEP(p, f, a...)      IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
 #define IWL_DEBUG_HC(p, f, a...)       IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...)  IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
 #define IWL_DEBUG_EEPROM(p, f, a...)   IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
 #define IWL_DEBUG_CALIB(p, f, a...)    IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
 #define IWL_DEBUG_FW(p, f, a...)       IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -186,9 +200,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
 #define IWL_DEBUG_STATS_LIMIT(p, f, a...)      \
                IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
 #define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
-               IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...)      IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_TX_QUEUES(p, f, a...)        IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
 #define IWL_DEBUG_RADIO(p, f, a...)    IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
 #define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
 #define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
index a1670e3..ccbcab4 100644 (file)
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 
        /* default is to dump the entire data segment */
        if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+               struct iwl_trans *trans = trans(priv);
                priv->dbgfs_sram_offset = 0x800000;
-               if (priv->ucode_type == IWL_UCODE_INIT)
-                       priv->dbgfs_sram_len = priv->ucode_init.data.len;
+               if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+                       priv->dbgfs_sram_len = trans->ucode_init.data.len;
                else
-                       priv->dbgfs_sram_len = priv->ucode_rt.data.len;
+                       priv->dbgfs_sram_len = trans->ucode_rt.data.len;
        }
        len = priv->dbgfs_sram_len;
 
@@ -341,7 +342,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
 
        return simple_read_from_buffer(user_buf, count, ppos,
                                       priv->wowlan_sram,
-                                      priv->ucode_wowlan.data.len);
+                                      trans(priv)->ucode_wowlan.data.len);
 }
 static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
                                        size_t count, loff_t *ppos)
@@ -430,7 +431,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
        eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
        pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
                        "version: 0x%x\n",
-                       (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+                       (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
                         ? "OTP" : "EEPROM", eeprom_ver);
        for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
                pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
index 6c00a44..6f6a647 100644 (file)
@@ -230,17 +230,6 @@ struct iwl_vif_priv {
        u8 ibss_bssid_sta_id;
 };
 
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
-       void *v_addr;           /* access by driver */
-       dma_addr_t p_addr;      /* access by card's busmaster DMA */
-       u32 len;                /* bytes */
-};
-
-struct fw_img {
-       struct fw_desc code, data;
-};
-
 /* v1/v2 uCode file layout */
 struct iwl_ucode_header {
        __le32 ver;     /* major/minor/API/serial */
@@ -453,26 +442,12 @@ enum iwlagn_chain_noise_state {
 };
 
 
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
-       IWL_CALIB_XTAL,
-       IWL_CALIB_DC,
-       IWL_CALIB_LO,
-       IWL_CALIB_TX_IQ,
-       IWL_CALIB_TX_IQ_PERD,
-       IWL_CALIB_BASE_BAND,
-       IWL_CALIB_TEMP_OFFSET,
-       IWL_CALIB_MAX
-};
-
 /* Opaque calibration results */
 struct iwl_calib_result {
-       void *buf;
-       size_t buf_len;
+       struct list_head list;
+       size_t cmd_len;
+       struct iwl_calib_hdr hdr;
+       /* data follows */
 };
 
 /* Sensitivity calib data */
@@ -714,35 +689,6 @@ struct iwl_force_reset {
  */
 #define IWLAGN_EXT_BEACON_TIME_POS     22
 
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
-       struct list_head list;
-
-       void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
-                  void *data);
-       void *fn_data;
-
-       u8 cmd;
-       bool triggered, aborted;
-};
-
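
Note: the kerneldoc removed above documents a one-shot wait protocol; this patch moves the facility out of the agn layer (see the iwl_abort_notification_waits() call earlier in this diff). A hedged sketch of the call sequence that kerneldoc describes, using only the declarations removed earlier in this diff; the command ID, timeout, and NULL callback are illustrative assumptions, not code from the patch:

	/* sketch only: wait for a uCode notification with the old agn helpers */
	static int example_send_and_wait(struct iwl_priv *priv)
	{
		struct iwl_notification_wait calib_wait;	/* declared on the stack */
		int ret;

		/* register interest before triggering the uCode */
		iwlagn_init_notification_wait(priv, &calib_wait,
					      CALIBRATION_COMPLETE_NOTIFICATION,
					      NULL, NULL);

		/*
		 * ... send the host command that makes the uCode notify us;
		 * on failure, call iwlagn_remove_notification() instead of waiting ...
		 */

		/* one-shot: returns when the notification arrives, is aborted, or times out */
		ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
		return ret;
	}
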
 struct iwl_rxon_context {
        struct ieee80211_vif *vif;
 
@@ -805,13 +751,6 @@ enum iwl_scan_type {
        IWL_SCAN_ROC,
 };
 
-enum iwlagn_ucode_type {
-       IWL_UCODE_NONE,
-       IWL_UCODE_REGULAR,
-       IWL_UCODE_INIT,
-       IWL_UCODE_WOWLAN,
-};
-
 #ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
 struct iwl_testmode_trace {
        u32 buff_size;
@@ -822,8 +761,20 @@ struct iwl_testmode_trace {
        dma_addr_t dma_addr;
        bool trace_enabled;
 };
+struct iwl_testmode_sram {
+       u32 buff_size;
+       u32 num_chunks;
+       u8 *buff_addr;
+       bool sram_readed;
+};
 #endif
 
+struct iwl_wipan_noa_data {
+       struct rcu_head rcu_head;
+       u32 length;
+       u8 data[];
+};
+
 struct iwl_priv {
 
        /*data shared among all the driver's layers */
@@ -881,7 +832,9 @@ struct iwl_priv {
        s32 last_temperature;
 
        /* init calibration results */
-       struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+       struct list_head calib_results;
+
+       struct iwl_wipan_noa_data __rcu *noa_data;
 
        /* Scan related variables */
        unsigned long scan_start;
@@ -907,12 +860,6 @@ struct iwl_priv {
        u32 ucode_ver;                  /* version of ucode, copy of
                                           iwl_ucode.ver */
 
-       struct fw_img ucode_rt;
-       struct fw_img ucode_init;
-       struct fw_img ucode_wowlan;
-
-       enum iwlagn_ucode_type ucode_type;
-       u8 ucode_write_complete;        /* the image write is complete */
        char firmware_name[25];
 
        struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
@@ -959,7 +906,6 @@ struct iwl_priv {
 
        /* eeprom -- this is in the card's little endian byte order */
        u8 *eeprom;
-       int    nvm_device_type;
        struct iwl_eeprom_calib_info *calib_info;
 
        enum nl80211_iftype iw_mode;
@@ -1017,10 +963,6 @@ struct iwl_priv {
        /* counts reply_tx error */
        struct reply_tx_error_statistics reply_tx_stats;
        struct reply_agg_tx_error_statistics reply_agg_tx_stats;
-       /* notification wait support */
-       struct list_head notif_waits;
-       spinlock_t notif_wait_lock;
-       wait_queue_head_t notif_waitq;
 
        /* remain-on-channel offload support */
        struct ieee80211_channel *hw_roc_channel;
@@ -1100,6 +1042,7 @@ struct iwl_priv {
        bool led_registered;
 #ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
        struct iwl_testmode_trace testmode_trace;
+       struct iwl_testmode_sram testmode_sram;
        u32 tm_fixed_rate;
 #endif
 
index a635a7e..2a2c8de 100644 (file)
@@ -28,7 +28,7 @@
 
 /* sparse doesn't like tracepoint macros */
 #ifndef __CHECKER__
-#include "iwl-dev.h"
+#include "iwl-trans.h"
 
 #define CREATE_TRACE_POINTS
 #include "iwl-devtrace.h"
index 8a51c5c..f9d3319 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <linux/tracepoint.h>
 
-struct iwl_priv;
 
 #if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
 #undef TRACE_EVENT
@@ -37,14 +36,14 @@ struct iwl_priv;
 static inline void trace_ ## name(proto) {}
 #endif
 
-#define PRIV_ENTRY     __field(struct iwl_priv *, priv)
+#define PRIV_ENTRY     __field(void *, priv)
 #define PRIV_ASSIGN    __entry->priv = priv
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM iwlwifi_io
 
 TRACE_EVENT(iwlwifi_dev_ioread32,
-       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+       TP_PROTO(void *priv, u32 offs, u32 val),
        TP_ARGS(priv, offs, val),
        TP_STRUCT__entry(
                PRIV_ENTRY
@@ -60,7 +59,7 @@ TRACE_EVENT(iwlwifi_dev_ioread32,
 );
 
 TRACE_EVENT(iwlwifi_dev_iowrite8,
-       TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+       TP_PROTO(void *priv, u32 offs, u8 val),
        TP_ARGS(priv, offs, val),
        TP_STRUCT__entry(
                PRIV_ENTRY
@@ -76,7 +75,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite8,
 );
 
 TRACE_EVENT(iwlwifi_dev_iowrite32,
-       TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+       TP_PROTO(void *priv, u32 offs, u32 val),
        TP_ARGS(priv, offs, val),
        TP_STRUCT__entry(
                PRIV_ENTRY
@@ -95,7 +94,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
 #define TRACE_SYSTEM iwlwifi_ucode
 
 TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
-       TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+       TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
        TP_ARGS(priv, time, data, ev),
        TP_STRUCT__entry(
                PRIV_ENTRY
@@ -115,7 +114,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
-       TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+       TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
        TP_ARGS(priv, wraps, n_entry, p_entry),
        TP_STRUCT__entry(
                PRIV_ENTRY
@@ -139,7 +138,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
 #define TRACE_SYSTEM iwlwifi
 
 TRACE_EVENT(iwlwifi_dev_hcmd,
-       TP_PROTO(struct iwl_priv *priv, u32 flags,
+       TP_PROTO(void *priv, u32 flags,
                 const void *hcmd0, size_t len0,
                 const void *hcmd1, size_t len1,
                 const void *hcmd2, size_t len2),
@@ -164,7 +163,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
 );
 
 TRACE_EVENT(iwlwifi_dev_rx,
-       TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+       TP_PROTO(void *priv, void *rxbuf, size_t len),
        TP_ARGS(priv, rxbuf, len),
        TP_STRUCT__entry(
                PRIV_ENTRY
@@ -179,7 +178,7 @@ TRACE_EVENT(iwlwifi_dev_rx,
 );
 
 TRACE_EVENT(iwlwifi_dev_tx,
-       TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+       TP_PROTO(void *priv, void *tfd, size_t tfdlen,
                 void *buf0, size_t buf0_len,
                 void *buf1, size_t buf1_len),
        TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
@@ -211,7 +210,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_error,
-       TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
+       TP_PROTO(void *priv, u32 desc, u32 tsf_low,
                 u32 data1, u32 data2, u32 line, u32 blink1,
                 u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
                 u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
@@ -271,7 +270,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
 );
 
 TRACE_EVENT(iwlwifi_dev_ucode_event,
-       TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+       TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
        TP_ARGS(priv, time, data, ev),
        TP_STRUCT__entry(
                PRIV_ENTRY
index a4e43bd..dcada08 100644 (file)
@@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = {       /* 5.2 ht40 channel */
  * EEPROM chip, not a single event, so even reads could conflict if they
  * weren't arbitrated by the semaphore.
  */
-static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
+static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
 {
        u16 count;
        int ret;
 
        for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
                /* Request semaphore */
-               iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+               iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 
                /* See if we got it */
-               ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+               ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
                                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
                                EEPROM_SEM_TIMEOUT);
                if (ret >= 0) {
-                       IWL_DEBUG_EEPROM(priv,
+                       IWL_DEBUG_EEPROM(bus,
                                "Acquired semaphore after %d tries.\n",
                                count+1);
                        return ret;
@@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
        return ret;
 }
 
-static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
+static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
 {
-       iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+       iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
                CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
 
 }
 
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
 {
-       u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+       u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
        int ret = 0;
 
-       IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+       IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
        switch (gp) {
        case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
-               if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
-                       IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
+               if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+                       IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
                                gp);
                        ret = -ENOENT;
                }
                break;
        case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
        case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
-               if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
-                       IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
+               if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+                       IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
                        ret = -ENOENT;
                }
                break;
        case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
        default:
-               IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
+               IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
                        "EEPROM_GP=0x%08x\n",
-                       (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+                       (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
                        ? "OTP" : "EEPROM", gp);
                ret = -ENOENT;
                break;
@@ -302,19 +302,19 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
  *
 ******************************************************************************/
 
-static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
+static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
 {
-       iwl_read32(bus(priv), CSR_OTP_GP_REG);
+       iwl_read32(bus, CSR_OTP_GP_REG);
 
        if (mode == IWL_OTP_ACCESS_ABSOLUTE)
-               iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
+               iwl_clear_bit(bus, CSR_OTP_GP_REG,
                              CSR_OTP_GP_REG_OTP_ACCESS_MODE);
        else
-               iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+               iwl_set_bit(bus, CSR_OTP_GP_REG,
                            CSR_OTP_GP_REG_OTP_ACCESS_MODE);
 }
 
-static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
 {
        u32 otpgp;
        int nvm_type;
@@ -322,7 +322,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
        /* OTP only valid for CP/PP and after */
        switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
        case CSR_HW_REV_TYPE_NONE:
-               IWL_ERR(priv, "Unknown hardware type\n");
+               IWL_ERR(bus, "Unknown hardware type\n");
                return -ENOENT;
        case CSR_HW_REV_TYPE_5300:
        case CSR_HW_REV_TYPE_5350:
@@ -331,7 +331,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
                nvm_type = NVM_DEVICE_TYPE_EEPROM;
                break;
        default:
-               otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+               otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
                if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
                        nvm_type = NVM_DEVICE_TYPE_OTP;
                else
@@ -341,73 +341,73 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
        return  nvm_type;
 }
 
-static int iwl_init_otp_access(struct iwl_priv *priv)
+static int iwl_init_otp_access(struct iwl_bus *bus)
 {
        int ret;
 
        /* Enable 40MHz radio clock */
-       iwl_write32(bus(priv), CSR_GP_CNTRL,
-                   iwl_read32(bus(priv), CSR_GP_CNTRL) |
+       iwl_write32(bus, CSR_GP_CNTRL,
+                   iwl_read32(bus, CSR_GP_CNTRL) |
                    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
        /* wait for clock to be ready */
-       ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
+       ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
                                 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                 25000);
        if (ret < 0)
-               IWL_ERR(priv, "Time out access OTP\n");
+               IWL_ERR(bus, "Time out access OTP\n");
        else {
-               iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+               iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
                                  APMG_PS_CTRL_VAL_RESET_REQ);
                udelay(5);
-               iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+               iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
                                    APMG_PS_CTRL_VAL_RESET_REQ);
 
                /*
                 * CSR auto clock gate disable bit -
                 * this is only applicable for HW with OTP shadow RAM
                 */
-               if (priv->cfg->base_params->shadow_ram_support)
-                       iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
+               if (priv(bus)->cfg->base_params->shadow_ram_support)
+                       iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
                                CSR_RESET_LINK_PWR_MGMT_DISABLED);
        }
        return ret;
 }
 
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
 {
        int ret = 0;
        u32 r;
        u32 otpgp;
 
-       iwl_write32(bus(priv), CSR_EEPROM_REG,
+       iwl_write32(bus, CSR_EEPROM_REG,
                    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-       ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
+       ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
                                 CSR_EEPROM_REG_READ_VALID_MSK,
                                 CSR_EEPROM_REG_READ_VALID_MSK,
                                 IWL_EEPROM_ACCESS_TIMEOUT);
        if (ret < 0) {
-               IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
+               IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
                return ret;
        }
-       r = iwl_read32(bus(priv), CSR_EEPROM_REG);
+       r = iwl_read32(bus, CSR_EEPROM_REG);
        /* check for ECC errors: */
-       otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+       otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
        if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
                /* stop in this case */
                /* set the uncorrectable OTP ECC bit for acknowledgement */
-               iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+               iwl_set_bit(bus, CSR_OTP_GP_REG,
                        CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
-               IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
+               IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
                return -EINVAL;
        }
        if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
                /* continue in this case */
                /* set the correctable OTP ECC bit for acknowledgement */
-               iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+               iwl_set_bit(bus, CSR_OTP_GP_REG,
                                CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
-               IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+               IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
        }
        *eeprom_data = cpu_to_le16(r >> 16);
        return 0;
@@ -416,20 +416,20 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
 /*
  * iwl_is_otp_empty: check for empty OTP
  */
-static bool iwl_is_otp_empty(struct iwl_priv *priv)
+static bool iwl_is_otp_empty(struct iwl_bus *bus)
 {
        u16 next_link_addr = 0;
        __le16 link_value;
        bool is_empty = false;
 
        /* locate the beginning of OTP link list */
-       if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
+       if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
                if (!link_value) {
-                       IWL_ERR(priv, "OTP is empty\n");
+                       IWL_ERR(bus, "OTP is empty\n");
                        is_empty = true;
                }
        } else {
-               IWL_ERR(priv, "Unable to read first block of OTP list.\n");
+               IWL_ERR(bus, "Unable to read first block of OTP list.\n");
                is_empty = true;
        }
 
@@ -446,7 +446,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
  *   we should read and used to configure the device.
  *   only perform this operation if shadow RAM is disabled
  */
-static int iwl_find_otp_image(struct iwl_priv *priv,
+static int iwl_find_otp_image(struct iwl_bus *bus,
                                        u16 *validblockaddr)
 {
        u16 next_link_addr = 0, valid_addr;
@@ -454,10 +454,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
        int usedblocks = 0;
 
        /* set addressing mode to absolute to traverse the link list */
-       iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
+       iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
 
        /* checking for empty OTP or error */
-       if (iwl_is_otp_empty(priv))
+       if (iwl_is_otp_empty(bus))
                return -EINVAL;
 
        /*
@@ -471,9 +471,9 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
                 */
                valid_addr = next_link_addr;
                next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
-               IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
+               IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
                               usedblocks, next_link_addr);
-               if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+               if (iwl_read_otp_word(bus, next_link_addr, &link_value))
                        return -EINVAL;
                if (!link_value) {
                        /*
@@ -488,10 +488,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
                }
                /* more in the link list, continue */
                usedblocks++;
-       } while (usedblocks <= priv->cfg->base_params->max_ll_items);
+       } while (usedblocks <= priv(bus)->cfg->base_params->max_ll_items);
 
        /* OTP has no valid blocks */
-       IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
+       IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
        return -EINVAL;
 }
 
@@ -504,28 +504,28 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
  * iwl_get_max_txpower_avg - get the highest tx power from all chains.
  *     find the highest tx power from all chains for the channel
  */
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
                struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
                int element, s8 *max_txpower_in_half_dbm)
 {
        s8 max_txpower_avg = 0; /* (dBm) */
 
        /* Take the highest tx power from any valid chains */
-       if ((priv->cfg->valid_tx_ant & ANT_A) &&
+       if ((cfg->valid_tx_ant & ANT_A) &&
            (enhanced_txpower[element].chain_a_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_a_max;
-       if ((priv->cfg->valid_tx_ant & ANT_B) &&
+       if ((cfg->valid_tx_ant & ANT_B) &&
            (enhanced_txpower[element].chain_b_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_b_max;
-       if ((priv->cfg->valid_tx_ant & ANT_C) &&
+       if ((cfg->valid_tx_ant & ANT_C) &&
            (enhanced_txpower[element].chain_c_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].chain_c_max;
-       if (((priv->cfg->valid_tx_ant == ANT_AB) |
-           (priv->cfg->valid_tx_ant == ANT_BC) |
-           (priv->cfg->valid_tx_ant == ANT_AC)) &&
+       if (((cfg->valid_tx_ant == ANT_AB) |
+           (cfg->valid_tx_ant == ANT_BC) |
+           (cfg->valid_tx_ant == ANT_AC)) &&
            (enhanced_txpower[element].mimo2_max > max_txpower_avg))
                max_txpower_avg =  enhanced_txpower[element].mimo2_max;
-       if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+       if ((cfg->valid_tx_ant == ANT_ABC) &&
            (enhanced_txpower[element].mimo3_max > max_txpower_avg))
                max_txpower_avg = enhanced_txpower[element].mimo3_max;
 
@@ -627,7 +627,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
                                 ((txp->delta_20_in_40 & 0xf0) >> 4),
                                 (txp->delta_20_in_40 & 0x0f));
 
-               max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+               max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
                                                      &max_txp_avg_halfdbm);
 
                /*
@@ -660,8 +660,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
        u16 validblockaddr = 0;
        u16 cache_addr = 0;
 
-       priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
-       if (priv->nvm_device_type == -ENOENT)
+       trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
+       if (trans(priv)->nvm_device_type == -ENOENT)
                return -ENOENT;
        /* allocate eeprom */
        sz = priv->cfg->base_params->eeprom_size;
@@ -675,7 +675,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
 
        iwl_apm_init(priv);
 
-       ret = iwl_eeprom_verify_signature(priv);
+       ret = iwl_eeprom_verify_signature(trans(priv));
        if (ret < 0) {
                IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
                ret = -ENOENT;
@@ -683,16 +683,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
        }
 
        /* Make sure driver (instead of uCode) is allowed to read EEPROM */
-       ret = iwl_eeprom_acquire_semaphore(priv);
+       ret = iwl_eeprom_acquire_semaphore(bus(priv));
        if (ret < 0) {
                IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
                ret = -ENOENT;
                goto err;
        }
 
-       if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+       if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
 
-               ret = iwl_init_otp_access(priv);
+               ret = iwl_init_otp_access(bus(priv));
                if (ret) {
                        IWL_ERR(priv, "Failed to initialize OTP access.\n");
                        ret = -ENOENT;
@@ -707,7 +707,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
                             CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
                /* traversing the linked list if no shadow ram supported */
                if (!priv->cfg->base_params->shadow_ram_support) {
-                       if (iwl_find_otp_image(priv, &validblockaddr)) {
+                       if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
                                ret = -ENOENT;
                                goto done;
                        }
@@ -716,7 +716,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
                     addr += sizeof(u16)) {
                        __le16 eeprom_data;
 
-                       ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+                       ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
                        if (ret)
                                goto done;
                        e[cache_addr / 2] = eeprom_data;
@@ -744,13 +744,13 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
        }
 
        IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
-                      (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+                      (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
                       ? "OTP" : "EEPROM",
                       iwl_eeprom_query16(priv, EEPROM_VERSION));
 
        ret = 0;
 done:
-       iwl_eeprom_release_semaphore(priv);
+       iwl_eeprom_release_semaphore(bus(priv));
 
 err:
        if (ret)
index 3ffa8e6..3464cad 100644 (file)
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
 
        spin_lock_irqsave(&bus->reg_lock, flags);
        iwl_grab_nic_access(bus);
-       value = iwl_read32(bus(bus), reg);
+       value = iwl_read32(bus, reg);
        iwl_release_nic_access(bus);
        spin_unlock_irqrestore(&bus->reg_lock, flags);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
new file mode 100644 (file)
index 0000000..55308b8
--- /dev/null
@@ -0,0 +1,1639 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
+#include "iwl-bus.h"
+#include "iwl-trans.h"
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP),
+       },
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_GO) |
+                        BIT(NL80211_IFTYPE_AP),
+       },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+       { .num_different_channels = 1,
+         .max_interfaces = 2,
+         .beacon_int_infra_match = true,
+         .limits = iwlagn_sta_ap_limits,
+         .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+       },
+       { .num_different_channels = 1,
+         .max_interfaces = 2,
+         .limits = iwlagn_2sta_limits,
+         .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+       },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+       { .num_different_channels = 1,
+         .max_interfaces = 2,
+         .beacon_int_infra_match = true,
+         .limits = iwlagn_p2p_sta_go_limits,
+         .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+       },
+       { .num_different_channels = 1,
+         .max_interfaces = 2,
+         .limits = iwlagn_p2p_2sta_limits,
+         .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+       },
+};
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+                                 struct iwlagn_ucode_capabilities *capa)
+{
+       int ret;
+       struct ieee80211_hw *hw = priv->hw;
+       struct iwl_rxon_context *ctx;
+
+       hw->rate_control_algorithm = "iwl-agn-rs";
+
+       /* Tell mac80211 our characteristics */
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_AMPDU_AGGREGATION |
+                   IEEE80211_HW_NEED_DTIM_PERIOD |
+                   IEEE80211_HW_SPECTRUM_MGMT |
+                   IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+       /*
+        * Including the following line will crash some APs.  This
+        * workaround removes the stimulus which causes the crash until
+        * the AP software can be fixed.
+       hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+        */
+
+       hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+       if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+               hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+                            IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+       if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+               hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+       hw->sta_data_size = sizeof(struct iwl_station_priv);
+       hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+       for_each_context(priv, ctx) {
+               hw->wiphy->interface_modes |= ctx->interface_modes;
+               hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+       }
+
+       BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+       if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+               hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+               hw->wiphy->n_iface_combinations =
+                       ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+       } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+               hw->wiphy->iface_combinations =
+                       iwlagn_iface_combinations_dualmode;
+               hw->wiphy->n_iface_combinations =
+                       ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+       }
+
+       hw->wiphy->max_remain_on_channel_duration = 1000;
+
+       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+                           WIPHY_FLAG_DISABLE_BEACON_HINTS |
+                           WIPHY_FLAG_IBSS_RSN;
+
+       if (trans(priv)->ucode_wowlan.code.len &&
+           device_can_wakeup(bus(priv)->dev)) {
+               hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+                                         WIPHY_WOWLAN_DISCONNECT |
+                                         WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+                                         WIPHY_WOWLAN_RFKILL_RELEASE;
+               if (!iwlagn_mod_params.sw_crypto)
+                       hw->wiphy->wowlan.flags |=
+                               WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+                               WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+               hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+               hw->wiphy->wowlan.pattern_min_len =
+                                       IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+               hw->wiphy->wowlan.pattern_max_len =
+                                       IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+       }
+
+       if (iwlagn_mod_params.power_save)
+               hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+       else
+               hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+       hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+       /* we create the 802.11 header and a zero-length SSID element */
+       hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+
+       /* Default value; 4 EDCA QOS priorities */
+       hw->queues = 4;
+
+       hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+       if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+                       &priv->bands[IEEE80211_BAND_2GHZ];
+       if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+                       &priv->bands[IEEE80211_BAND_5GHZ];
+
+       iwl_leds_init(priv);
+
+       ret = ieee80211_register_hw(priv->hw);
+       if (ret) {
+               IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+               return ret;
+       }
+       priv->mac80211_registered = 1;
+
+       return 0;
+}
+
+static int __iwl_up(struct iwl_priv *priv)
+{
+       struct iwl_rxon_context *ctx;
+       int ret;
+
+       lockdep_assert_held(&priv->shrd->mutex);
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+               IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+               return -EIO;
+       }
+
+       for_each_context(priv, ctx) {
+               ret = iwlagn_alloc_bcast_station(priv, ctx);
+               if (ret) {
+                       iwl_dealloc_bcast_stations(priv);
+                       return ret;
+               }
+       }
+
+       ret = iwlagn_run_init_ucode(priv);
+       if (ret) {
+               IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
+               goto error;
+       }
+
+       ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
+               goto error;
+       }
+
+       ret = iwl_alive_start(priv);
+       if (ret)
+               goto error;
+       return 0;
+
+ error:
+       set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+       __iwl_down(priv);
+       clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+
+       IWL_ERR(priv, "Unable to initialize device.\n");
+       return ret;
+}
+
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       /* we should be verifying the device is ready to be opened */
+       mutex_lock(&priv->shrd->mutex);
+       ret = __iwl_up(priv);
+       mutex_unlock(&priv->shrd->mutex);
+       if (ret)
+               return ret;
+
+       IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+       /* Now we should be done, and the READY bit should be set. */
+       if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+               ret = -EIO;
+
+       iwlagn_led_enable(priv);
+
+       priv->is_open = 1;
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+}
+
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!priv->is_open)
+               return;
+
+       priv->is_open = 0;
+
+       iwl_down(priv);
+
+       flush_workqueue(priv->shrd->workqueue);
+
+       /* User space software may expect to get rfkill changes
+        * even if the interface is down */
+       iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
+       iwl_enable_rfkill_int(priv);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     struct cfg80211_gtk_rekey_data *data)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       if (iwlagn_mod_params.sw_crypto)
+               return;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+               goto out;
+
+       memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+       memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
+       priv->replay_ctr =
+               cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+       priv->have_rekey_data = true;
+
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+                             struct cfg80211_wowlan *wowlan)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       int ret;
+
+       if (WARN_ON(!wowlan))
+               return -EINVAL;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       /* Don't attempt WoWLAN when not associated, tear down instead. */
+       if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+           !iwl_is_associated_ctx(ctx)) {
+               ret = 1;
+               goto out;
+       }
+
+       ret = iwlagn_suspend(priv, hw, wowlan);
+       if (ret)
+               goto error;
+
+       device_set_wakeup_enable(bus(priv)->dev, true);
+
+       /* Now let the ucode operate on its own */
+       iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+                         CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+       goto out;
+
+ error:
+       priv->shrd->wowlan = false;
+       iwlagn_prepare_restart(priv);
+       ieee80211_restart_hw(priv->hw);
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct ieee80211_vif *vif;
+       unsigned long flags;
+       u32 base, status = 0xffffffff;
+       int ret = -EIO;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+                         CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+       base = priv->device_pointers.error_event_table;
+       if (iwlagn_hw_valid_rtc_data_addr(base)) {
+               spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+               ret = iwl_grab_nic_access_silent(bus(priv));
+               if (ret == 0) {
+                       iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+                       status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+                       iwl_release_nic_access(bus(priv));
+               }
+               spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               if (ret == 0) {
+                       struct iwl_trans *trans = trans(priv);
+                       if (!priv->wowlan_sram)
+                               priv->wowlan_sram =
+                                       kzalloc(trans->ucode_wowlan.data.len,
+                                               GFP_KERNEL);
+
+                       if (priv->wowlan_sram)
+                               _iwl_read_targ_mem_words(
+                                       bus(priv), 0x800000, priv->wowlan_sram,
+                                       trans->ucode_wowlan.data.len / 4);
+               }
+#endif
+       }
+
+       /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+       vif = ctx->vif;
+
+       priv->shrd->wowlan = false;
+
+       device_set_wakeup_enable(bus(priv)->dev, false);
+
+       iwlagn_prepare_restart(priv);
+
+       memset((void *)&ctx->active, 0, sizeof(ctx->active));
+       iwl_connection_init_rx_config(priv, ctx);
+       iwlagn_set_rxon_chain(priv, ctx);
+
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       ieee80211_resume_disconnect(vif);
+
+       return 1;
+}
+
+#endif
+
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+                    ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+       if (iwlagn_tx_skb(priv, skb))
+               dev_kfree_skb_any(skb);
+}
+
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_key_conf *keyconf,
+                                      struct ieee80211_sta *sta,
+                                      u32 iv32, u16 *phase1key)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
+}
+
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_sta *sta,
+                             struct ieee80211_key_conf *key)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *ctx = vif_priv->ctx;
+       int ret;
+       bool is_default_wep_key = false;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (iwlagn_mod_params.sw_crypto) {
+               IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               break;
+       default:
+               break;
+       }
+
+       /*
+        * We could program these keys into the hardware as well, but we
+        * don't expect much multicast traffic in IBSS and having keys
+        * for more stations is probably more useful.
+        *
+        * Mark key TX-only and return 0.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC &&
+           !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+               key->hw_key_idx = WEP_INVALID_OFFSET;
+               return 0;
+       }
+
+       /* If the key was TX-only, accept deletion */
+       if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+               return 0;
+
+       mutex_lock(&priv->shrd->mutex);
+       iwl_scan_cancel_timeout(priv, 100);
+
+       BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
+       /*
+        * If we are getting a WEP group key and we didn't receive any key mapping
+        * so far, we are in legacy WEP mode (group key only); otherwise we are
+        * in 1X mode.
+        * In legacy WEP mode, we use another host command to the uCode.
+        */
+       if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+            key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+               if (cmd == SET_KEY)
+                       is_default_wep_key = !ctx->key_mapping_keys;
+               else
+                       is_default_wep_key =
+                               key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
+       }
+
+
+       switch (cmd) {
+       case SET_KEY:
+               if (is_default_wep_key) {
+                       ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
+                       break;
+               }
+               ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+               if (ret) {
+                       /*
+                        * can't add key for RX, but we don't need it
+                        * in the device for TX so still return 0
+                        */
+                       ret = 0;
+                       key->hw_key_idx = WEP_INVALID_OFFSET;
+               }
+
+               IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+               break;
+       case DISABLE_KEY:
+               if (is_default_wep_key)
+                       ret = iwl_remove_default_wep_key(priv, ctx, key);
+               else
+                       ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
+
+               IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  enum ieee80211_ampdu_mlme_action action,
+                                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                                  u8 buf_size)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret = -EINVAL;
+       struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+       struct iwl_rxon_context *ctx =  iwl_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+                    sta->addr, tid);
+
+       if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
+               return -EACCES;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               IWL_DEBUG_HT(priv, "start Rx\n");
+               ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               IWL_DEBUG_HT(priv, "stop Rx\n");
+               ret = iwl_sta_rx_agg_stop(priv, sta, tid);
+               if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+                       ret = 0;
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               IWL_DEBUG_HT(priv, "start Tx\n");
+               ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+               break;
+       case IEEE80211_AMPDU_TX_STOP:
+               IWL_DEBUG_HT(priv, "stop Tx\n");
+               ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+               if ((ret == 0) && (priv->agg_tids_count > 0)) {
+                       priv->agg_tids_count--;
+                       IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+                                    priv->agg_tids_count);
+               }
+               if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+                       ret = 0;
+               if (!priv->agg_tids_count && priv->cfg->ht_params &&
+                   priv->cfg->ht_params->use_rts_for_aggregation) {
+                       /*
+                        * switch off RTS/CTS if it was previously enabled
+                        */
+                       sta_priv->lq_sta.lq.general_params.flags &=
+                               ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+                       iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+                                       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+               }
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
+
+               iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
+                               tid, buf_size);
+
+               /*
+                * If the limit is 0, then it wasn't initialised yet,
+                * use the default. We can do that since we take the
+                * minimum below, and we don't want to go above our
+                * default due to hardware restrictions.
+                */
+               if (sta_priv->max_agg_bufsize == 0)
+                       sta_priv->max_agg_bufsize =
+                               LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+               /*
+                * Even though in theory the peer could have different
+                * aggregation reorder buffer sizes for different sessions,
+                * our ucode doesn't allow for that and has a global limit
+                * for each station. Therefore, use the minimum of all the
+                * aggregation sessions and our default value.
+                */
+               sta_priv->max_agg_bufsize =
+                       min(sta_priv->max_agg_bufsize, buf_size);
+
+               if (priv->cfg->ht_params &&
+                   priv->cfg->ht_params->use_rts_for_aggregation) {
+                       /*
+                        * switch to RTS/CTS if it is the preferred protection
+                        * method for HT traffic
+                        */
+
+                       sta_priv->lq_sta.lq.general_params.flags |=
+                               LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+               }
+               priv->agg_tids_count++;
+               IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+                            priv->agg_tids_count);
+
+               sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+                       sta_priv->max_agg_bufsize;
+
+               iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+                               &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+
+               IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+                        sta->addr, tid);
+               ret = 0;
+               break;
+       }
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return ret;
+}
+
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+       int ret = 0;
+       u8 sta_id;
+
+       IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
+                       sta->addr);
+       mutex_lock(&priv->shrd->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+                       sta->addr);
+       sta_priv->sta_id = IWL_INVALID_STATION;
+
+       atomic_set(&sta_priv->pending_frames, 0);
+       if (vif->type == NL80211_IFTYPE_AP)
+               sta_priv->client = true;
+
+       ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
+                                    is_ap, sta, &sta_id);
+       if (ret) {
+               IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+                       sta->addr, ret);
+               /* Should we return success if return code is EEXIST ? */
+               goto out;
+       }
+
+       sta_priv->sta_id = sta_id;
+
+       /* Initialize rate scaling */
+       IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+                      sta->addr);
+       iwl_rs_rate_init(priv, sta, sta_id);
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+                               struct ieee80211_channel_switch *ch_switch)
+{
+       struct iwl_priv *priv = hw->priv;
+       const struct iwl_channel_info *ch_info;
+       struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_channel *channel = ch_switch->channel;
+       struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+       /*
+        * MULTI-FIXME
+        * When we add support for multiple interfaces, we need to
+        * revisit this. The channel switch command in the device
+        * only affects the BSS context, but what does that really
+        * mean? And what if we get a CSA on the second interface?
+        * This needs a lot of work.
+        */
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       u16 ch;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       mutex_lock(&priv->shrd->mutex);
+
+       if (iwl_is_rfkill(priv->shrd))
+               goto out;
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+           test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+           test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+               goto out;
+
+       if (!iwl_is_associated_ctx(ctx))
+               goto out;
+
+       if (!priv->cfg->lib->set_channel_switch)
+               goto out;
+
+       ch = channel->hw_value;
+       if (le16_to_cpu(ctx->active.channel) == ch)
+               goto out;
+
+       ch_info = iwl_get_channel_info(priv, channel->band, ch);
+       if (!is_channel_valid(ch_info)) {
+               IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+               goto out;
+       }
+
+       spin_lock_irq(&priv->shrd->lock);
+
+       priv->current_ht_config.smps = conf->smps_mode;
+
+       /* Configure HT40 channels */
+       ctx->ht.enabled = conf_is_ht(conf);
+       if (ctx->ht.enabled) {
+               if (conf_is_ht40_minus(conf)) {
+                       ctx->ht.extension_chan_offset =
+                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+                       ctx->ht.is_40mhz = true;
+               } else if (conf_is_ht40_plus(conf)) {
+                       ctx->ht.extension_chan_offset =
+                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+                       ctx->ht.is_40mhz = true;
+               } else {
+                       ctx->ht.extension_chan_offset =
+                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
+                       ctx->ht.is_40mhz = false;
+               }
+       } else
+               ctx->ht.is_40mhz = false;
+
+       if ((le16_to_cpu(ctx->staging.channel) != ch))
+               ctx->staging.flags = 0;
+
+       iwl_set_rxon_channel(priv, channel, ctx);
+       iwl_set_rxon_ht(priv, ht_conf);
+       iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+       spin_unlock_irq(&priv->shrd->lock);
+
+       iwl_set_rate(priv);
+       /*
+        * at this point, staging_rxon has the
+        * configuration for channel switch
+        */
+       set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+       priv->switch_channel = cpu_to_le16(ch);
+       if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
+               clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+               priv->switch_channel = 0;
+               ieee80211_chswitch_done(ctx->vif, false);
+       }
+
+out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+                                   unsigned int changed_flags,
+                                   unsigned int *total_flags,
+                                   u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+       struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->shrd->mutex);
+
+       for_each_context(priv, ctx) {
+               ctx->staging.filter_flags &= ~filter_nand;
+               ctx->staging.filter_flags |= filter_or;
+
+               /*
+                * Not committing directly because hardware can perform a scan,
+                * but we'll eventually commit the filter flags change anyway.
+                */
+       }
+
+       mutex_unlock(&priv->shrd->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       mutex_lock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+               IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
+               goto done;
+       }
+       if (iwl_is_rfkill(priv->shrd)) {
+               IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
+               goto done;
+       }
+
+       /*
+        * mac80211 will not push any more frames for transmit
+        * until the flush is completed
+        */
+       if (drop) {
+               IWL_DEBUG_MAC80211(priv, "send flush command\n");
+               if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+                       IWL_ERR(priv, "flush request fail\n");
+                       goto done;
+               }
+       }
+       IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+       iwl_trans_wait_tx_queue_empty(trans(priv));
+done:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+                                    struct ieee80211_channel *channel,
+                                    enum nl80211_channel_type channel_type,
+                                    int duration)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+       int err = 0;
+
+       if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+               return -EOPNOTSUPP;
+
+       if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
+               return -EOPNOTSUPP;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       priv->hw_roc_channel = channel;
+       priv->hw_roc_chantype = channel_type;
+       /* convert from ms to TU */
+       priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
+       priv->hw_roc_start_notified = false;
+       cancel_delayed_work(&priv->hw_roc_disable_work);
+
+       if (!ctx->is_active) {
+               static const struct iwl_qos_info default_qos_data = {
+                       .def_qos_parm = {
+                               .ac[0] = {
+                                       .cw_min = cpu_to_le16(3),
+                                       .cw_max = cpu_to_le16(7),
+                                       .aifsn = 2,
+                                       .edca_txop = cpu_to_le16(1504),
+                               },
+                               .ac[1] = {
+                                       .cw_min = cpu_to_le16(7),
+                                       .cw_max = cpu_to_le16(15),
+                                       .aifsn = 2,
+                                       .edca_txop = cpu_to_le16(3008),
+                               },
+                               .ac[2] = {
+                                       .cw_min = cpu_to_le16(15),
+                                       .cw_max = cpu_to_le16(1023),
+                                       .aifsn = 3,
+                               },
+                               .ac[3] = {
+                                       .cw_min = cpu_to_le16(15),
+                                       .cw_max = cpu_to_le16(1023),
+                                       .aifsn = 7,
+                               },
+                       },
+               };
+
+               ctx->is_active = true;
+               ctx->qos_data = default_qos_data;
+               ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+               memcpy(ctx->staging.node_addr,
+                      priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+                      ETH_ALEN);
+               memcpy(ctx->staging.bssid_addr,
+                      priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+                      ETH_ALEN);
+               err = iwlagn_commit_rxon(priv, ctx);
+               if (err)
+                       goto out;
+               ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+                                            RXON_FILTER_PROMISC_MSK |
+                                            RXON_FILTER_CTL2HOST_MSK;
+
+               err = iwlagn_commit_rxon(priv, ctx);
+               if (err) {
+                       iwlagn_disable_roc(priv);
+                       goto out;
+               }
+               priv->hw_roc_setup = true;
+       }
+
+       err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+       if (err)
+               iwlagn_disable_roc(priv);
+
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return err;
+}
+
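The remain_on_channel handler above converts the requested duration from milliseconds to TU before programming the firmware; a TU (time unit) is 1024 microseconds, hence DIV_ROUND_UP(1000 * duration, 1024). A small standalone check of that arithmetic (the ms_to_tu() helper name is ours, not the driver's):

#include <stdio.h>

/* round-up integer division, as the kernel's DIV_ROUND_UP does */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* convert a duration in milliseconds to 802.11 TU (1 TU = 1024 usec) */
static unsigned int ms_to_tu(unsigned int ms)
{
        return DIV_ROUND_UP(1000 * ms, 1024);
}

int main(void)
{
        printf("50 ms  -> %u TU\n", ms_to_tu(50));   /* 49 TU */
        printf("500 ms -> %u TU\n", ms_to_tu(500));  /* 489 TU */
        return 0;
}
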
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+               return -EOPNOTSUPP;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+       iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
+       iwlagn_disable_roc(priv);
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return 0;
+}
+
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             const u8 *bssid,
+                             enum ieee80211_tx_sync_type type)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *ctx = vif_priv->ctx;
+       int ret;
+       u8 sta_id;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       if (iwl_is_associated_ctx(ctx)) {
+               ret = 0;
+               goto out;
+       }
+
+       if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
+           &priv->shrd->status)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+       if (ret)
+               goto out;
+
+       if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+               ret = -EIO;
+               goto out_remove_sta;
+       }
+
+       memcpy(ctx->bssid, bssid, ETH_ALEN);
+       ctx->preauth_bssid = true;
+
+       ret = iwlagn_commit_rxon(priv, ctx);
+
+       if (ret == 0)
+               goto out;
+
+ out_remove_sta:
+       iwl_remove_station(priv, sta_id, bssid);
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  const u8 *bssid,
+                                  enum ieee80211_tx_sync_type type)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       if (iwl_is_associated_ctx(ctx))
+               goto out;
+
+       iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+       ctx->preauth_bssid = false;
+       /* no need to commit */
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+                          enum ieee80211_rssi_event rssi_event)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+       mutex_lock(&priv->shrd->mutex);
+
+       if (priv->cfg->bt_params &&
+                       priv->cfg->bt_params->advanced_bt_coexist) {
+               if (rssi_event == RSSI_EVENT_LOW)
+                       priv->bt_enable_pspoll = true;
+               else if (rssi_event == RSSI_EVENT_HIGH)
+                       priv->bt_enable_pspoll = false;
+
+               iwlagn_send_advance_bt_config(priv);
+       } else {
+               IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
+                               "ignoring RSSI callback\n");
+       }
+
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+                          struct ieee80211_sta *sta, bool set)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+       return 0;
+}
+
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif, u16 queue,
+                   const struct ieee80211_tx_queue_params *params)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *ctx = vif_priv->ctx;
+       unsigned long flags;
+       int q;
+
+       if (WARN_ON(!ctx))
+               return -EINVAL;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (!iwl_is_ready_rf(priv->shrd)) {
+               IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+               return -EIO;
+       }
+
+       if (queue >= AC_NUM) {
+               IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+               return 0;
+       }
+
+       q = AC_NUM - 1 - queue;
+
+       spin_lock_irqsave(&priv->shrd->lock, flags);
+
+       ctx->qos_data.def_qos_parm.ac[q].cw_min =
+               cpu_to_le16(params->cw_min);
+       ctx->qos_data.def_qos_parm.ac[q].cw_max =
+               cpu_to_le16(params->cw_max);
+       ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+       ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+                       cpu_to_le16((params->txop * 32));
+
+       ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+       spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return 0;
+}
+
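Two conversions in iwlagn_mac_conf_tx() above are easy to miss: the queue index is mirrored (q = AC_NUM - 1 - queue) because mac80211 numbers queue 0 as the highest-priority access category while the firmware's ac[] array appears to run the other way, and the TXOP is multiplied by 32 because mac80211 passes it in units of 32 microseconds. A standalone illustration of both, with AC_NUM taken to be 4 as in this driver:

#include <stdio.h>

#define AC_NUM 4        /* number of EDCA access categories */

int main(void)
{
        unsigned int queue, txop = 94;  /* txop in units of 32 usec (mac80211) */

        for (queue = 0; queue < AC_NUM; queue++)
                printf("mac80211 queue %u -> firmware ac[%u]\n",
                       queue, AC_NUM - 1 - queue);

        /* 94 * 32 = 3008 usec, matching the ac[1] default TXOP shown earlier */
        printf("txop %u (32-usec units) -> %u usec\n", txop, txop * 32);
        return 0;
}
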
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       struct iwl_priv *priv = hw->priv;
+
+       return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+       iwl_connection_init_rx_config(priv, ctx);
+
+       iwlagn_set_rxon_chain(priv, ctx);
+
+       return iwlagn_commit_rxon(priv, ctx);
+}
+
+static int iwl_setup_interface(struct iwl_priv *priv,
+                              struct iwl_rxon_context *ctx)
+{
+       struct ieee80211_vif *vif = ctx->vif;
+       int err;
+
+       lockdep_assert_held(&priv->shrd->mutex);
+
+       /*
+        * This variable will be correct only when there's just
+        * a single context, but all code using it is for hardware
+        * that supports only one context.
+        */
+       priv->iw_mode = vif->type;
+
+       ctx->is_active = true;
+
+       err = iwl_set_mode(priv, ctx);
+       if (err) {
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+               return err;
+       }
+
+       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
+           vif->type == NL80211_IFTYPE_ADHOC) {
+               /*
+                * pretend to have high BT traffic as long as we
+                * are operating in IBSS mode, as this will cause
+                * the rate scaling etc. to behave as intended.
+                */
+               priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+       }
+
+       return 0;
+}
+
+static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+       struct iwl_rxon_context *tmp, *ctx = NULL;
+       int err;
+       enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+
+       IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+                          viftype, vif->addr);
+
+       cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+       mutex_lock(&priv->shrd->mutex);
+
+       iwlagn_disable_roc(priv);
+
+       if (!iwl_is_ready_rf(priv->shrd)) {
+               IWL_WARN(priv, "Trying to add interface when device is not ready\n");
+               err = -EINVAL;
+               goto out;
+       }
+
+       for_each_context(priv, tmp) {
+               u32 possible_modes =
+                       tmp->interface_modes | tmp->exclusive_interface_modes;
+
+               if (tmp->vif) {
+                       /* check if this busy context is exclusive */
+                       if (tmp->exclusive_interface_modes &
+                                               BIT(tmp->vif->type)) {
+                               err = -EINVAL;
+                               goto out;
+                       }
+                       continue;
+               }
+
+               if (!(possible_modes & BIT(viftype)))
+                       continue;
+
+               /* found a possibly usable context without an interface */
+               ctx = tmp;
+               break;
+       }
+
+       if (!ctx) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       vif_priv->ctx = ctx;
+       ctx->vif = vif;
+
+       err = iwl_setup_interface(priv, ctx);
+       if (!err)
+               goto out;
+
+       ctx->vif = NULL;
+       priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+       return err;
+}
+
+static void iwl_teardown_interface(struct iwl_priv *priv,
+                                  struct ieee80211_vif *vif,
+                                  bool mode_change)
+{
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+       lockdep_assert_held(&priv->shrd->mutex);
+
+       if (priv->scan_vif == vif) {
+               iwl_scan_cancel_timeout(priv, 200);
+               iwl_force_scan_end(priv);
+       }
+
+       if (!mode_change) {
+               iwl_set_mode(priv, ctx);
+               if (!ctx->always_active)
+                       ctx->is_active = false;
+       }
+
+       /*
+        * When removing the IBSS interface, overwrite the
+        * BT traffic load with the stored one from the last
+        * notification, if any. On devices that don't implement
+        * BT coex notifications this has no effect, since both
+        * values are the same and zero.
+        */
+       if (vif->type == NL80211_IFTYPE_ADHOC)
+               priv->bt_traffic_load = priv->last_bt_traffic_load;
+}
+
+static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       mutex_lock(&priv->shrd->mutex);
+
+       if (WARN_ON(ctx->vif != vif)) {
+               struct iwl_rxon_context *tmp;
+               IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+               for_each_context(priv, tmp)
+                       IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+                               tmp->ctxid, tmp, tmp->vif);
+       }
+       ctx->vif = NULL;
+
+       iwl_teardown_interface(priv, vif, false);
+
+       mutex_unlock(&priv->shrd->mutex);
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+
+static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               enum nl80211_iftype newtype, bool newp2p)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+       struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+       struct iwl_rxon_context *tmp;
+       enum nl80211_iftype newviftype = newtype;
+       u32 interface_modes;
+       int err;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+       mutex_lock(&priv->shrd->mutex);
+
+       if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+               /*
+                * Huh? But wait ... this can maybe happen when
+                * we're in the middle of a firmware restart!
+                */
+               err = -EBUSY;
+               goto out;
+       }
+
+       interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+       if (!(interface_modes & BIT(newtype))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       /*
+        * Refuse a change that should be done by moving from the PAN
+        * context to the BSS context instead, if the BSS context is
+        * available and can support the new interface type.
+        */
+       if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
+           (bss_ctx->interface_modes & BIT(newtype) ||
+            bss_ctx->exclusive_interface_modes & BIT(newtype))) {
+               BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (ctx->exclusive_interface_modes & BIT(newtype)) {
+               for_each_context(priv, tmp) {
+                       if (ctx == tmp)
+                               continue;
+
+                       if (!tmp->vif)
+                               continue;
+
+                       /*
+                        * The current mode switch would be exclusive, but
+                        * another context is active ... refuse the switch.
+                        */
+                       err = -EBUSY;
+                       goto out;
+               }
+       }
+
+       /* success */
+       iwl_teardown_interface(priv, vif, true);
+       vif->type = newviftype;
+       vif->p2p = newp2p;
+       err = iwl_setup_interface(priv, ctx);
+       WARN_ON(err);
+       /*
+        * We've switched internally, but submitting to the
+        * device may have failed for some reason. Mask this
+        * error, because otherwise mac80211 will not switch
+        * (and set the interface type back) and we'll be
+        * out of sync with it.
+        */
+       err = 0;
+
+ out:
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return err;
+}
+
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif,
+                   struct cfg80211_scan_request *req)
+{
+       struct iwl_priv *priv = hw->priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       if (req->n_channels == 0)
+               return -EINVAL;
+
+       mutex_lock(&priv->shrd->mutex);
+
+       /*
+        * If an internal scan is in progress, just record the
+        * scan_request and vif; the request is deferred until
+        * the internal scan finishes.
+        */
+       if (priv->scan_type != IWL_SCAN_NORMAL) {
+               IWL_DEBUG_SCAN(priv,
+                              "SCAN request during internal scan - defer\n");
+               priv->scan_request = req;
+               priv->scan_vif = vif;
+               ret = 0;
+       } else {
+               priv->scan_request = req;
+               priv->scan_vif = vif;
+               /*
+                * mac80211 will only ask for one band at a time
+                * so using channels[0] here is ok
+                */
+               ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
+                                       req->channels[0]->band);
+               if (ret) {
+                       priv->scan_request = NULL;
+                       priv->scan_vif = NULL;
+               }
+       }
+
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       mutex_unlock(&priv->shrd->mutex);
+
+       return ret;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+                      struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       int ret;
+
+       IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+                          "station %pM\n", sta->addr);
+       mutex_lock(&priv->shrd->mutex);
+       IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+                       sta->addr);
+       ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+       if (ret)
+               IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
+                       sta->addr);
+       mutex_unlock(&priv->shrd->mutex);
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+
+       return ret;
+}
+
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+       priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
+       priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+       priv->stations[sta_id].sta.sta.modify_mask = 0;
+       priv->stations[sta_id].sta.sleep_tx_count = 0;
+       priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+       iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+       spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+}
+
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+                          struct ieee80211_vif *vif,
+                          enum sta_notify_cmd cmd,
+                          struct ieee80211_sta *sta)
+{
+       struct iwl_priv *priv = hw->priv;
+       struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+       int sta_id;
+
+       IWL_DEBUG_MAC80211(priv, "enter\n");
+
+       switch (cmd) {
+       case STA_NOTIFY_SLEEP:
+               WARN_ON(!sta_priv->client);
+               sta_priv->asleep = true;
+               if (atomic_read(&sta_priv->pending_frames) > 0)
+                       ieee80211_sta_block_awake(hw, sta, true);
+               break;
+       case STA_NOTIFY_AWAKE:
+               WARN_ON(!sta_priv->client);
+               if (!sta_priv->asleep)
+                       break;
+               sta_priv->asleep = false;
+               sta_id = iwl_sta_id(sta);
+               if (sta_id != IWL_INVALID_STATION)
+                       iwl_sta_modify_ps_wake(priv, sta_id);
+               break;
+       default:
+               break;
+       }
+       IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+struct ieee80211_ops iwlagn_hw_ops = {
+       .tx = iwlagn_mac_tx,
+       .start = iwlagn_mac_start,
+       .stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM_SLEEP
+       .suspend = iwlagn_mac_suspend,
+       .resume = iwlagn_mac_resume,
+#endif
+       .add_interface = iwlagn_mac_add_interface,
+       .remove_interface = iwlagn_mac_remove_interface,
+       .change_interface = iwlagn_mac_change_interface,
+       .config = iwlagn_mac_config,
+       .configure_filter = iwlagn_configure_filter,
+       .set_key = iwlagn_mac_set_key,
+       .update_tkip_key = iwlagn_mac_update_tkip_key,
+       .set_rekey_data = iwlagn_mac_set_rekey_data,
+       .conf_tx = iwlagn_mac_conf_tx,
+       .bss_info_changed = iwlagn_bss_info_changed,
+       .ampdu_action = iwlagn_mac_ampdu_action,
+       .hw_scan = iwlagn_mac_hw_scan,
+       .sta_notify = iwlagn_mac_sta_notify,
+       .sta_add = iwlagn_mac_sta_add,
+       .sta_remove = iwlagn_mac_sta_remove,
+       .channel_switch = iwlagn_mac_channel_switch,
+       .flush = iwlagn_mac_flush,
+       .tx_last_beacon = iwlagn_mac_tx_last_beacon,
+       .remain_on_channel = iwlagn_mac_remain_on_channel,
+       .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+       .rssi_callback = iwlagn_mac_rssi_callback,
+       CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+       CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+       .tx_sync = iwlagn_mac_tx_sync,
+       .finish_tx_sync = iwlagn_mac_finish_tx_sync,
+       .set_tim = iwlagn_mac_set_tim,
+};
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_alloc_all(void)
+{
+       struct iwl_priv *priv;
+       /* mac80211 allocates memory for this device instance, including
+        *   space for this driver's private structure */
+       struct ieee80211_hw *hw;
+
+       hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
+       if (!hw)
+               goto out;
+
+       priv = hw->priv;
+       priv->hw = hw;
+
+out:
+       return hw;
+}
index 1800029..850ec8e 100644 (file)
@@ -256,6 +256,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
+       {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
 
 /* 6x30 Series */
        {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -325,46 +327,28 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
        {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
        {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
        {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
 
 /* 2x30 Series */
        {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
        {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
        {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
 
 /* 6x35 Series */
        {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
-       {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
-       {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
-       {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
 
 /* 105 Series */
        {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
-       {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
-       {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
        {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
 
 /* 135 Series */
        {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
-       {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
-       {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
-       {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
 
        {0}
 };
index e5d727f..359d218 100644 (file)
@@ -416,6 +416,8 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
 
                if (!iwl_is_associated_ctx(ctx))
                        continue;
+               if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
+                       continue;
                value = ctx->beacon_int;
                if (!value)
                        value = IWL_PASSIVE_DWELL_BASE;
@@ -678,7 +680,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                        priv->contexts[IWL_RXON_CTX_BSS].active.flags &
                                                RXON_FLG_CHANNEL_MODE_MSK)
                                       >> RXON_FLG_CHANNEL_MODE_POS;
-               if (chan_mod == CHANNEL_MODE_PURE_40) {
+               if ((priv->scan_request && priv->scan_request->no_cck) ||
+                   chan_mod == CHANNEL_MODE_PURE_40) {
                        rate = IWL_RATE_6M_PLCP;
                } else {
                        rate = IWL_RATE_1M_PLCP;
@@ -938,51 +941,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
        return 0;
 }
 
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
-                   struct ieee80211_vif *vif,
-                   struct cfg80211_scan_request *req)
-{
-       struct iwl_priv *priv = hw->priv;
-       int ret;
-
-       IWL_DEBUG_MAC80211(priv, "enter\n");
-
-       if (req->n_channels == 0)
-               return -EINVAL;
-
-       mutex_lock(&priv->shrd->mutex);
-
-       /*
-        * If an internal scan is in progress, just set
-        * up the scan_request as per above.
-        */
-       if (priv->scan_type != IWL_SCAN_NORMAL) {
-               IWL_DEBUG_SCAN(priv,
-                              "SCAN request during internal scan - defer\n");
-               priv->scan_request = req;
-               priv->scan_vif = vif;
-               ret = 0;
-       } else {
-               priv->scan_request = req;
-               priv->scan_vif = vif;
-               /*
-                * mac80211 will only ask for one band at a time
-                * so using channels[0] here is ok
-                */
-               ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
-                                       req->channels[0]->band);
-               if (ret) {
-                       priv->scan_request = NULL;
-                       priv->scan_vif = NULL;
-               }
-       }
-
-       IWL_DEBUG_MAC80211(priv, "leave\n");
-
-       mutex_unlock(&priv->shrd->mutex);
-
-       return ret;
-}
 
 /*
  * internal short scan, this function should only been called while associated.
index 14eaf37..39aa9cf 100644 (file)
@@ -174,7 +174,6 @@ struct iwl_mod_params {
  * @ct_kill_exit_threshold: when to re-enable the device - in a hw-dependent unit
  *     relevant for 1000, 6000 and up
  * @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
  * @calib_rt_cfg: setup runtime calibrations for the hw
  * @struct iwl_sensitivity_ranges: range of sensitivity values
  */
@@ -195,7 +194,6 @@ struct iwl_hw_params {
        u32 ct_kill_exit_threshold;
        unsigned int wd_timeout;
 
-       u32 calib_init_cfg;
        u32 calib_rt_cfg;
        const struct iwl_sensitivity_ranges *sens;
 };
@@ -259,6 +257,52 @@ struct iwl_tid_data {
 };
 
 /**
+ * enum iwl_ucode_type
+ *
+ * The type of ucode currently loaded on the hardware.
+ *
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ */
+enum iwl_ucode_type {
+       IWL_UCODE_NONE,
+       IWL_UCODE_REGULAR,
+       IWL_UCODE_INIT,
+       IWL_UCODE_WOWLAN,
+};
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then trigger whatever will cause the uCode
+ * to notify the driver, and finally wait for that
+ * notification with iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+       struct list_head list;
+
+       void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
+                  void *data);
+       void *fn_data;
+
+       u8 cmd;
+       bool triggered, aborted;
+};
+
+/**
  * struct iwl_shared - shared fields for all the layers of the driver
  *
  * @dbg_level_dev: dbg level set per device. Prevails on
@@ -275,6 +319,10 @@ struct iwl_tid_data {
  * @sta_lock: protects the station table.
  *     If lock and sta_lock are needed, lock must be acquired first.
  * @mutex:
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: list of notification waiters
+ * @notif_wait_lock: lock protecting the notification wait list
+ * @notif_waitq: head of notification wait queue
  */
 struct iwl_shared {
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -302,6 +350,14 @@ struct iwl_shared {
        struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
 
        wait_queue_head_t wait_command_queue;
+
+       /* ucode related variables */
+       enum iwl_ucode_type ucode_type;
+
+       /* notification wait support */
+       struct list_head notif_waits;
+       spinlock_t notif_wait_lock;
+       wait_queue_head_t notif_waitq;
 };
 
 /*Whatever _m is (iwl_trans, iwl_priv, iwl_bus, these macros will work */
@@ -445,6 +501,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
 void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
 
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+                             struct iwl_notification_wait *wait_entry,
+                             u8 cmd,
+                             void (*fn)(struct iwl_priv *priv,
+                                        struct iwl_rx_packet *pkt,
+                                        void *data),
+                             void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+                        struct iwl_notification_wait *wait_entry,
+                        unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+                          struct iwl_notification_wait *wait_entry);
+
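Taken together with the struct iwl_notification_wait kernel-doc above, these declarations describe a one-shot wait pattern: register a waiter on the stack, trigger the firmware, then block for the reply, or remove the waiter if triggering failed. A sketch of that call sequence, modelled on the reworked iwl_testmode_cfg_init_calib() in iwl-testmode.c later in this patch; the wrapper function name is illustrative only and this is not compilable outside the driver.

static int example_wait_for_calib(struct iwl_priv *priv)
{
        struct iwl_notification_wait calib_wait;
        int ret;

        /* register interest in the notification before triggering it */
        iwl_init_notification_wait(priv->shrd, &calib_wait,
                                   CALIBRATION_COMPLETE_NOTIFICATION,
                                   NULL, NULL);

        ret = iwlagn_init_alive_start(priv);    /* kicks off calibration */
        if (ret) {
                /* never going to wait, so drop the entry explicitly */
                iwl_remove_notification(priv->shrd, &calib_wait);
                return ret;
        }

        /* blocks until the notification arrives or two seconds elapse */
        return iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
}
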
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_reset_traffic_log(struct iwl_priv *priv);
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
similarity index 78%
rename from drivers/net/wireless/iwlwifi/iwl-sv-open.c
rename to drivers/net/wireless/iwlwifi/iwl-testmode.c
index 5e50d88..ff72dbc 100644 (file)
@@ -106,6 +106,10 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
        [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
 
        [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+       [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+       [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+       [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
 };
 
 /*
@@ -177,6 +181,18 @@ void iwl_testmode_init(struct iwl_priv *priv)
 {
        priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
        priv->testmode_trace.trace_enabled = false;
+       priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+       if (priv->testmode_sram.sram_readed) {
+               kfree(priv->testmode_sram.buff_addr);
+               priv->testmode_sram.buff_addr = NULL;
+               priv->testmode_sram.buff_size = 0;
+               priv->testmode_sram.num_chunks = 0;
+               priv->testmode_sram.sram_readed = false;
+       }
 }
 
 static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +217,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
 void iwl_testmode_cleanup(struct iwl_priv *priv)
 {
        iwl_trace_cleanup(priv);
+       iwl_sram_cleanup(priv);
 }
 
 /*
@@ -276,7 +293,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
        IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
 
        switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
-       case IWL_TM_CMD_APP2DEV_REG_READ32:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
                val32 = iwl_read32(bus(priv), ofs);
                IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
 
@@ -291,7 +308,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
                        IWL_DEBUG_INFO(priv,
                                       "Error sending msg : %d\n", status);
                break;
-       case IWL_TM_CMD_APP2DEV_REG_WRITE32:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
                if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
                        IWL_DEBUG_INFO(priv,
                                       "Error finding value to write\n");
@@ -302,7 +319,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
                        iwl_write32(bus(priv), ofs, val32);
                }
                break;
-       case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
                if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
                        IWL_DEBUG_INFO(priv, "Error finding value to write\n");
                        return -ENOMSG;
@@ -312,6 +329,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
                        iwl_write8(bus(priv), ofs, val8);
                }
                break;
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+               val32 = iwl_read_prph(bus(priv), ofs);
+               IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+               skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+               if (!skb) {
+                       IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+                       return -ENOMEM;
+               }
+               NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+               status = cfg80211_testmode_reply(skb);
+               if (status < 0)
+                       IWL_DEBUG_INFO(priv,
+                                       "Error sending msg : %d\n", status);
+               break;
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+               if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+                       IWL_DEBUG_INFO(priv,
+                                       "Error finding value to write\n");
+                       return -ENOMSG;
+               } else {
+                       val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+                       IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+                       iwl_write_prph(bus(priv), ofs, val32);
+               }
+               break;
        default:
                IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
                return -ENOSYS;
@@ -330,7 +373,7 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
        struct iwl_notification_wait calib_wait;
        int ret;
 
-       iwlagn_init_notification_wait(priv, &calib_wait,
+       iwl_init_notification_wait(priv->shrd, &calib_wait,
                                      CALIBRATION_COMPLETE_NOTIFICATION,
                                      NULL, NULL);
        ret = iwlagn_init_alive_start(priv);
@@ -340,14 +383,14 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
                goto cfg_init_calib_error;
        }
 
-       ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
+       ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
        if (ret)
                IWL_DEBUG_INFO(priv, "Error detecting"
                        " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
        return ret;
 
 cfg_init_calib_error:
-       iwlagn_remove_notification(priv, &calib_wait);
+       iwl_remove_notification(priv->shrd, &calib_wait);
        return ret;
 }
 
@@ -396,8 +439,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                break;
 
        case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
-               status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
-                                                     IWL_UCODE_INIT);
+               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
                if (status)
                        IWL_DEBUG_INFO(priv,
                                "Error loading init ucode: %d\n", status);
@@ -409,9 +451,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                break;
 
        case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
-               status = iwlagn_load_ucode_wait_alive(priv,
-                                          &priv->ucode_rt,
-                                          IWL_UCODE_REGULAR);
+               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
                if (status) {
                        IWL_DEBUG_INFO(priv,
                                "Error loading runtime ucode: %d\n", status);
@@ -423,6 +463,21 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
                                "Error starting the device: %d\n", status);
                break;
 
+       case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+               iwl_scan_cancel_timeout(priv, 200);
+               iwl_trans_stop_device(trans(priv));
+               status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+               if (status) {
+                       IWL_DEBUG_INFO(priv,
+                               "Error loading WOWLAN ucode: %d\n", status);
+                       break;
+               }
+               status = iwl_alive_start(priv);
+               if (status)
+                       IWL_DEBUG_INFO(priv,
+                               "Error starting the device: %d\n", status);
+               break;
+
        case IWL_TM_CMD_APP2DEV_GET_EEPROM:
                if (priv->eeprom) {
                        skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
@@ -535,7 +590,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
                }
                priv->testmode_trace.num_chunks =
                        DIV_ROUND_UP(priv->testmode_trace.buff_size,
-                                    TRACE_CHUNK_SIZE);
+                                    DUMP_CHUNK_SIZE);
                break;
 
        case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -567,15 +622,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
                idx = cb->args[4];
                if (idx >= priv->testmode_trace.num_chunks)
                        return -ENOENT;
-               length = TRACE_CHUNK_SIZE;
+               length = DUMP_CHUNK_SIZE;
                if (((idx + 1) == priv->testmode_trace.num_chunks) &&
-                   (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+                   (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
                        length = priv->testmode_trace.buff_size %
-                               TRACE_CHUNK_SIZE;
+                               DUMP_CHUNK_SIZE;
 
                NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
                        priv->testmode_trace.trace_addr +
-                       (TRACE_CHUNK_SIZE * idx));
+                       (DUMP_CHUNK_SIZE * idx));
                idx++;
                cb->args[4] = idx;
                return 0;
@@ -621,6 +676,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
        return 0;
 }
 
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
+ *
+ * Several errors may be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not yet been delivered to userspace, -ENOMSG if
+ * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * are missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is returned, indicating that the SRAM read succeeded.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+       struct iwl_priv *priv = hw->priv;
+       u32 base, ofs, size, maxsize;
+
+       if (priv->testmode_sram.sram_readed)
+               return -EBUSY;
+
+       if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+               IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+               return -ENOMSG;
+       }
+       ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+       if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+               IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+               return -ENOMSG;
+       }
+       size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+       switch (priv->shrd->ucode_type) {
+       case IWL_UCODE_REGULAR:
+               maxsize = trans(priv)->ucode_rt.data.len;
+               break;
+       case IWL_UCODE_INIT:
+               maxsize = trans(priv)->ucode_init.data.len;
+               break;
+       case IWL_UCODE_WOWLAN:
+               maxsize = trans(priv)->ucode_wowlan.data.len;
+               break;
+       case IWL_UCODE_NONE:
+               IWL_DEBUG_INFO(priv, "Error, uCode has not been loaded\n");
+               return -ENOSYS;
+       default:
+               IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+               return -ENOSYS;
+       }
+       if ((ofs + size) > maxsize) {
+               IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+               return -EINVAL;
+       }
+       priv->testmode_sram.buff_size = (size / 4) * 4;
+       priv->testmode_sram.buff_addr =
+               kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+       if (priv->testmode_sram.buff_addr == NULL) {
+               IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+               return -ENOMEM;
+       }
+       base = 0x800000;
+       _iwl_read_targ_mem_words(bus(priv), base + ofs,
+                                       priv->testmode_sram.buff_addr,
+                                       priv->testmode_sram.buff_size / 4);
+       priv->testmode_sram.num_chunks =
+               DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+       priv->testmode_sram.sram_readed = true;
+       return 0;
+}
+
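iwl_testmode_sram() above rounds the requested size down to a whole number of 32-bit words (reads go through _iwl_read_targ_mem_words()) and splits the buffer into DUMP_CHUNK_SIZE pieces for the netlink dump callback that follows. A standalone sanity check of that arithmetic, assuming a 4096-byte PAGE_SIZE, so DUMP_CHUNK_SIZE = 4096 - 1024 = 3072:

#include <stdio.h>

#define PAGE_SIZE       4096u                   /* assumption: 4 KiB pages */
#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)      /* as defined in iwl-testmode.h */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int size = 10000;                      /* requested SRAM read size */
        unsigned int buff_size = (size / 4) * 4;        /* word-aligned: 10000 */
        unsigned int chunks = DIV_ROUND_UP(buff_size, DUMP_CHUNK_SIZE);
        unsigned int last = buff_size % DUMP_CHUNK_SIZE;

        /* 10000 bytes -> 4 chunks: three full 3072-byte chunks + a 784-byte tail */
        printf("%u bytes -> %u chunks, last chunk %u bytes\n",
               buff_size, chunks, last ? last : DUMP_CHUNK_SIZE);
        return 0;
}
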
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+                                  struct sk_buff *skb,
+                                  struct netlink_callback *cb)
+{
+       struct iwl_priv *priv = hw->priv;
+       int idx, length;
+
+       if (priv->testmode_sram.sram_readed) {
+               idx = cb->args[4];
+               if (idx >= priv->testmode_sram.num_chunks) {
+                       iwl_sram_cleanup(priv);
+                       return -ENOENT;
+               }
+               length = DUMP_CHUNK_SIZE;
+               if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+                   (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+                       length = priv->testmode_sram.buff_size %
+                               DUMP_CHUNK_SIZE;
+
+               NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+                       priv->testmode_sram.buff_addr +
+                       (DUMP_CHUNK_SIZE * idx));
+               idx++;
+               cb->args[4] = idx;
+               return 0;
+       } else
+               return -EFAULT;
+
+ nla_put_failure:
+       return -ENOBUFS;
+}
+
 
 /* The testmode gnl message handler that takes the gnl message from the
  * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -668,9 +827,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
                IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
                result = iwl_testmode_ucode(hw, tb);
                break;
-       case IWL_TM_CMD_APP2DEV_REG_READ32:
-       case IWL_TM_CMD_APP2DEV_REG_WRITE32:
-       case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+       case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+       case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
                IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
                result = iwl_testmode_reg(hw, tb);
                break;
@@ -680,6 +841,7 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
        case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
        case IWL_TM_CMD_APP2DEV_GET_EEPROM:
        case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+       case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
                IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
                result = iwl_testmode_driver(hw, tb);
                break;
@@ -696,6 +858,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
                result = iwl_testmode_ownership(hw, tb);
                break;
 
+       case IWL_TM_CMD_APP2DEV_READ_SRAM:
+               IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+               result = iwl_testmode_sram(hw, tb);
+               break;
+
        default:
                IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
                result = -ENOSYS;
@@ -744,6 +911,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
                IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
                result = iwl_testmode_trace_dump(hw, tb, skb, cb);
                break;
+       case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+               IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+               result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+               break;
        default:
                result = -EINVAL;
                break;
index b980bda..deedd27 100644 (file)
@@ -76,9 +76,9 @@
  *     the actual uCode host command ID is carried with
  *     IWL_TM_ATTR_UCODE_CMD_ID
  *
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
  *     commands from user application to directly access a register
  *
  * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
  * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
  *     commands from kernel space to carry the eeprom response
  *     to user application
+ *
  * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
  *     commands from user application to change the ownership of the uCode;
  *     if the application has ownership, only host commands from
  *     testmode are delivered to the uCode. Default owner is the driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ *     commands from user application to indirectly access a peripheral register
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ *     commands from user application to read data from SRAM
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake on Wireless LAN uCode image
+ *
  */
 enum iwl_tm_cmd_t {
        IWL_TM_CMD_APP2DEV_UCODE                = 1,
-       IWL_TM_CMD_APP2DEV_REG_READ32           = 2,
-       IWL_TM_CMD_APP2DEV_REG_WRITE32          = 3,
-       IWL_TM_CMD_APP2DEV_REG_WRITE8           = 4,
+       IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32    = 2,
+       IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32   = 3,
+       IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8    = 4,
        IWL_TM_CMD_APP2DEV_GET_DEVICENAME       = 5,
        IWL_TM_CMD_APP2DEV_LOAD_INIT_FW         = 6,
        IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB       = 7,
@@ -126,7 +138,12 @@ enum iwl_tm_cmd_t {
        IWL_TM_CMD_DEV2APP_UCODE_RX_PKT         = 15,
        IWL_TM_CMD_DEV2APP_EEPROM_RSP           = 16,
        IWL_TM_CMD_APP2DEV_OWNERSHIP            = 17,
-       IWL_TM_CMD_MAX                          = 18,
+       IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32  = 18,
+       IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
+       IWL_TM_CMD_APP2DEV_READ_SRAM            = 20,
+       IWL_TM_CMD_APP2DEV_DUMP_SRAM            = 21,
+       IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW       = 22,
+       IWL_TM_CMD_MAX                          = 23,
 };
 
 /*
@@ -196,6 +213,18 @@ enum iwl_tm_cmd_t {
  *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
  *     The mandatory fields are:
  *     IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ *     The mandatory fields are:
+ *     IWL_TM_ATTR_SRAM_ADDR for the address in sram
+ *     IWL_TM_ATTR_SRAM_SIZE for the size of the data to read
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ *     When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ *     IWL_TM_ATTR_SRAM_DUMP carries the data read from SRAM
+ *
  */
 enum iwl_tm_attr_t {
        IWL_TM_ATTR_NOT_APPLICABLE              = 0,
@@ -213,7 +242,10 @@ enum iwl_tm_attr_t {
        IWL_TM_ATTR_TRACE_DUMP                  = 12,
        IWL_TM_ATTR_FIXRATE                     = 13,
        IWL_TM_ATTR_UCODE_OWNER                 = 14,
-       IWL_TM_ATTR_MAX                         = 15,
+       IWL_TM_ATTR_SRAM_ADDR                   = 15,
+       IWL_TM_ATTR_SRAM_SIZE                   = 16,
+       IWL_TM_ATTR_SRAM_DUMP                   = 17,
+       IWL_TM_ATTR_MAX                         = 18,
 };
 
 /* uCode trace buffer */
@@ -221,6 +253,8 @@ enum iwl_tm_attr_t {
 #define TRACE_BUFF_SIZE_MIN    0x20000
 #define TRACE_BUFF_SIZE_DEF    TRACE_BUFF_SIZE_MIN
 #define TRACE_BUFF_PADD                0x2000
-#define TRACE_CHUNK_SIZE       (PAGE_SIZE - 1024)
+
+/* Maximum data size of each dump packet */
+#define DUMP_CHUNK_SIZE                (PAGE_SIZE - 1024)
 
 #endif
index 2b6756e..5a384b3 100644 (file)
@@ -354,8 +354,13 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
        txq->swq_id = (hwq << 2) | ac;
 }
 
+static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
+{
+       return txq->swq_id & 0x3;
+}
+
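iwl_set_swq_id() above packs the hardware queue number and the access category into a single byte as (hwq << 2) | ac, and the new iwl_get_queue_ac() helper recovers the AC with & 0x3; the wake/stop paths below split the value the same way. A small standalone round trip of that encoding, with arbitrarily chosen values (the driver stores swq_id as a u8):

#include <stdio.h>

int main(void)
{
        unsigned int hwq = 10, ac = 2;
        unsigned int swq_id = (hwq << 2) | ac;  /* 0x2a */

        /* decode: low two bits are the AC, the remaining bits the hw queue */
        printf("swq_id=0x%02x -> ac=%u hwq=%u\n",
               swq_id, swq_id & 0x3, swq_id >> 2);
        return 0;
}
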
 static inline void iwl_wake_queue(struct iwl_trans *trans,
-                                 struct iwl_tx_queue *txq)
+                                 struct iwl_tx_queue *txq, const char *msg)
 {
        u8 queue = txq->swq_id;
        u8 ac = queue & 3;
@@ -363,13 +368,22 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
-               if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+       if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
+               if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
                        iwl_wake_sw_queue(priv(trans), ac);
+                       IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
+                                           hwq, ac, msg);
+               } else {
+                       IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
+                                           " stop count %d. %s",
+                                           hwq, ac, atomic_read(&trans_pcie->
+                                           queue_stop_count[ac]), msg);
+               }
+       }
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
-                                 struct iwl_tx_queue *txq)
+                                 struct iwl_tx_queue *txq, const char *msg)
 {
        u8 queue = txq->swq_id;
        u8 ac = queue & 3;
@@ -377,9 +391,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie =
                IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
-               if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+       if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
+               if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
                        iwl_stop_sw_queue(priv(trans), ac);
+                       IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
+                                           " stop count %d. %s",
+                                           hwq, ac, atomic_read(&trans_pcie->
+                                           queue_stop_count[ac]), msg);
+               } else {
+                       IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
+                                           " stop count %d. %s",
+                                           hwq, ac, atomic_read(&trans_pcie->
+                                           queue_stop_count[ac]), msg);
+               }
+       } else {
+               IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is already stopped. %s",
+                                   hwq, msg);
+       }
 }
 
 #ifdef ieee80211_stop_queue
index 374c68c..becd921 100644 (file)
@@ -595,7 +595,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
                IWL_TRANS_GET_PCIE_TRANS(trans);
 
        base = priv->device_pointers.error_event_table;
-       if (priv->ucode_type == IWL_UCODE_INIT) {
+       if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
                if (!base)
                        base = priv->init_errlog_ptr;
        } else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
                IWL_ERR(trans,
                        "Not valid error log pointer 0x%08X for %s uCode\n",
                        base,
-                       (priv->ucode_type == IWL_UCODE_INIT)
+                       (trans->shrd->ucode_type == IWL_UCODE_INIT)
                                        ? "Init" : "RT");
                return;
        }
@@ -710,7 +710,7 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
                return pos;
 
        base = priv->device_pointers.log_event_table;
-       if (priv->ucode_type == IWL_UCODE_INIT) {
+       if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
                if (!base)
                        base = priv->init_evtlog_ptr;
        } else {
@@ -824,7 +824,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
        struct iwl_priv *priv = priv(trans);
 
        base = priv->device_pointers.log_event_table;
-       if (priv->ucode_type == IWL_UCODE_INIT) {
+       if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
                logsize = priv->init_evtlog_size;
                if (!base)
                        base = priv->init_evtlog_ptr;
@@ -838,7 +838,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
                IWL_ERR(trans,
                        "Invalid event log pointer 0x%08X for %s uCode\n",
                        base,
-                       (priv->ucode_type == IWL_UCODE_INIT)
+                       (trans->shrd->ucode_type == IWL_UCODE_INIT)
                                        ? "Init" : "RT");
                return -EINVAL;
        }
@@ -1108,7 +1108,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
-               priv(trans)->ucode_write_complete = 1;
+               trans->ucode_write_complete = 1;
                wake_up(&trans->shrd->wait_command_queue);
        }
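
The ucode_write_complete flag that moves from iwl_priv into iwl_trans here is a plain completion handshake: the firmware loader clears it, programs the DMA channel and sleeps on the command wait queue with a timeout, while the FH_TX interrupt above sets the flag and wakes the waiter. A rough userspace analogue of that handshake, with made-up names and timings:

/* Userspace analogue of the ucode_write_complete handshake (illustrative only). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static bool write_complete;

/* Stand-in for the FH_TX interrupt: the DMA finished copying the section. */
static void *fake_irq(void *arg)
{
	(void)arg;
	usleep(100 * 1000);			/* pretend the copy takes 100 ms */
	pthread_mutex_lock(&lock);
	write_complete = true;			/* trans->ucode_write_complete = 1 */
	pthread_cond_broadcast(&waitq);		/* wake_up(&wait_command_queue) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Stand-in for the loader: wait up to 5 s for the completion. */
static int load_section(void)
{
	struct timespec deadline;
	pthread_t irq;
	int err = 0;

	write_complete = false;
	pthread_create(&irq, NULL, fake_irq, NULL);	/* "start the DMA" */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	pthread_mutex_lock(&lock);
	while (!write_complete && err == 0)
		err = pthread_cond_timedwait(&waitq, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	pthread_join(irq, NULL);
	return write_complete ? 0 : -1;		/* the driver returns -ETIMEDOUT */
}

int main(void)
{
	printf("load_section() -> %d\n", load_section());
	return 0;
}
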
 
index 4a0c953..79331fb 100644 (file)
@@ -430,7 +430,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 
        txq->sched_retry = scd_retry;
 
-       IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
+       IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
                       active ? "Activate" : "Deactivate",
                       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
 }
@@ -559,14 +559,14 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
        tid_data->agg.txq_id = txq_id;
        iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
 
-       tid_data = &trans->shrd->tid_data[sta_id][tid];
        if (tid_data->tfds_in_queue == 0) {
-               IWL_DEBUG_HT(trans, "HW queue is empty\n");
+               IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
                tid_data->agg.state = IWL_AGG_ON;
                iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
        } else {
-               IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
-                            "queue\n", tid_data->tfds_in_queue);
+               IWL_DEBUG_TX_QUEUES(trans,
+                                   "HW queue is NOT empty: %d packets in HW"
+                                   " queue\n", tid_data->tfds_in_queue);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
        spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
@@ -643,14 +643,15 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
 
        /* The queue is not empty */
        if (write_ptr != read_ptr) {
-               IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
+               IWL_DEBUG_TX_QUEUES(trans,
+                                   "Stopping a non empty AGG HW QUEUE\n");
                trans->shrd->tid_data[sta_id][tid].agg.state =
                        IWL_EMPTYING_HW_QUEUE_DELBA;
                spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
                return 0;
        }
 
-       IWL_DEBUG_HT(trans, "HW queue is empty\n");
+       IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
 turn_off:
        trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
 
@@ -982,7 +983,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
        ret = iwl_enqueue_hcmd(trans, cmd);
        if (ret < 0) {
-               IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+               IWL_DEBUG_QUIET_RFKILL(trans,
+                       "Error sending %s: enqueue_hcmd failed: %d\n",
                          get_cmd_string(cmd->id), ret);
                return ret;
        }
@@ -1000,6 +1002,20 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
                        get_cmd_string(cmd->id));
 
+       if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+               return -EBUSY;
+
+
+       if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+               IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
+                              get_cmd_string(cmd->id));
+               return -ECANCELED;
+       }
+       if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+               IWL_ERR(trans, "Command %s failed: FW Error\n",
+                              get_cmd_string(cmd->id));
+               return -EIO;
+       }
        set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
                        get_cmd_string(cmd->id));
@@ -1008,7 +1024,8 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        if (cmd_idx < 0) {
                ret = cmd_idx;
                clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
-               IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+               IWL_DEBUG_QUIET_RFKILL(trans,
+                       "Error sending %s: enqueue_hcmd failed: %d\n",
                          get_cmd_string(cmd->id), ret);
                return ret;
        }
@@ -1022,12 +1039,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
                                &trans_pcie->txq[trans->shrd->cmd_queue];
                        struct iwl_queue *q = &txq->q;
 
-                       IWL_ERR(trans,
+                       IWL_DEBUG_QUIET_RFKILL(trans,
                                "Error sending %s: time out after %dms.\n",
                                get_cmd_string(cmd->id),
                                jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
-                       IWL_ERR(trans,
+                       IWL_DEBUG_QUIET_RFKILL(trans,
                                "Current CMD queue read_ptr %d write_ptr %d\n",
                                q->read_ptr, q->write_ptr);
 
@@ -1039,18 +1056,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
                }
        }
 
-       if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
-               IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
-                              get_cmd_string(cmd->id));
-               ret = -ECANCELED;
-               goto fail;
-       }
-       if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
-               IWL_ERR(trans, "Command %s failed: FW Error\n",
-                              get_cmd_string(cmd->id));
-               ret = -EIO;
-               goto fail;
-       }
        if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
                IWL_ERR(trans, "Error: Response NULL in '%s'\n",
                          get_cmd_string(cmd->id));
@@ -1071,7 +1076,7 @@ cancel:
                trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
                                                        ~CMD_WANT_SKB;
        }
-fail:
+
        if (cmd->reply_page) {
                iwl_free_pages(trans->shrd, cmd->reply_page);
                cmd->reply_page = 0;
@@ -1115,9 +1120,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
                return 0;
        }
 
-       IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
-                          q->read_ptr, index);
-
        if (WARN_ON(!skb_queue_empty(skbs)))
                return 0;
 
index ce91898..304b2ea 100644 (file)
@@ -1100,13 +1100,21 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                hdr->seq_ctrl = hdr->seq_ctrl &
                                cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
-               seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU) {
-                       WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
+                       if (WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON)) {
+                               IWL_ERR(trans, "TX_CTL_AMPDU while not in AGG:"
+                                       " Tx flags = 0x%08x, agg.state = %d",
+                                       info->flags, tid_data->agg.state);
+                               IWL_ERR(trans, "sta_id = %d, tid = %d "
+                                       "txq_id = %d, seq_num = %d", sta_id,
+                                       tid, tid_data->agg.txq_id,
+                                       seq_number >> 4);
+                       }
                        txq_id = tid_data->agg.txq_id;
                        is_agg = true;
                }
+               seq_number += 0x10;
        }
 
        /* Copy MAC header from skb into command buffer */
@@ -1232,7 +1240,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(trans, txq);
                } else {
-                       iwl_stop_queue(trans, txq);
+                       iwl_stop_queue(trans, txq, "Queue is full");
                }
        }
        return 0;
@@ -1284,20 +1292,21 @@ static int iwlagn_txq_check_empty(struct iwl_trans *trans,
                /* aggregated HW queue */
                if ((txq_id  == tid_data->agg.txq_id) &&
                    (q->read_ptr == q->write_ptr)) {
-                       IWL_DEBUG_HT(trans,
+                       IWL_DEBUG_TX_QUEUES(trans,
                                "HW queue empty: continue DELBA flow\n");
                        iwl_trans_pcie_txq_agg_disable(trans, txq_id);
                        tid_data->agg.state = IWL_AGG_OFF;
                        iwl_stop_tx_ba_trans_ready(priv(trans),
                                                   NUM_IWL_RXON_CTX,
                                                   sta_id, tid);
-                       iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+                       iwl_wake_queue(trans, &trans_pcie->txq[txq_id],
+                                      "DELBA flow complete");
                }
                break;
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /* We are reclaiming the last packet of the queue */
                if (tid_data->tfds_in_queue == 0) {
-                       IWL_DEBUG_HT(trans,
+                       IWL_DEBUG_TX_QUEUES(trans,
                                "HW queue empty: continue ADDBA flow\n");
                        tid_data->agg.state = IWL_AGG_ON;
                        iwl_start_tx_ba_trans_ready(priv(trans),
@@ -1350,12 +1359,12 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
        }
 
        if (txq->q.read_ptr != tfd_num) {
-               IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
-                               "scd_ssn=%d idx=%d txq=%d swq=%d\n",
-                               ssn , tfd_num, txq_id, txq->swq_id);
+               IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
+                               txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
+                               tfd_num, ssn);
                freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
                if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
-                       iwl_wake_queue(trans, txq);
+                       iwl_wake_queue(trans, txq, "Packets reclaimed");
        }
 
        iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
@@ -1419,7 +1428,8 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 #endif /* CONFIG_PM_SLEEP */
 
 static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
-                                         enum iwl_rxon_context_id ctx)
+                                         enum iwl_rxon_context_id ctx,
+                                         const char *msg)
 {
        u8 ac, txq_id;
        struct iwl_trans_pcie *trans_pcie =
@@ -1427,11 +1437,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
 
        for (ac = 0; ac < AC_NUM; ac++) {
                txq_id = trans_pcie->ac_to_queue[ctx][ac];
-               IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+               IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
                        ac,
                        (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
                              ? "stopped" : "awake");
-               iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+               iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
        }
 }
 
@@ -1454,11 +1464,12 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
        return iwl_trans;
 }
 
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
+                                     const char *msg)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+       iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
 }
 
 #define IWL_FLUSH_WAIT_MS      2000
@@ -1513,8 +1524,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
        if (time_after(jiffies, timeout)) {
                IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
                        hw_params(trans).wd_timeout);
-               IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+               IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                        q->read_ptr, q->write_ptr);
+               IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+                       iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+                               & (TFD_QUEUE_SIZE_MAX - 1),
+                       iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
                return 1;
        }
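
The "Queue is full" and "Packets reclaimed" reasons used above describe the normal TX flow-control cycle: the driver stops a queue when the ring runs low on free TFD slots and wakes it once reclaim has freed space past the low-water mark. A toy ring-buffer model of that cycle follows; it uses a single threshold where the driver keeps separate high and low marks, and all names and sizes are illustrative:

/* Toy model of TX-ring flow control: stop when nearly full, wake on reclaim. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	256		/* must be a power of two */
#define LOW_MARK	(RING_SIZE / 4)

struct ring {
	unsigned int read_ptr;		/* next slot the hardware will complete */
	unsigned int write_ptr;		/* next slot the driver will fill */
	bool stopped;
};

static unsigned int ring_space(const struct ring *r)
{
	/* free slots, keeping one empty so "full" and "empty" differ */
	return (r->read_ptr - r->write_ptr - 1) & (RING_SIZE - 1);
}

static void ring_tx(struct ring *r)
{
	r->write_ptr = (r->write_ptr + 1) & (RING_SIZE - 1);
	if (ring_space(r) < LOW_MARK && !r->stopped) {
		r->stopped = true;
		printf("stop queue: %s\n", "Queue is full");
	}
}

static void ring_reclaim(struct ring *r, unsigned int completed)
{
	r->read_ptr = (r->read_ptr + completed) & (RING_SIZE - 1);
	if (ring_space(r) > LOW_MARK && r->stopped) {
		r->stopped = false;
		printf("wake queue: %s\n", "Packets reclaimed");
	}
}

int main(void)
{
	struct ring r = { 0, 0, false };
	unsigned int i;

	for (i = 0; i < RING_SIZE; i++)	/* fill until flow control kicks in */
		if (!r.stopped)
			ring_tx(&r);
	ring_reclaim(&r, 150);		/* hardware completed 150 frames */
	return 0;
}
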
 
index c592312..4a29b8a 100644 (file)
@@ -171,7 +171,8 @@ struct iwl_trans_ops {
        void (*tx_start)(struct iwl_trans *trans);
 
        void (*wake_any_queue)(struct iwl_trans *trans,
-                              enum iwl_rxon_context_id ctx);
+                              enum iwl_rxon_context_id ctx,
+                              const char *msg);
 
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
@@ -196,7 +197,7 @@ struct iwl_trans_ops {
 
        void (*free)(struct iwl_trans *trans);
 
-       void (*stop_queue)(struct iwl_trans *trans, int q);
+       void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
 
        int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
        int (*check_stuck_queue)(struct iwl_trans *trans, int q);
@@ -207,17 +208,41 @@ struct iwl_trans_ops {
 #endif
 };
 
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+       dma_addr_t p_addr;      /* hardware address */
+       void *v_addr;           /* software address */
+       u32 len;                /* size in bytes */
+};
+
+struct fw_img {
+       struct fw_desc code;    /* firmware code image */
+       struct fw_desc data;    /* firmware data image */
+};
+
 /**
  * struct iwl_trans - transport common data
  * @ops - pointer to iwl_trans_ops
  * @shrd - pointer to iwl_shared which holds shared data from the upper layer
  * @hcmd_lock: protects HCMD
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_rt: run time ucode image
+ * @ucode_init: init ucode image
+ * @ucode_wowlan: wake on wireless ucode image (optional)
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
        struct iwl_shared *shrd;
        spinlock_t hcmd_lock;
 
+       u8 ucode_write_complete;        /* the image write is complete */
+       struct fw_img ucode_rt;
+       struct fw_img ucode_init;
+       struct fw_img ucode_wowlan;
+
+       /* eeprom related variables */
+       int    nvm_device_type;
+
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@@ -249,9 +274,10 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans)
 }
 
 static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
-                                           enum iwl_rxon_context_id ctx)
+                                           enum iwl_rxon_context_id ctx,
+                                           const char *msg)
 {
-       trans->ops->wake_any_queue(trans, ctx);
+       trans->ops->wake_any_queue(trans, ctx, msg);
 }
 
 
@@ -311,9 +337,10 @@ static inline void iwl_trans_free(struct iwl_trans *trans)
        trans->ops->free(trans);
 }
 
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
+                                       const char *msg)
 {
-       trans->ops->stop_queue(trans, q);
+       trans->ops->stop_queue(trans, q, msg);
 }
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -348,4 +375,8 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
 ******************************************************/
 extern const struct iwl_trans_ops trans_ops_pcie;
 
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+                     const void *data, size_t len);
+void iwl_dealloc_ucode(struct iwl_trans *trans);
+
 #endif /* __iwl_trans_h__ */
similarity index 64%
rename from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
rename to drivers/net/wireless/iwlwifi/iwl-ucode.c
index 8ba0dd5..b365de4 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/dma-mapping.h>
 
 #include "iwl-dev.h"
 #include "iwl-core.h"
@@ -72,51 +73,98 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
        {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
 };
 
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+{
+       if (desc->v_addr)
+               dma_free_coherent(bus->dev, desc->len,
+                                 desc->v_addr, desc->p_addr);
+       desc->v_addr = NULL;
+       desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
+{
+       iwl_free_fw_desc(bus, &img->code);
+       iwl_free_fw_desc(bus, &img->data);
+}
+
+void iwl_dealloc_ucode(struct iwl_trans *trans)
+{
+       iwl_free_fw_img(bus(trans), &trans->ucode_rt);
+       iwl_free_fw_img(bus(trans), &trans->ucode_init);
+       iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
+}
+
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+                     const void *data, size_t len)
+{
+       if (!len) {
+               desc->v_addr = NULL;
+               return -EINVAL;
+       }
+
+       desc->v_addr = dma_alloc_coherent(bus->dev, len,
+                                         &desc->p_addr, GFP_KERNEL);
+       if (!desc->v_addr)
+               return -ENOMEM;
+
+       desc->len = len;
+       memcpy(desc->v_addr, data, len);
+       return 0;
+}
+
 /*
  * ucode
  */
-static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
                                struct fw_desc *image, u32 dst_addr)
 {
+       struct iwl_bus *bus = bus(trans);
        dma_addr_t phy_addr = image->p_addr;
        u32 byte_cnt = image->len;
        int ret;
 
-       priv->ucode_write_complete = 0;
+       trans->ucode_write_complete = 0;
 
-       iwl_write_direct32(bus(priv),
+       iwl_write_direct32(bus,
                FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
 
-       iwl_write_direct32(bus(priv),
+       iwl_write_direct32(bus,
                FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
 
-       iwl_write_direct32(bus(priv),
+       iwl_write_direct32(bus,
                FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
 
-       iwl_write_direct32(bus(priv),
+       iwl_write_direct32(bus,
                FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                (iwl_get_dma_hi_addr(phy_addr)
                        << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
 
-       iwl_write_direct32(bus(priv),
+       iwl_write_direct32(bus,
                FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
                1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
                FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
 
-       iwl_write_direct32(bus(priv),
+       iwl_write_direct32(bus,
                FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE       |
                FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE    |
                FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
 
-       IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
-       ret = wait_event_timeout(priv->shrd->wait_command_queue,
-                                priv->ucode_write_complete, 5 * HZ);
+       IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
+       ret = wait_event_timeout(trans->shrd->wait_command_queue,
+                                trans->ucode_write_complete, 5 * HZ);
        if (!ret) {
-               IWL_ERR(priv, "Could not load the %s uCode section\n",
+               IWL_ERR(trans, "Could not load the %s uCode section\n",
                        name);
                return -ETIMEDOUT;
        }
@@ -124,24 +172,48 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
        return 0;
 }
 
-static int iwlagn_load_given_ucode(struct iwl_priv *priv,
-                                  struct fw_img *image)
+static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
+                                       enum iwl_ucode_type ucode_type)
+{
+       switch (ucode_type) {
+       case IWL_UCODE_INIT:
+               return &trans->ucode_init;
+       case IWL_UCODE_WOWLAN:
+               return &trans->ucode_wowlan;
+       case IWL_UCODE_REGULAR:
+               return &trans->ucode_rt;
+       case IWL_UCODE_NONE:
+               break;
+       }
+       return NULL;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+                                  enum iwl_ucode_type ucode_type)
 {
        int ret = 0;
+       struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
 
-       ret = iwlagn_load_section(priv, "INST", &image->code,
+
+       if (!image) {
+               IWL_ERR(trans, "Invalid ucode requested (%d)\n",
+                       ucode_type);
+               return -EINVAL;
+       }
+
+       ret = iwl_load_section(trans, "INST", &image->code,
                                   IWLAGN_RTC_INST_LOWER_BOUND);
        if (ret)
                return ret;
 
-       return iwlagn_load_section(priv, "DATA", &image->data,
+       return iwl_load_section(trans, "DATA", &image->data,
                                    IWLAGN_RTC_DATA_LOWER_BOUND);
 }
 
 /*
  *  Calibration
  */
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+static int iwl_set_Xtal_calib(struct iwl_priv *priv)
 {
        struct iwl_calib_xtal_freq_cmd cmd;
        __le16 *xtal_calib =
@@ -150,11 +222,10 @@ static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
        iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
        cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
        cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
-       return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
-                            (u8 *)&cmd, sizeof(cmd));
+       return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
 }
 
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
 {
        struct iwl_calib_temperature_offset_cmd cmd;
        __le16 *offset_calib =
@@ -168,11 +239,10 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
 
        IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
                        le16_to_cpu(cmd.radio_sensor_offset));
-       return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
-                            (u8 *)&cmd, sizeof(cmd));
+       return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
 }
 
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
 {
        struct iwl_calib_temperature_offset_v2_cmd cmd;
        __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
@@ -204,11 +274,10 @@ static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
        IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
                        le16_to_cpu(cmd.burntVoltageRef));
 
-       return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
-                            (u8 *)&cmd, sizeof(cmd));
+       return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
 }
 
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
 {
        struct iwl_calib_cfg_cmd calib_cfg_cmd;
        struct iwl_host_cmd cmd = {
@@ -224,7 +293,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
        calib_cfg_cmd.ucd_calib_cfg.flags =
                IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
 
-       return iwl_trans_send_cmd(trans(priv), &cmd);
+       return iwl_trans_send_cmd(trans, &cmd);
 }
 
 int iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -234,37 +303,14 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
        int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       int index;
 
        /* reduce the size of the length field itself */
        len -= 4;
 
-       /* Define the order in which the results will be sent to the runtime
-        * uCode. iwl_send_calib_results sends them in a row according to
-        * their index. We sort them here
-        */
-       switch (hdr->op_code) {
-       case IWL_PHY_CALIBRATE_DC_CMD:
-               index = IWL_CALIB_DC;
-               break;
-       case IWL_PHY_CALIBRATE_LO_CMD:
-               index = IWL_CALIB_LO;
-               break;
-       case IWL_PHY_CALIBRATE_TX_IQ_CMD:
-               index = IWL_CALIB_TX_IQ;
-               break;
-       case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
-               index = IWL_CALIB_TX_IQ_PERD;
-               break;
-       case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
-               index = IWL_CALIB_BASE_BAND;
-               break;
-       default:
-               IWL_ERR(priv, "Unknown calibration notification %d\n",
-                         hdr->op_code);
-               return -1;
-       }
-       iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+       if (iwl_calib_set(priv, hdr, len))
+               IWL_ERR(priv, "Failed to record calibration data %d\n",
+                       hdr->op_code);
+
        return 0;
 }
 
@@ -280,14 +326,14 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
                 * no need to close the envlope since we are going
                 * to load the runtime uCode later.
                 */
-               ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+               ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
                        BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
                if (ret)
                        return ret;
 
        }
 
-       ret = iwlagn_send_calib_cfg(priv);
+       ret = iwl_send_calib_cfg(trans(priv));
        if (ret)
                return ret;
 
@@ -297,15 +343,15 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
         */
        if (priv->cfg->need_temp_offset_calib) {
                if (priv->cfg->temp_offset_v2)
-                       return iwlagn_set_temperature_offset_calib_v2(priv);
+                       return iwl_set_temperature_offset_calib_v2(priv);
                else
-                       return iwlagn_set_temperature_offset_calib(priv);
+                       return iwl_set_temperature_offset_calib(priv);
        }
 
        return 0;
 }
 
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
 {
        struct iwl_wimax_coex_cmd coex_cmd;
 
@@ -333,7 +379,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
                                sizeof(coex_cmd), &coex_cmd);
 }
 
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
        ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
                (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
        ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
@@ -355,42 +401,42 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
        0, 0, 0, 0, 0, 0, 0
 };
 
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
+void iwl_send_prio_tbl(struct iwl_trans *trans)
 {
        struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
 
-       memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
-               sizeof(iwlagn_bt_prio_tbl));
-       if (iwl_trans_send_cmd_pdu(trans(priv),
+       memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+               sizeof(iwl_bt_prio_tbl));
+       if (iwl_trans_send_cmd_pdu(trans,
                                REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
                                sizeof(prio_tbl_cmd), &prio_tbl_cmd))
-               IWL_ERR(priv, "failed to send BT prio tbl command\n");
+               IWL_ERR(trans, "failed to send BT prio tbl command\n");
 }
 
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
 {
        struct iwl_bt_coex_prot_env_cmd env_cmd;
        int ret;
 
        env_cmd.action = action;
        env_cmd.type = type;
-       ret = iwl_trans_send_cmd_pdu(trans(priv),
+       ret = iwl_trans_send_cmd_pdu(trans,
                               REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
                               sizeof(env_cmd), &env_cmd);
        if (ret)
-               IWL_ERR(priv, "failed to send BT env command\n");
+               IWL_ERR(trans, "failed to send BT env command\n");
        return ret;
 }
 
 
-static int iwlagn_alive_notify(struct iwl_priv *priv)
+static int iwl_alive_notify(struct iwl_priv *priv)
 {
        struct iwl_rxon_context *ctx;
        int ret;
 
        if (!priv->tx_cmd_pool)
                priv->tx_cmd_pool =
-                       kmem_cache_create("iwlagn_dev_cmd",
+                       kmem_cache_create("iwl_dev_cmd",
                                          sizeof(struct iwl_device_cmd),
                                          sizeof(void *), 0, NULL);
 
@@ -401,13 +447,15 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
        for_each_context(priv, ctx)
                ctx->last_tx_rejected = false;
 
-       ret = iwlagn_send_wimax_coex(priv);
+       ret = iwl_send_wimax_coex(priv);
        if (ret)
                return ret;
 
-       ret = iwlagn_set_Xtal_calib(priv);
-       if (ret)
-               return ret;
+       if (!priv->cfg->no_xtal_calib) {
+               ret = iwl_set_Xtal_calib(priv);
+               if (ret)
+                       return ret;
+       }
 
        return iwl_send_calib_results(priv);
 }
@@ -418,7 +466,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
  *   using sample data 100 bytes apart.  If these sample points are good,
  *   it's a pretty good bet that everything between them is good, too.
  */
-static int iwl_verify_inst_sparse(struct iwl_priv *priv,
+static int iwl_verify_inst_sparse(struct iwl_bus *bus,
                                      struct fw_desc *fw_desc)
 {
        __le32 *image = (__le32 *)fw_desc->v_addr;
@@ -426,15 +474,15 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
        u32 val;
        u32 i;
 
-       IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+       IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
 
        for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
                /* read data comes through single port, auto-incr addr */
                /* NOTE: Use the debugless read so we don't flood kernel log
                 * if IWL_DL_IO is set */
-               iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+               iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
                        i + IWLAGN_RTC_INST_LOWER_BOUND);
-               val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+               val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image))
                        return -EIO;
        }
@@ -442,7 +490,7 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
        return 0;
 }
 
-static void iwl_print_mismatch_inst(struct iwl_priv *priv,
+static void iwl_print_mismatch_inst(struct iwl_bus *bus,
                                    struct fw_desc *fw_desc)
 {
        __le32 *image = (__le32 *)fw_desc->v_addr;
@@ -451,18 +499,18 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
        u32 offs;
        int errors = 0;
 
-       IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+       IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
 
-       iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+       iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
                           IWLAGN_RTC_INST_LOWER_BOUND);
 
        for (offs = 0;
             offs < len && errors < 20;
             offs += sizeof(u32), image++) {
                /* read data comes through single port, auto-incr addr */
-               val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+               val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
                if (val != le32_to_cpu(*image)) {
-                       IWL_ERR(priv, "uCode INST section at "
+                       IWL_ERR(bus, "uCode INST section at "
                                "offset 0x%x, is 0x%x, s/b 0x%x\n",
                                offs, val, le32_to_cpu(*image));
                        errors++;
@@ -474,16 +522,24 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
  * iwl_verify_ucode - determine which instruction image is in SRAM,
  *    and verify its contents
  */
-static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
+static int iwl_verify_ucode(struct iwl_trans *trans,
+                           enum iwl_ucode_type ucode_type)
 {
-       if (!iwl_verify_inst_sparse(priv, &img->code)) {
-               IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
+       struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+
+       if (!img) {
+               IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+               return -EINVAL;
+       }
+
+       if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
+               IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
                return 0;
        }
 
-       IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+       IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
 
-       iwl_print_mismatch_inst(priv, &img->code);
+       iwl_print_mismatch_inst(bus(trans), &img->code);
        return -EIO;
 }
 
@@ -492,7 +548,7 @@ struct iwlagn_alive_data {
        u8 subtype;
 };
 
-static void iwlagn_alive_fn(struct iwl_priv *priv,
+static void iwl_alive_fn(struct iwl_priv *priv,
                            struct iwl_rx_packet *pkt,
                            void *data)
 {
@@ -515,50 +571,115 @@ static void iwlagn_alive_fn(struct iwl_priv *priv,
        alive_data->valid = palive->is_valid == UCODE_VALID_OK;
 }
 
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+                                  struct iwl_notification_wait *wait_entry,
+                                  u8 cmd,
+                                  void (*fn)(struct iwl_priv *priv,
+                                             struct iwl_rx_packet *pkt,
+                                             void *data),
+                                  void *fn_data)
+{
+       wait_entry->fn = fn;
+       wait_entry->fn_data = fn_data;
+       wait_entry->cmd = cmd;
+       wait_entry->triggered = false;
+       wait_entry->aborted = false;
+
+       spin_lock_bh(&shrd->notif_wait_lock);
+       list_add(&wait_entry->list, &shrd->notif_waits);
+       spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+                            struct iwl_notification_wait *wait_entry,
+                            unsigned long timeout)
+{
+       int ret;
+
+       ret = wait_event_timeout(shrd->notif_waitq,
+                                wait_entry->triggered || wait_entry->aborted,
+                                timeout);
+
+       spin_lock_bh(&shrd->notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&shrd->notif_wait_lock);
+
+       if (wait_entry->aborted)
+               return -EIO;
+
+       /* return value is always >= 0 */
+       if (ret <= 0)
+               return -ETIMEDOUT;
+       return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+                               struct iwl_notification_wait *wait_entry)
+{
+       spin_lock_bh(&shrd->notif_wait_lock);
+       list_del(&wait_entry->list);
+       spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+       unsigned long flags;
+       struct iwl_notification_wait *wait_entry;
+
+       spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+       list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+               wait_entry->aborted = true;
+       spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+       wake_up_all(&shrd->notif_waitq);
+}
+
 #define UCODE_ALIVE_TIMEOUT    HZ
 #define UCODE_CALIB_TIMEOUT    (2*HZ)
 
 int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
-                                struct fw_img *image,
-                                enum iwlagn_ucode_type ucode_type)
+                                enum iwl_ucode_type ucode_type)
 {
        struct iwl_notification_wait alive_wait;
        struct iwlagn_alive_data alive_data;
+       struct iwl_trans *trans = trans(priv);
        int ret;
-       enum iwlagn_ucode_type old_type;
+       enum iwl_ucode_type old_type;
 
-       ret = iwl_trans_start_device(trans(priv));
+       ret = iwl_trans_start_device(trans);
        if (ret)
                return ret;
 
-       iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
-                                     iwlagn_alive_fn, &alive_data);
+       iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+                                     iwl_alive_fn, &alive_data);
 
-       old_type = priv->ucode_type;
-       priv->ucode_type = ucode_type;
+       old_type = trans->shrd->ucode_type;
+       trans->shrd->ucode_type = ucode_type;
 
-       ret = iwlagn_load_given_ucode(priv, image);
+       ret = iwl_load_given_ucode(trans, ucode_type);
        if (ret) {
-               priv->ucode_type = old_type;
-               iwlagn_remove_notification(priv, &alive_wait);
+               trans->shrd->ucode_type = old_type;
+               iwl_remove_notification(trans->shrd, &alive_wait);
                return ret;
        }
 
-       iwl_trans_kick_nic(trans(priv));
+       iwl_trans_kick_nic(trans);
 
        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
-       ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
+       ret = iwl_wait_notification(trans->shrd, &alive_wait,
+                                       UCODE_ALIVE_TIMEOUT);
        if (ret) {
-               priv->ucode_type = old_type;
+               trans->shrd->ucode_type = old_type;
                return ret;
        }
 
        if (!alive_data.valid) {
                IWL_ERR(priv, "Loaded ucode is not valid!\n");
-               priv->ucode_type = old_type;
+               trans->shrd->ucode_type = old_type;
                return -EIO;
        }
 
@@ -568,9 +689,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
         * skip it for WoWLAN.
         */
        if (ucode_type != IWL_UCODE_WOWLAN) {
-               ret = iwl_verify_ucode(priv, image);
+               ret = iwl_verify_ucode(trans, ucode_type);
                if (ret) {
-                       priv->ucode_type = old_type;
+                       trans->shrd->ucode_type = old_type;
                        return ret;
                }
 
@@ -578,11 +699,11 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
                msleep(5);
        }
 
-       ret = iwlagn_alive_notify(priv);
+       ret = iwl_alive_notify(priv);
        if (ret) {
                IWL_WARN(priv,
                        "Could not complete ALIVE transition: %d\n", ret);
-               priv->ucode_type = old_type;
+               trans->shrd->ucode_type = old_type;
                return ret;
        }
 
@@ -597,19 +718,18 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
        lockdep_assert_held(&priv->shrd->mutex);
 
        /* No init ucode required? Curious, but maybe ok */
-       if (!priv->ucode_init.code.len)
+       if (!trans(priv)->ucode_init.code.len)
                return 0;
 
-       if (priv->ucode_type != IWL_UCODE_NONE)
+       if (priv->shrd->ucode_type != IWL_UCODE_NONE)
                return 0;
 
-       iwlagn_init_notification_wait(priv, &calib_wait,
+       iwl_init_notification_wait(priv->shrd, &calib_wait,
                                      CALIBRATION_COMPLETE_NOTIFICATION,
                                      NULL, NULL);
 
        /* Will also start the device */
-       ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
-                                          IWL_UCODE_INIT);
+       ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
        if (ret)
                goto error;
 
@@ -621,12 +741,13 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
         * Some things may run in the background now, but we
         * just wait for the calibration complete notification.
         */
-       ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
+       ret = iwl_wait_notification(priv->shrd, &calib_wait,
+                                       UCODE_CALIB_TIMEOUT);
 
        goto out;
 
  error:
-       iwlagn_remove_notification(priv, &calib_wait);
+       iwl_remove_notification(priv->shrd, &calib_wait);
  out:
        /* Whatever happened, stop the device */
        iwl_trans_stop_device(trans(priv));
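
The notification-wait helpers introduced above implement a register-then-wait pattern: a caller queues a wait entry for a command ID, kicks off whatever will make the firmware respond, then sleeps until the RX path marks the entry triggered (or until all waits are aborted on a firmware error). A compressed single-threaded sketch of the same pattern, with the RX path simulated by a direct call and the timed sleep and list removal left out (hypothetical names):

/* Single-threaded sketch of the register-then-wait notification pattern. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct notif_wait {
	struct notif_wait *next;
	unsigned char cmd;		/* notification we are waiting for */
	bool triggered;
	bool aborted;
	void (*fn)(void *pkt, void *data);
	void *fn_data;
};

static struct notif_wait *waits;	/* models shrd->notif_waits */

static void init_notification_wait(struct notif_wait *w, unsigned char cmd,
				   void (*fn)(void *, void *), void *data)
{
	w->cmd = cmd;
	w->fn = fn;
	w->fn_data = data;
	w->triggered = false;
	w->aborted = false;
	w->next = waits;
	waits = w;
}

/* Models the RX path: a packet for command 'cmd' has arrived. */
static void notification_received(unsigned char cmd, void *pkt)
{
	struct notif_wait *w;

	for (w = waits; w; w = w->next) {
		if (w->cmd != cmd)
			continue;
		if (w->fn)
			w->fn(pkt, w->fn_data);
		w->triggered = true;	/* driver: wake_up_all(&notif_waitq) */
	}
}

static void alive_fn(void *pkt, void *data)
{
	(void)pkt;
	*(bool *)data = true;		/* "alive_data.valid" */
}

int main(void)
{
	struct notif_wait alive_wait;
	bool valid = false;

	init_notification_wait(&alive_wait, 0x01 /* e.g. REPLY_ALIVE */,
			       alive_fn, &valid);
	/* ...load the ucode; eventually the RX path sees the notification... */
	notification_received(0x01, NULL);
	printf("triggered=%d valid=%d\n", alive_wait.triggered, valid);
	return 0;
}
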
index c42be81..48e8218 100644 (file)
@@ -165,11 +165,15 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                                struct key_params *params)
 {
        struct iwm_priv *iwm = ndev_to_iwm(ndev);
-       struct iwm_key *key = &iwm->keys[key_index];
+       struct iwm_key *key;
        int ret;
 
        IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
 
+       if (key_index >= IWM_NUM_KEYS)
+               return -ENOENT;
+
+       key = &iwm->keys[key_index];
        memset(key, 0, sizeof(struct iwm_key));
        ret = iwm_key_init(key, key_index, mac_addr, params);
        if (ret < 0) {
@@ -214,8 +218,12 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
                                u8 key_index, bool pairwise, const u8 *mac_addr)
 {
        struct iwm_priv *iwm = ndev_to_iwm(ndev);
-       struct iwm_key *key = &iwm->keys[key_index];
+       struct iwm_key *key;
 
+       if (key_index >= IWM_NUM_KEYS)
+               return -ENOENT;
+
+       key = &iwm->keys[key_index];
        if (!iwm->keys[key_index].key_len) {
                IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
                return 0;
@@ -236,6 +244,9 @@ static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
 
        IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
 
+       if (key_index >= IWM_NUM_KEYS)
+               return -ENOENT;
+
        if (!iwm->keys[key_index].key_len) {
                IWM_ERR(iwm, "Key %d not used\n", key_index);
                return -EINVAL;
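
The three hunks above apply the same fix: key_index arrives from userspace via nl80211, so it has to be range-checked against IWM_NUM_KEYS before it is used to index iwm->keys[]; otherwise a malformed request indexes past the end of the array. The pattern in isolation, with illustrative sizes:

/* Validate an externally supplied index before using it as an array subscript. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NUM_KEYS 4

struct key {
	unsigned char data[32];
	unsigned int len;
};

static struct key keys[NUM_KEYS];

static int add_key(unsigned int key_index, const void *material, unsigned int len)
{
	struct key *key;

	if (key_index >= NUM_KEYS)	/* reject before touching keys[] */
		return -ENOENT;

	key = &keys[key_index];
	if (len > sizeof(key->data))
		return -EINVAL;

	memset(key, 0, sizeof(*key));
	memcpy(key->data, material, len);
	key->len = len;
	return 0;
}

int main(void)
{
	printf("%d\n", add_key(2, "secret", 6));	/* 0: accepted */
	printf("%d\n", add_key(7, "secret", 6));	/* -ENOENT: out of range */
	return 0;
}
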
index a7f1ab2..d1d84e0 100644 (file)
@@ -485,6 +485,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
 static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
        struct cmd_header *resp)
 {
+       struct cfg80211_bss *bss;
        struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
        int bsssize;
        const u8 *pos;
@@ -632,12 +633,14 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
                                     LBS_SCAN_RSSI_TO_MBM(rssi)/100);
 
                        if (channel &&
-                           !(channel->flags & IEEE80211_CHAN_DISABLED))
-                               cfg80211_inform_bss(wiphy, channel,
+                           !(channel->flags & IEEE80211_CHAN_DISABLED)) {
+                               bss = cfg80211_inform_bss(wiphy, channel,
                                        bssid, get_unaligned_le64(tsfdesc),
                                        capa, intvl, ie, ielen,
                                        LBS_SCAN_RSSI_TO_MBM(rssi),
                                        GFP_KERNEL);
+                               cfg80211_put_bss(bss);
+                       }
                } else
                        lbs_deb_scan("scan response: missing BSS channel IE\n");
 
@@ -1720,6 +1723,7 @@ static void lbs_join_post(struct lbs_private *priv,
                   2 + 2 +                      /* atim */
                   2 + 8];                      /* extended rates */
        u8 *fake = fake_ie;
+       struct cfg80211_bss *bss;
 
        lbs_deb_enter(LBS_DEB_CFG80211);
 
@@ -1763,14 +1767,15 @@ static void lbs_join_post(struct lbs_private *priv,
        *fake++ = 0x6c;
        lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
 
-       cfg80211_inform_bss(priv->wdev->wiphy,
-                           params->channel,
-                           bssid,
-                           0,
-                           capability,
-                           params->beacon_interval,
-                           fake_ie, fake - fake_ie,
-                           0, GFP_KERNEL);
+       bss = cfg80211_inform_bss(priv->wdev->wiphy,
+                                 params->channel,
+                                 bssid,
+                                 0,
+                                 capability,
+                                 params->beacon_interval,
+                                 fake_ie, fake - fake_ie,
+                                 0, GFP_KERNEL);
+       cfg80211_put_bss(bss);
 
        memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
        priv->wdev->ssid_len = params->ssid_len;
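
The libertas changes above (and the similar mwifiex change further down) follow from cfg80211_inform_bss() returning a struct cfg80211_bss with a reference held for the caller; discarding the return value leaks that reference, so the result is captured and released with cfg80211_put_bss(). A toy refcount model of why the put matters (not the cfg80211 implementation):

/* Toy refcount model: "inform" hands back an object the caller must put. */
#include <stdio.h>

struct bss {
	int refcount;
};

static struct bss scan_entry;		/* the entry kept on the "scan list" */

static struct bss *inform_bss(void)
{
	scan_entry.refcount = 1;	/* reference owned by the scan list */
	scan_entry.refcount++;		/* extra reference handed to the caller */
	return &scan_entry;
}

static void put_bss(struct bss *bss)
{
	if (bss && --bss->refcount == 0)
		printf("bss freed\n");	/* the real code frees the entry here */
}

int main(void)
{
	struct bss *bss = inform_bss();

	put_bss(bss);			/* caller done: drop its reference */
	put_bss(&scan_entry);		/* later: scan list expires the entry */
	return 0;
}
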
index 885ddc1..f955b2d 100644 (file)
@@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
 {
        struct lbs_private *priv = dev->ml_priv;
 
-       snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
+       snprintf(info->fw_version, sizeof(info->fw_version),
+               "%u.%u.%u.p%u",
                priv->fwrelease >> 24 & 0xff,
                priv->fwrelease >> 16 & 0xff,
                priv->fwrelease >>  8 & 0xff,
                priv->fwrelease       & 0xff);
-       strcpy(info->driver, "libertas");
-       strcpy(info->version, lbs_driver_version);
+       strlcpy(info->driver, "libertas", sizeof(info->driver));
+       strlcpy(info->version, lbs_driver_version, sizeof(info->version));
 }
 
 /*
index 728baa4..50b1ee7 100644 (file)
@@ -1291,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
        .remove = __devexit_p(libertas_spi_remove),
        .driver = {
                .name   = "libertas_spi",
-               .bus    = &spi_bus_type,
                .owner  = THIS_MODULE,
                .pm     = &if_spi_pm_ops,
        },
index 523ad55..6cf6d6d 100644 (file)
@@ -1748,6 +1748,8 @@ static int __init init_mac80211_hwsim(void)
                            IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
                            IEEE80211_HW_AMPDU_AGGREGATION;
 
+               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
                /* ask mac80211 to reserve space for magic */
                hw->vif_data_size = sizeof(struct hwsim_vif_priv);
                hw->sta_data_size = sizeof(struct hwsim_sta_priv);
index 7aa9aa0..681d3f2 100644 (file)
@@ -33,7 +33,7 @@
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
  */
-static int
+static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
                                         struct mwifiex_rx_reorder_tbl
                                         *rx_reor_tbl_ptr, int start_win)
@@ -71,8 +71,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 
        rx_reor_tbl_ptr->start_win = start_win;
        spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
-       return 0;
 }
 
 /*
@@ -83,7 +81,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
  */
-static int
+static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                              struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
 {
@@ -119,7 +117,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
        rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
                &(MAX_TID_VALUE - 1);
        spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-       return 0;
 }
 
 /*
@@ -405,7 +402,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                                u8 *ta, u8 pkt_type, void *payload)
 {
        struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
-       int start_win, end_win, win_size, ret;
+       int start_win, end_win, win_size;
        u16 pkt_index;
 
        rx_reor_tbl_ptr =
@@ -452,11 +449,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                        start_win = (end_win - win_size) + 1;
                else
                        start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
-               ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
+               mwifiex_11n_dispatch_pkt_until_start_win(priv,
                                                rx_reor_tbl_ptr, start_win);
-
-               if (ret)
-                       return ret;
        }
 
        if (pkt_type != PKT_TYPE_BAR) {
@@ -475,9 +469,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
         * Dispatch all packets sequentially from start_win until a
         * hole is found and adjust the start_win appropriately
         */
-       ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
+       mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
 
-       return ret;
+       return 0;
 }
 
 /*
index 8f2797a..2a078ce 100644 (file)
@@ -10,12 +10,12 @@ config MWIFIEX
          mwifiex.
 
 config MWIFIEX_SDIO
-       tristate "Marvell WiFi-Ex Driver for SD8787"
+       tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
        depends on MWIFIEX && MMC
        select FW_LOADER
        ---help---
          This adds support for wireless adapters based on Marvell
-         8787 chipset with SDIO interface.
+         8787/8797 chipsets with SDIO interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_sdio.
index 462c710..0db97cc 100644 (file)
@@ -120,10 +120,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
 static int
 mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
                              enum nl80211_tx_power_setting type,
-                             int dbm)
+                             int mbm)
 {
        struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
        struct mwifiex_power_cfg power_cfg;
+       int dbm = MBM_TO_DBM(mbm);
 
        if (type == NL80211_TX_POWER_FIXED) {
                power_cfg.is_power_auto = 0;
@@ -780,6 +781,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
 {
        struct ieee80211_channel *chan;
        struct mwifiex_bss_info bss_info;
+       struct cfg80211_bss *bss;
        int ie_len;
        u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
        enum ieee80211_band band;
@@ -800,9 +802,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
                        ieee80211_channel_to_frequency(bss_info.bss_chan,
                                                       band));
 
-       cfg80211_inform_bss(priv->wdev->wiphy, chan,
+       bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
                bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
                0, ie_buf, ie_len, 0, GFP_KERNEL);
+       cfg80211_put_bss(bss);
        memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
 
        return 0;
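
The set_tx_power change above is a unit fix: cfg80211 passes the requested power in mBm (hundredths of a dBm), so the value is converted once with MBM_TO_DBM() instead of being treated as dBm directly. The conversion is just an integer division by 100:

/* mBm (hundredths of a dBm) to dBm, as the MBM_TO_DBM() conversion above does. */
#include <stdio.h>

static int mbm_to_dbm(int mbm)
{
	return mbm / 100;
}

int main(void)
{
	printf("%d dBm\n", mbm_to_dbm(2000));	/* 2000 mBm -> 20 dBm */
	printf("%d dBm\n", mbm_to_dbm(1750));	/* 1750 mBm -> 17 dBm (truncated) */
	return 0;
}
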
index f2e6de0..1782a77 100644 (file)
@@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
  * This function maps an index in supported rates table into
  * the corresponding data rate.
  */
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+                                                       u8 ht_info)
 {
-       u16 mcs_rate[4][8] = {
-               {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
-       ,                       /* LG 40M */
-       {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
-       ,                       /* SG 40M */
-       {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
-       ,                       /* LG 20M */
-       {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
-       };                      /* SG 20M */
-
+       /*
+        * For every mcs_rate line, the first 8 bytes are for stream 1x1,
+        * and all 16 bytes are for stream 2x2.
+        */
+       u16  mcs_rate[4][16] = {
+               /* LGI 40M */
+               { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+                 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+               /* SGI 40M */
+               { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+                 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+               /* LGI 20M */
+               { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+                 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+               /* SGI 20M */
+               { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+                 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+       };
+       u32 mcs_num_supp =
+               (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
        u32 rate;
 
        if (ht_info & BIT(0)) {
@@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
                                rate = 0x0D;    /* MCS 32 SGI rate */
                        else
                                rate = 0x0C;    /* MCS 32 LGI rate */
-               } else if (index < 8) {
+               } else if (index < mcs_num_supp) {
                        if (ht_info & BIT(1)) {
                                if (ht_info & BIT(2))
                                        /* SGI, 40M */
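
The widened mcs_rate table above accounts for 2x2 devices, which support MCS 8-15 in addition to MCS 0-7; the valid index range therefore depends on hw_dev_mcs_support, and with only eight entries per row an MCS index above 7 would read past the end of the table. A reduced model of the bounds-checked lookup, with made-up rate values:

/* Reduced model of the MCS-index lookup: rows are 16 entries wide, but only
 * the first 8 are usable on a 1x1 device.  Rate values are made up. */
#include <stdio.h>

#define MCS_PER_ROW 16

static const unsigned int mcs_rate_row[MCS_PER_ROW] = {
	/* MCS 0..7 (one spatial stream) */
	10, 20, 30, 40, 60, 80, 90, 100,
	/* MCS 8..15 (two spatial streams) */
	20, 40, 60, 80, 120, 160, 180, 200,
};

static unsigned int index_to_rate(unsigned int index, int is_2x2)
{
	unsigned int mcs_num_supp = is_2x2 ? 16 : 8;

	if (index >= mcs_num_supp)
		return 0;		/* out of range for this device */
	return mcs_rate_row[index];
}

int main(void)
{
	printf("1x1, MCS 12 -> %u\n", index_to_rate(12, 0));	/* 0: rejected */
	printf("2x2, MCS 12 -> %u\n", index_to_rate(12, 1));	/* 120 */
	return 0;
}
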
index 0cc5d73..62b8639 100644 (file)
@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
 
 #define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
 #define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_2X2  0x22
 
 #define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
 
@@ -673,7 +674,7 @@ struct host_cmd_ds_802_11_ad_hoc_start {
        union ieee_types_phy_param_set phy_param_set;
        u16 reserved1;
        __le16 cap_info_bitmap;
-       u8 DataRate[HOSTCMD_SUPPORTED_RATES];
+       u8 data_rate[HOSTCMD_SUPPORTED_RATES];
 } __packed;
 
 struct host_cmd_ds_802_11_ad_hoc_result {
index d792b3f..2694045 100644 (file)
@@ -187,8 +187,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
 
        skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
-       sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
-                                               (adapter->sleep_cfm->data);
 
        adapter->cmd_sent = false;
 
@@ -254,6 +252,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        mwifiex_wmm_init(adapter);
 
        if (adapter->sleep_cfm) {
+               sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+                                               adapter->sleep_cfm->data;
                memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
                sleep_cfm_buf->command =
                                cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
index 62b4c29..1c49813 100644 (file)
@@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        u32 cmd_append_size = 0;
        u32 i;
        u16 tmp_cap;
-       uint16_t ht_cap_info;
        struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+       u8 radio_type;
 
        struct mwifiex_ie_types_htcap *ht_cap;
        struct mwifiex_ie_types_htinfo *ht_info;
@@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
                bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
        }
 
-       memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
-       mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
+       memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
+       mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
        if ((adapter->adhoc_start_band & BAND_G) &&
            (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
                if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
@@ -850,20 +850,19 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
                }
        }
        /* Find the last non zero */
-       for (i = 0; i < sizeof(adhoc_start->DataRate) &&
-                       adhoc_start->DataRate[i];
-                       i++)
-                       ;
+       for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
+               if (!adhoc_start->data_rate[i])
+                       break;
 
        priv->curr_bss_params.num_of_rates = i;
 
        /* Copy the ad-hoc creating rates into Current BSS rate structure */
        memcpy(&priv->curr_bss_params.data_rates,
-              &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
+              &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
 
        dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
-              adhoc_start->DataRate[0], adhoc_start->DataRate[1],
-              adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
+              adhoc_start->data_rate[0], adhoc_start->data_rate[1],
+              adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
 
        dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
 
@@ -914,55 +913,40 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        }
 
        if (adapter->adhoc_11n_enabled) {
-               {
-                       ht_cap = (struct mwifiex_ie_types_htcap *) pos;
-                       memset(ht_cap, 0,
-                              sizeof(struct mwifiex_ie_types_htcap));
-                       ht_cap->header.type =
-                               cpu_to_le16(WLAN_EID_HT_CAPABILITY);
-                       ht_cap->header.len =
-                              cpu_to_le16(sizeof(struct ieee80211_ht_cap));
-                       ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
-
-                       ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
-                       if (adapter->chan_offset) {
-                               ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
-                               ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
-                               ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-                               SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
-                       }
+               /* Fill HT CAPABILITY */
+               ht_cap = (struct mwifiex_ie_types_htcap *) pos;
+               memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
+               ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+               ht_cap->header.len =
+                      cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+               radio_type = mwifiex_band_to_radio_type(
+                                       priv->adapter->config_bands);
+               mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+
+               pos += sizeof(struct mwifiex_ie_types_htcap);
+               cmd_append_size +=
+                       sizeof(struct mwifiex_ie_types_htcap);
 
-                       ht_cap->ht_cap.ampdu_params_info
-                                       = IEEE80211_HT_MAX_AMPDU_64K;
-                       ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
-                       pos += sizeof(struct mwifiex_ie_types_htcap);
-                       cmd_append_size +=
-                               sizeof(struct mwifiex_ie_types_htcap);
-               }
-               {
-                       ht_info = (struct mwifiex_ie_types_htinfo *) pos;
-                       memset(ht_info, 0,
-                              sizeof(struct mwifiex_ie_types_htinfo));
-                       ht_info->header.type =
-                               cpu_to_le16(WLAN_EID_HT_INFORMATION);
-                       ht_info->header.len =
-                               cpu_to_le16(sizeof(struct ieee80211_ht_info));
-                       ht_info->ht_info.control_chan =
-                               (u8) priv->curr_bss_params.bss_descriptor.
-                               channel;
-                       if (adapter->chan_offset) {
-                               ht_info->ht_info.ht_param =
-                                       adapter->chan_offset;
-                               ht_info->ht_info.ht_param |=
+               /* Fill HT INFORMATION */
+               ht_info = (struct mwifiex_ie_types_htinfo *) pos;
+               memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
+               ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
+               ht_info->header.len =
+                       cpu_to_le16(sizeof(struct ieee80211_ht_info));
+
+               ht_info->ht_info.control_chan =
+                       (u8) priv->curr_bss_params.bss_descriptor.channel;
+               if (adapter->chan_offset) {
+                       ht_info->ht_info.ht_param = adapter->chan_offset;
+                       ht_info->ht_info.ht_param |=
                                        IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
-                       }
-                       ht_info->ht_info.operation_mode =
-                            cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-                       ht_info->ht_info.basic_set[0] = 0xff;
-                       pos += sizeof(struct mwifiex_ie_types_htinfo);
-                       cmd_append_size +=
-                               sizeof(struct mwifiex_ie_types_htinfo);
                }
+               ht_info->ht_info.operation_mode =
+                    cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+               ht_info->ht_info.basic_set[0] = 0xff;
+               pos += sizeof(struct mwifiex_ie_types_htinfo);
+               cmd_append_size +=
+                       sizeof(struct mwifiex_ie_types_htinfo);
        }
 
        cmd->size = cpu_to_le16((u16)
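The restructured block above follows the usual TLV-append pattern: fill a header and payload at the current write position, then advance the cursor and the running command size by the same amount. A minimal sketch of that pattern with illustrative names is below; the little-endian conversions the driver performs on the header fields are omitted here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_header {
        uint16_t type;
        uint16_t len;
};

/* Append one TLV at *pos; returns the new write position and bumps *total. */
static uint8_t *append_tlv(uint8_t *pos, uint16_t type, const void *payload,
                           uint16_t len, size_t *total)
{
        struct tlv_header hdr = { .type = type, .len = len };

        memcpy(pos, &hdr, sizeof(hdr));
        memcpy(pos + sizeof(hdr), payload, len);
        *total += sizeof(hdr) + len;
        return pos + sizeof(hdr) + len;
}

int main(void)
{
        uint8_t cmd_buf[64];
        uint8_t ht_cap[2] = { 0x6f, 0x00 };     /* made-up capability payload */
        size_t cmd_append_size = 0;
        uint8_t *pos = cmd_buf;

        pos = append_tlv(pos, 45 /* HT capability element ID */, ht_cap,
                         sizeof(ht_cap), &cmd_append_size);
        printf("appended %zu bytes\n", cmd_append_size);
        return 0;
}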
index 30f138b..3861a61 100644 (file)
@@ -775,7 +775,8 @@ struct mwifiex_chan_freq_power *
 struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
                                                struct mwifiex_private *priv,
                                                u8 band, u32 freq);
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+                                                       u8 ht_info);
 u32 mwifiex_find_freq_from_band_chan(u8, u8);
 int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
                                u8 **buffer);
index d34acf0..a2f3200 100644 (file)
@@ -386,7 +386,6 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
        card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
        if (!card->txbd_ring_vbase) {
                dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
-               kfree(card->txbd_ring_vbase);
                return -1;
        }
        card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
@@ -1229,9 +1228,12 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
        if (!skb)
                return 0;
 
-       if (rdptr >= MWIFIEX_MAX_EVT_BD)
+       if (rdptr >= MWIFIEX_MAX_EVT_BD) {
                dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
                                        rdptr);
+               ret = -EINVAL;
+               goto done;
+       }
 
        /* Read the event ring write pointer set by firmware */
        if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
@@ -1672,9 +1674,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
                                     struct sk_buff *skb,
                                     struct mwifiex_tx_param *tx_param)
 {
-       if (!adapter || !skb) {
-               dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n",
-                               __func__, adapter, skb);
+       if (!skb) {
+               dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
                return -1;
        }
 
index 8d3ab37..b8b9d37 100644 (file)
@@ -1537,11 +1537,6 @@ done:
        return 0;
 }
 
-static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
-{
-       kfree(bss->priv);
-}
-
 /*
  * This function handles the command response of scan.
  *
@@ -1767,7 +1762,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                              cap_info_bitmap, beacon_period,
                                              ie_buf, ie_len, rssi, GFP_KERNEL);
                                *(u8 *)bss->priv = band;
-                               bss->free_priv = mwifiex_free_bss_priv;
+                               cfg80211_put_bss(bss);
 
                                if (priv->media_connected && !memcmp(bssid,
                                        priv->curr_bss_params.bss_descriptor
index 283171b..702452b 100644 (file)
@@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev)
 
 /* Device ID for SD8787 */
 #define SDIO_DEVICE_ID_MARVELL_8787   (0x9119)
+/* Device ID for SD8797 */
+#define SDIO_DEVICE_ID_MARVELL_8797   (0x9129)
 
 /* WLAN IDs */
 static const struct sdio_device_id mwifiex_ids[] = {
        {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
        {},
 };
 
@@ -1573,7 +1576,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        sdio_set_drvdata(func, card);
 
        adapter->dev = &func->dev;
-       strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+
+       switch (func->device) {
+       case SDIO_DEVICE_ID_MARVELL_8797:
+               strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
+               break;
+       case SDIO_DEVICE_ID_MARVELL_8787:
+       default:
+               strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+               break;
+       }
 
        return 0;
 
@@ -1630,14 +1642,14 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
        card->mpa_tx.pkt_cnt = 0;
        card->mpa_tx.start_port = 0;
 
-       card->mpa_tx.enabled = 0;
+       card->mpa_tx.enabled = 1;
        card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
 
        card->mpa_rx.buf_len = 0;
        card->mpa_rx.pkt_cnt = 0;
        card->mpa_rx.start_port = 0;
 
-       card->mpa_rx.enabled = 0;
+       card->mpa_rx.enabled = 1;
        card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
 
        /* Allocate buffers for SDIO MP-A */
@@ -1774,4 +1786,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
 MODULE_VERSION(SDIO_VERSION);
 MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
index 3f71180..a3fb322 100644 (file)
@@ -29,6 +29,7 @@
 #include "main.h"
 
 #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
 
 #define BLOCK_MODE     1
 #define BYTE_MODE      0
index 7a16b0c..e812db8 100644 (file)
@@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
        priv->tx_htinfo = resp->params.tx_rate.ht_info;
        if (!priv->is_data_rate_auto)
                priv->data_rate =
-                       mwifiex_index_to_data_rate(priv->tx_rate,
+                       mwifiex_index_to_data_rate(priv, priv->tx_rate,
                                                   priv->tx_htinfo);
 
        return 0;
index ea4a29b..4b6f553 100644 (file)
@@ -832,8 +832,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
 
        if (!ret) {
                if (rate->is_rate_auto)
-                       rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
-                                                       priv->tx_htinfo);
+                       rate->rate = mwifiex_index_to_data_rate(priv,
+                                       priv->tx_rate, priv->tx_htinfo);
                else
                        rate->rate = priv->data_rate;
        } else {
index 2743051..5e1ef7e 100644 (file)
@@ -126,6 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
        u16 rx_pkt_type;
        struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
 
+       if (!priv)
+               return -1;
+
        local_rx_pd = (struct rxpd *) (skb->data);
        rx_pkt_type = local_rx_pd->rx_pkt_type;
 
@@ -189,12 +192,11 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
                                             (u8) local_rx_pd->rx_pkt_type,
                                             skb);
 
-       if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
-               if (priv && (ret == -1))
-                       priv->stats.rx_dropped++;
-
+       if (ret || (rx_pkt_type == PKT_TYPE_BAR))
                dev_kfree_skb_any(skb);
-       }
+
+       if (ret)
+               priv->stats.rx_dropped++;
 
        return ret;
 }
index e99ca1c..96e39ed 100644 (file)
@@ -76,6 +76,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
 {
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct ieee80211_channel *channel;
+       struct cfg80211_bss *cbss;
        u8 *ie;
        u8 ie_buf[46];
        u64 timestamp;
@@ -121,9 +122,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
        beacon_interval = le16_to_cpu(bss->a.beacon_interv);
        signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
 
-       cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
-                           capability, beacon_interval, ie_buf, ie_len,
-                           signal, GFP_KERNEL);
+       cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
+                                  capability, beacon_interval, ie_buf, ie_len,
+                                  signal, GFP_KERNEL);
+       cfg80211_put_bss(cbss);
 }
 
 void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -132,6 +134,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
 {
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct ieee80211_channel *channel;
+       struct cfg80211_bss *cbss;
        const u8 *ie;
        u64 timestamp;
        s32 signal;
@@ -152,9 +155,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
        ie = bss->data;
        signal = SIGNAL_TO_MBM(bss->level);
 
-       cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
-                           capability, beacon_interval, ie, ie_len,
-                           signal, GFP_KERNEL);
+       cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
+                                  capability, beacon_interval, ie, ie_len,
+                                  signal, GFP_KERNEL);
+       cfg80211_put_bss(cbss);
 }
 
 void orinoco_add_hostscan_results(struct orinoco_private *priv,
index 78d0d69..7faed62 100644 (file)
@@ -581,11 +581,7 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
        struct p54s_priv *priv = dev->priv;
        unsigned long flags;
 
-       if (mutex_lock_interruptible(&priv->mutex)) {
-               /* FIXME: how to handle this error? */
-               return;
-       }
-
+       mutex_lock(&priv->mutex);
        WARN_ON(priv->fw_state != FW_STATE_READY);
 
        p54spi_power_off(priv);
@@ -704,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
 static struct spi_driver p54spi_driver = {
        .driver = {
                .name           = "p54spi",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index 6ed9c32..42b97bc 100644 (file)
@@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
 
        skb_unlink(skb, &priv->tx_queue);
        p54_tx_qos_accounting_free(priv, skb);
-       dev_kfree_skb_any(skb);
+       ieee80211_free_txskb(dev, skb);
 }
 EXPORT_SYMBOL_GPL(p54_free_skb);
 
@@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
                            &hdr_flags, &aid, &burst_allowed);
 
        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
-               dev_kfree_skb_any(skb);
+               ieee80211_free_txskb(dev, skb);
                return;
        }
 
index bc2ba80..4e44b1a 100644 (file)
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
        return ret;
 }
 
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
-       PRISM2_SET_ENCRYPTION = 6,
-       PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
-       PRISM2_HOSTAPD_MLME = 13,
-       PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
 #define PRISM54_SET_WPA                        SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD                        SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED       SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
-       offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
-       u32 cmd;
-       u8 sta_addr[ETH_ALEN];
-       union {
-              struct {
-                      u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
-                      u32 flags;
-                      u32 err;
-                      u8 idx;
-                      u8 seq[8]; /* sequence counter (set: RX, get: TX) */
-                      u16 key_len;
-                      u8 key[0];
-                      } crypt;
-               struct {
-                       u8 len;
-                       u8 data[0];
-               } generic_elem;
-               struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
-                       u16 cmd;
-                       u16 reason_code;
-               } mlme;
-               struct {
-                       u8 ssid_len;
-                       u8 ssid[32];
-               } scan_req;
-       } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
-       struct prism2_hostapd_param *param,
-       int param_len)
-{
-       islpci_private *priv = netdev_priv(dev);
-       int rvalue = 0, force = 0;
-       int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
-       union oid_res_t r;
-
-       /* with the new API, it's impossible to get a NULL pointer.
-        * New version of iwconfig set the IW_ENCODE_NOKEY flag
-        * when no key is given, but older versions don't. */
-
-       if (param->u.crypt.key_len > 0) {
-               /* we have a key to set */
-               int index = param->u.crypt.idx;
-               int current_index;
-               struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
-               /* get the current key index */
-               rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
-               current_index = r.u;
-               /* Verify that the key is not marked as invalid */
-               if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
-                       key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
-                           sizeof (param->u.crypt.key) : param->u.crypt.key_len;
-                       memcpy(key.key, param->u.crypt.key, key.length);
-                       if (key.length == 32)
-                               /* we want WPA-PSK */
-                               key.type = DOT11_PRIV_TKIP;
-                       if ((index < 0) || (index > 3))
-                               /* no index provided use the current one */
-                               index = current_index;
-
-                       /* now send the key to the card  */
-                       rvalue |=
-                           mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
-                                           &key);
-               }
-               /*
-                * If a valid key is set, encryption should be enabled
-                * (user may turn it off later).
-                * This is also how "iwconfig ethX key on" works
-                */
-               if ((index == current_index) && (key.length > 0))
-                       force = 1;
-       } else {
-               int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
-               if ((index >= 0) && (index <= 3)) {
-                       /* we want to set the key index */
-                       rvalue |=
-                           mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
-                                           &index);
-               } else {
-                       if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
-                               /* we cannot do anything. Complain. */
-                               return -EINVAL;
-                       }
-               }
-       }
-       /* now read the flags */
-       if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
-               /* Encoding disabled,
-                * authen = DOT11_AUTH_OS;
-                * invoke = 0;
-                * exunencrypt = 0; */
-       }
-       if (param->u.crypt.flags & IW_ENCODE_OPEN)
-               /* Encode but accept non-encoded packets. No auth */
-               invoke = 1;
-       if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
-               /* Refuse non-encoded packets. Auth */
-               authen = DOT11_AUTH_BOTH;
-               invoke = 1;
-               exunencrypt = 1;
-       }
-       /* do the change if requested  */
-       if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
-               rvalue |=
-                   mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
-               rvalue |=
-                   mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
-               rvalue |=
-                   mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
-                                   &exunencrypt);
-       }
-       return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
-       struct prism2_hostapd_param *param,
-       int param_len)
-{
-       islpci_private *priv = netdev_priv(ndev);
-       int max_len, len, alen, ret=0;
-       struct obj_attachment *attach;
-
-       len = param->u.generic_elem.len;
-       max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
-       if (max_len < 0 || max_len < len)
-               return -EINVAL;
-
-       alen = sizeof(*attach) + len;
-       attach = kzalloc(alen, GFP_KERNEL);
-       if (attach == NULL)
-               return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
-       /* Note: endianness is covered by mgt_set_varlen */
-
-       attach->type = (WLAN_FC_TYPE_MGMT << 2) |
-               (WLAN_FC_STYPE_ASSOC_REQ << 4);
-       attach->id = -1;
-       attach->size = len;
-       memcpy(attach->data, param->u.generic_elem.data, len);
-
-       ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
-       if (ret == 0) {
-               attach->type = (WLAN_FC_TYPE_MGMT << 2) |
-                       (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
-              ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
-              if (ret == 0)
-                      printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
-                                      ndev->name);
-       }
-
-       kfree(attach);
-       return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
-       return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
-                     struct prism2_hostapd_param *param)
-{
-       islpci_private *priv = netdev_priv(ndev);
-       struct iw_request_info info;
-       int i, rvalue;
-       struct obj_bsslist *bsslist;
-       u32 noise = 0;
-       char *extra = "";
-       char *current_ev = "foo";
-       union oid_res_t r;
-
-       if (islpci_get_state(priv) < PRV_STATE_INIT) {
-               /* device is not ready, fail gently */
-               return 0;
-       }
-
-       /* first get the noise value. We will use it to report the link quality */
-       rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
-       noise = r.u;
-
-       /* Ask the device for a list of known bss. We can report at most
-        * IW_MAX_AP=64 to the range struct. But the device won't repport anything
-        * if you change the value of IWMAX_BSS=24.
-        */
-       rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
-       bsslist = r.ptr;
-
-       info.cmd = PRISM54_HOSTAPD;
-       info.flags = 0;
-
-       /* ok now, scan the list and translate its info */
-       for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
-               current_ev = prism54_translate_bss(ndev, &info, current_ev,
-                                                  extra + IW_SCAN_MAX_DATA,
-                                                  &(bsslist->bsslist[i]),
-                                                  noise);
-       kfree(bsslist);
-
-       return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
-       struct prism2_hostapd_param *param;
-       int ret = 0;
-       u32 uwrq;
-
-       printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
-       if (p->length < sizeof(struct prism2_hostapd_param) ||
-           p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
-               return -EINVAL;
-
-       param = memdup_user(p->pointer, p->length);
-       if (IS_ERR(param))
-               return PTR_ERR(param);
-
-       switch (param->cmd) {
-       case PRISM2_SET_ENCRYPTION:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
-                              ndev->name);
-               ret = prism2_ioctl_set_encryption(ndev, param, p->length);
-               break;
-       case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
-                              ndev->name);
-               ret = prism2_ioctl_set_generic_element(ndev, param,
-                                                      p->length);
-               break;
-       case PRISM2_HOSTAPD_MLME:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
-                              ndev->name);
-               ret = prism2_ioctl_mlme(ndev, param);
-               break;
-       case PRISM2_HOSTAPD_SCAN_REQ:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
-                              ndev->name);
-               ret = prism2_ioctl_scan_req(ndev, param);
-               break;
-       case PRISM54_SET_WPA:
-              printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
-                              ndev->name);
-              uwrq = 1;
-              ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
-              break;
-       case PRISM54_DROP_UNENCRYPTED:
-              printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
-                              ndev->name);
-#if 0
-              uwrq = 0x01;
-              mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
-              down_write(&priv->mib_sem);
-              mgt_commit(priv);
-              up_write(&priv->mib_sem);
-#endif
-              /* Not necessary, as set_wpa does it, should we just do it here though? */
-              ret = 0;
-              break;
-       default:
-              printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
-                              ndev->name);
-               ret = -EOPNOTSUPP;
-               break;
-       }
-
-       if (ret == 0 && copy_to_user(p->pointer, param, p->length))
-               ret = -EFAULT;
-
-       kfree(param);
-
-       return ret;
-}
 
 static int
 prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
        .private_args = (struct iw_priv_args *) prism54_private_args,
        .get_wireless_stats = prism54_get_wireless_stats,
 };
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-       struct iwreq *wrq = (struct iwreq *) rq;
-       int ret = -1;
-       switch (cmd) {
-               case PRISM54_HOSTAPD:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               ret = prism54_hostapd(ndev, &wrq->u.data);
-               return ret;
-       }
-       return -EOPNOTSUPP;
-}
index bcfbfb9..a34bceb 100644 (file)
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
 
 int prism54_set_mac_address(struct net_device *, void *);
 
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
 extern const struct iw_handler_def prism54_handler_def;
 
 #endif                         /* _ISL_IOCTL_H */
index 5d0f615..5970ff6 100644 (file)
@@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev)
 static void islpci_ethtool_get_drvinfo(struct net_device *dev,
                                        struct ethtool_drvinfo *info)
 {
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 }
 
 static const struct ethtool_ops islpci_ethtool_ops = {
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
 static const struct net_device_ops islpci_netdev_ops = {
        .ndo_open               = islpci_open,
        .ndo_stop               = islpci_close,
-       .ndo_do_ioctl           = prism54_ioctl,
        .ndo_start_xmit         = islpci_eth_transmit,
        .ndo_tx_timeout         = islpci_eth_tx_timeout,
        .ndo_set_mac_address    = prism54_set_mac_address,
index 0021e49..04fec1f 100644 (file)
@@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
                            unsigned int pkt_addr, int rx_len)
 {
        UCHAR buff[256];
-       struct rx_msg *msg = (struct rx_msg *)buff;
+       struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
 
        del_timer(&local->timer);
 
@@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
                              unsigned int pkt_addr, int rx_len)
 {
 /*  UCHAR buff[256];
-    struct rx_msg *msg = (struct rx_msg *)buff;
+    struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
 */
        pr_debug("Deauthentication frame received\n");
        local->authentication_state = UNAUTHENTICATED;
index d7646f2..3c3b98b 100644 (file)
@@ -566,9 +566,9 @@ struct phy_header {
     UCHAR hdr_3;
     UCHAR hdr_4;
 };
-struct rx_msg {
+struct ray_rx_msg {
     struct mac_header mac;
-    UCHAR  var[1];
+    UCHAR  var[0];
 };
 
 struct tx_msg {
index 0c13840..3802c31 100644 (file)
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
        NDIS_80211_POWER_MODE_FAST_PSP,
 };
 
+enum ndis_80211_pmkid_cand_list_flag_bits {
+       NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
+};
+
 struct ndis_80211_auth_request {
        __le32 length;
        u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
 struct ndis_80211_bssid_info {
        u8 bssid[6];
        u8 pmkid[16];
-};
+} __packed;
 
 struct ndis_80211_pmkid {
        __le32 length;
        __le32 bssid_info_count;
        struct ndis_80211_bssid_info bssid_info[0];
-};
+} __packed;
 
 /*
  *  private data
  */
-#define NET_TYPE_11FB  0
-
 #define CAP_MODE_80211A                1
 #define CAP_MODE_80211B                2
 #define CAP_MODE_80211G                4
@@ -414,6 +416,7 @@ struct ndis_80211_pmkid {
 #define RNDIS_WLAN_ALG_TKIP    (1<<1)
 #define RNDIS_WLAN_ALG_CCMP    (1<<2)
 
+#define RNDIS_WLAN_NUM_KEYS            4
 #define RNDIS_WLAN_KEY_MGMT_NONE       0
 #define RNDIS_WLAN_KEY_MGMT_802_1X     (1<<0)
 #define RNDIS_WLAN_KEY_MGMT_PSK                (1<<1)
@@ -516,7 +519,7 @@ struct rndis_wlan_private {
 
        /* encryption stuff */
        int  encr_tx_key_index;
-       struct rndis_wlan_encr_key encr_keys[4];
+       struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
        int  wpa_version;
 
        u8 command_buffer[COMMAND_BUFFER_SIZE];
@@ -1346,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
        return ret;
 }
 
+static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
+                                                    u16 *beacon_interval)
+{
+       struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+       struct ieee80211_channel *channel;
+       struct ndis_80211_conf config;
+       int len, ret;
+
+       /* Get channel and beacon interval */
+       len = sizeof(config);
+       ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+       netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+                               __func__, ret);
+       if (ret < 0)
+               return NULL;
+
+       channel = ieee80211_get_channel(priv->wdev.wiphy,
+                               KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+       if (!channel)
+               return NULL;
+
+       if (beacon_interval)
+               *beacon_interval = le16_to_cpu(config.beacon_period);
+       return channel;
+}
+
 /* index must be 0 - N, as per NDIS  */
 static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
                                                                int index)
@@ -1535,6 +1564,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
        bool is_wpa;
        int ret;
 
+       if (index >= RNDIS_WLAN_NUM_KEYS)
+               return -ENOENT;
+
        if (priv->encr_keys[index].len == 0)
                return 0;
 
@@ -1972,11 +2004,12 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
        return ret;
 }
 
-static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
-                                       struct ndis_80211_bssid_ex *bssid)
+static bool rndis_bss_info_update(struct usbnet *usbdev,
+                                 struct ndis_80211_bssid_ex *bssid)
 {
        struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
        struct ieee80211_channel *channel;
+       struct cfg80211_bss *bss;
        s32 signal;
        u64 timestamp;
        u16 capability;
@@ -2015,9 +2048,12 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
        capability = le16_to_cpu(fixed->capabilities);
        beacon_interval = le16_to_cpu(fixed->beacon_interval);
 
-       return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
+       bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
                timestamp, capability, beacon_interval, ie, ie_len, signal,
                GFP_KERNEL);
+       cfg80211_put_bss(bss);
+
+       return (bss != NULL);
 }
 
 static struct ndis_80211_bssid_ex *next_bssid_list_item(
@@ -2451,6 +2487,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
 
        netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
 
+       if (key_index >= RNDIS_WLAN_NUM_KEYS)
+               return -ENOENT;
+
        priv->encr_tx_key_index = key_index;
 
        if (is_wpa_key(priv, key_index))
@@ -2639,12 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
 {
        struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
        struct ieee80211_channel *channel;
-       struct ndis_80211_conf config;
        struct ndis_80211_ssid ssid;
+       struct cfg80211_bss *bss;
        s32 signal;
        u64 timestamp;
        u16 capability;
-       u16 beacon_interval;
+       u16 beacon_interval = 0;
        __le32 rssi;
        u8 ie_buf[34];
        int len, ret, ie_len;
@@ -2669,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
        }
 
        /* Get channel and beacon interval */
-       len = sizeof(config);
-       ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
-       netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
-                               __func__, ret);
-       if (ret >= 0) {
-               beacon_interval = le16_to_cpu(config.beacon_period);
-               channel = ieee80211_get_channel(priv->wdev.wiphy,
-                               KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
-               if (!channel) {
-                       netdev_warn(usbdev->net, "%s(): could not get channel."
-                                                "\n", __func__);
-                       return;
-               }
-       } else {
-               netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
-                                        __func__);
+       channel = get_current_channel(usbdev, &beacon_interval);
+       if (!channel) {
+               netdev_warn(usbdev->net, "%s(): could not get channel.\n",
+                                       __func__);
                return;
        }
 
@@ -2714,9 +2741,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
                bssid, (u32)timestamp, capability, beacon_interval, ie_len,
                ssid.essid, signal);
 
-       cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
+       bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
                timestamp, capability, beacon_interval, ie_buf, ie_len,
                signal, GFP_KERNEL);
+       cfg80211_put_bss(bss);
 }
 
 /*
@@ -2828,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
                                                req_ie_len, resp_ie,
                                                resp_ie_len, 0, GFP_KERNEL);
                else
-                       cfg80211_roamed(usbdev->net, NULL, bssid,
-                                       req_ie, req_ie_len,
+                       cfg80211_roamed(usbdev->net,
+                                       get_current_channel(usbdev, NULL),
+                                       bssid, req_ie, req_ie_len,
                                        resp_ie, resp_ie_len, GFP_KERNEL);
        } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
                cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -2995,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
        for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
                struct ndis_80211_pmkid_candidate *cand =
                                                &cand_list->candidate_list[i];
+               bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
 
-               netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
-                          i, le32_to_cpu(cand->flags), cand->bssid);
-
-#if 0
-               struct iw_pmkid_cand pcand;
-               union iwreq_data wrqu;
+               netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
+                          i, le32_to_cpu(cand->flags), preauth, cand->bssid);
 
-               memset(&pcand, 0, sizeof(pcand));
-               if (le32_to_cpu(cand->flags) & 0x01)
-                       pcand.flags |= IW_PMKID_CAND_PREAUTH;
-               pcand.index = i;
-               memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
-
-               memset(&wrqu, 0, sizeof(wrqu));
-               wrqu.data.length = sizeof(pcand);
-               wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
-                                                               (u8 *)&pcand);
-#endif
+               cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
+                                               preauth, GFP_ATOMIC);
        }
 }
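NDIS_80211_PMKID_CAND_PREAUTH is defined above as cpu_to_le32(1 << 0); the mask is stored pre-swapped so it can be ANDed directly against the little-endian flags field without a per-use conversion. A standalone sketch of that idiom, with hypothetical helpers rather than the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reinterpret a host-order value in little-endian byte order
 * (identity on little-endian hosts, a byte swap on big-endian ones). */
static uint32_t to_le32(uint32_t v)
{
        uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
        uint32_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

int main(void)
{
        const uint32_t CAND_PREAUTH = to_le32(1u << 0);  /* mask kept pre-swapped */
        uint32_t wire_flags = to_le32(0x00000001);       /* flags as read off the wire */

        printf("preauth: %d\n", !!(wire_flags & CAND_PREAUTH));
        return 0;
}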
 
index 1ba079d..e5df380 100644 (file)
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
                           !(filter_flags & FIF_PSPOLL));
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+                          !(filter_flags & FIF_CONTROL));
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+                          !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
                           !(filter_flags & FIF_CONTROL));
        rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
index edd317f..c3e1aa7 100644 (file)
@@ -831,11 +831,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
        if (spec->supported_rates & SUPPORT_RATE_OFDM)
                num_rates += 8;
 
-       channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+       channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
        if (!channels)
                return -ENOMEM;
 
-       rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+       rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
        if (!rates)
                goto exit_free_channels;
 
index b4ce934..a13ecfc 100644 (file)
@@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
        if (is_valid_ether_addr(rtlefuse->dev_addr)) {
                SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
        } else {
-               u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
-               get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
-               SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+               u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+               get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
+               SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
        }
 
 }
index 4ae9059..f66b575 100644 (file)
@@ -76,7 +76,7 @@ enum ap_peer {
        SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
 
 #define SET_80211_PS_POLL_AID(_hdr, _val)              \
-       (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+       (*(u16 *)((u8 *)(_hdr) + 2) = _val)
 #define SET_80211_PS_POLL_BSSID(_hdr, _val)            \
        memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
 #define SET_80211_PS_POLL_TA(_hdr, _val)               \
index eb61061..91f0525 100644 (file)
@@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        unsigned long flags;
        u32 inta = 0;
        u32 intb = 0;
+       irqreturn_t ret = IRQ_HANDLED;
 
        spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
 
@@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
 
        /*Shared IRQ or HW disappared */
-       if (!inta || inta == 0xffff)
+       if (!inta || inta == 0xffff) {
+               ret = IRQ_NONE;
                goto done;
+       }
 
        /*<1> beacon related */
        if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -890,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
        if (rtlpriv->rtlhal.earlymode_enable)
                tasklet_schedule(&rtlpriv->works.irq_tasklet);
 
-       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-       return IRQ_HANDLED;
-
 done:
        spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-       return IRQ_HANDLED;
+       return ret;
 }
 
 static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
index 950c65a..931d979 100644 (file)
@@ -73,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
        }
 }
 
+static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
+                               u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
+       u8 *bufferPtr = (u8 *) buffer;
+       u32 i, offset, blockCount, remainSize;
+
+       blockCount = size / blockSize;
+       remainSize = size % blockSize;
+
+       for (i = 0; i < blockCount; i++) {
+               offset = i * blockSize;
+               rtlpriv->io.writeN_sync(rtlpriv,
+                                       (FW_8192C_START_ADDRESS + offset),
+                                       (void *)(bufferPtr + offset),
+                                       blockSize);
+       }
+
+       if (remainSize) {
+               offset = blockCount * blockSize;
+               rtlpriv->io.writeN_sync(rtlpriv,
+                                       (FW_8192C_START_ADDRESS + offset),
+                                       (void *)(bufferPtr + offset),
+                                       remainSize);
+       }
+}
+
 static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
                                   const u8 *buffer, u32 size)
 {
@@ -81,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
        u8 *bufferPtr = (u8 *) buffer;
        u32 *pu4BytePtr = (u32 *) buffer;
        u32 i, offset, blockCount, remainSize;
+       u32 data;
 
+       if (rtlpriv->io.writeN_sync) {
+               rtl_block_fw_writeN(hw, buffer, size);
+               return;
+       }
        blockCount = size / blockSize;
        remainSize = size % blockSize;
+       if (remainSize) {
+               /* the last word is < 4 bytes - pad it with zeros */
+               for (i = 0; i < 4 - remainSize; i++)
+                       *(bufferPtr + size + i) = 0;
+               blockCount++;
+       }
 
        for (i = 0; i < blockCount; i++) {
                offset = i * blockSize;
+               /* for big-endian platforms, the firmware data need to be byte
+                * swapped as it was read as a byte string and will be written
+                * as 32-bit dwords and byte swapped when written
+                */
+               data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
                rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4BytePtr + i));
-       }
-
-       if (remainSize) {
-               offset = blockCount * blockSize;
-               bufferPtr += offset;
-               for (i = 0; i < remainSize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
-                                                offset + i), *(bufferPtr + i));
-               }
+                               data);
        }
 }
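rtl_block_fw_writeN() above is a plain block/remainder split: whole blocks first, then one shorter write for the tail. A standalone sketch of that loop follows, with a hypothetical write_chunk() standing in for the writeN_sync() bus write; the 234-byte block size mirrors the driver's REALTEK_USB_VENQT_MAX_BUF_SIZE (254) minus 20.

#include <stddef.h>
#include <stdio.h>

static void write_chunk(size_t offset, const unsigned char *data, size_t len)
{
        /* stand-in for the bus write; just report what would be sent */
        printf("write %zu bytes at offset %zu\n", len, offset);
        (void)data;
}

static void block_write(const unsigned char *buf, size_t size, size_t block_size)
{
        size_t block_count = size / block_size;
        size_t remain = size % block_size;
        size_t i;

        for (i = 0; i < block_count; i++)
                write_chunk(i * block_size, buf + i * block_size, block_size);

        if (remain)     /* tail shorter than one full block */
                write_chunk(block_count * block_size,
                            buf + block_count * block_size, remain);
}

int main(void)
{
        unsigned char fw[1000] = { 0 };

        block_write(fw, sizeof(fw), 234);
        return 0;
}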
 
@@ -227,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        u32 fwsize;
        enum version_8192c version = rtlhal->version;
 
-       pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
        if (!rtlhal->pfirmware)
                return 1;
 
+       pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
        pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
        pfwdata = (u8 *) rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
@@ -238,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        if (IS_FW_HEADER_EXIST(pfwheader)) {
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
-                         pfwheader->version, pfwheader->signature,
-                         (uint)sizeof(struct rtl92c_firmware_header)));
+                        le16_to_cpu(pfwheader->version),
+                        le16_to_cpu(pfwheader->signature),
+                        (uint)sizeof(struct rtl92c_firmware_header)));
 
                pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
                fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
index 3d5823c..cec5a3a 100644 (file)
 
 #define FW_8192C_SIZE                          0x3000
 #define FW_8192C_START_ADDRESS                 0x1000
-#define FW_8192C_END_ADDRESS                   0x3FFF
+#define FW_8192C_END_ADDRESS                   0x1FFF
 #define FW_8192C_PAGE_SIZE                     4096
 #define FW_8192C_POLLING_DELAY                 5
 #define FW_8192C_POLLING_TIMEOUT_COUNT         100
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)    \
-       ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
-       (_pfwhdr->signature&0xFFF0) == 0x88C0)
+       ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
+       (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
 
 struct rtl92c_firmware_header {
-       u16 signature;
+       __le16 signature;
        u8 category;
        u8 function;
-       u16 version;
+       __le16 version;
        u8 subversion;
        u8 rsvd1;
        u8 month;
        u8 date;
        u8 hour;
        u8 minute;
-       u16 ramcodeSize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
+       __le16 ramcodeSize;
+       __le16 rsvd2;
+       __le32 svnindex;
+       __le32 rsvd3;
+       __le32 rsvd4;
+       __le32 rsvd5;
 };
 
 enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
 void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
 void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
 void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
 
 #endif
index 814c05d..4ed973a 100644 (file)
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
        }
        RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
                      hwinfo, HWSET_MAX_SIZE);
-       eeprom_id = *((u16 *)&hwinfo[0]);
+       eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
        if (eeprom_id != RTL8190_EEPROM_ID) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
        pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
        _rtl92cu_read_txpower_info_from_hwpg(hw,
                                           rtlefuse->autoload_failflag, hwinfo);
-       rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
-       rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+       rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
+       rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 (" VID = 0x%02x PID = 0x%02x\n",
                 rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
        rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
-       rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+       rtlefuse->eeprom_version =
+                        le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
        rtlefuse->txpwr_fromeprom = true;
        rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
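The hunks above replace direct u16 loads from the EEPROM byte buffer with le16_to_cpu() conversions so the values also come out right on big-endian hosts. A minimal sketch of an equivalent endian-safe read, using made-up example bytes only:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 16-bit value from its little-endian byte representation;
 * works regardless of host byte order and of buffer alignment. */
static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        uint8_t hwinfo[4] = { 0x29, 0x81, 0xda, 0x0b }; /* made-up EEPROM bytes */

        printf("eeprom id: 0x%04x\n", get_le16(&hwinfo[0]));   /* 0x8129 */
        printf("vid:       0x%04x\n", get_le16(&hwinfo[2]));   /* 0x0bda */
        return 0;
}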
index 060a06f..9e0c8fc 100644 (file)
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
                }
        }
        rtlhal->version  = (enum version_8192c)chip_version;
+       pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
        switch (rtlhal->version) {
        case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
index c244f2f..94a3e17 100644 (file)
@@ -275,6 +275,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
 
        /****** 8188CU ********/
+       /* RTL8188CTV */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
        /* 8188CE-VAU USB minCard */
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
        /* 8188cu 1*1 dongle */
@@ -291,14 +293,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
        /* 8188RU in Alfa AWUS036NHR */
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+       /* RTL8188CUS-VL */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
        /* 8188 Combo for BC4 */
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
 
        /****** 8192CU ********/
-       /* 8191cu 1*2 */
-       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
        /* 8192cu 2*2 */
-       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
        /* 8192CE-VAU USB minCard */
        {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
 
@@ -309,13 +311,17 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
-       {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+       {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
        {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+       {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
        {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+       /*SW-WF02-AD15 -Abocom*/
+       {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
        {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
        {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
@@ -326,14 +332,36 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
        {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
 
+       /****** 8188 RU ********/
+       /* Netcore */
+       {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+
+       /****** 8188CUS Slim Solo********/
+       {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
+       {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
+       {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+
+       /****** 8188CUS Slim Combo ********/
+       {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
+       {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
+       {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
+       {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
+
        /****** 8192CU ********/
+       {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
+       {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
        {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
        {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+       {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+       {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+       {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
        {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
        {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
        {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+       {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
        {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
        {}
 };
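The entries added above only extend the USB ID table; for context, matching amounts to scanning the table for a (vendor, product) pair and handing back the associated HAL config. A simplified, self-contained sketch of that lookup (the struct layout, config strings and sample IDs are illustrative stand-ins, not the rtlwifi or USB-core implementation):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct id_entry {
	uint16_t vid;
	uint16_t pid;
	const char *cfg;                /* stands in for rtl92cu_hal_cfg */
};

static const struct id_entry ids[] = {
	{ 0x0bda, 0x8178, "rtl92cu" },  /* assumed Realtek VID, 8192cu 2*2 */
	{ 0x2001, 0x3308, "rtl92cu" },  /* D-Link - Alpha */
	{ 0, 0, NULL }                  /* terminator, like the {} above */
};

static const char *match(uint16_t vid, uint16_t pid)
{
	for (size_t i = 0; ids[i].vid; i++)
		if (ids[i].vid == vid && ids[i].pid == pid)
			return ids[i].cfg;
	return NULL;
}

int main(void)
{
	const char *cfg = match(0x2001, 0x3308);

	printf("matched config: %s\n", cfg ? cfg : "(none)");
	return 0;
}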
index bc33b14..b3cc7b9 100644 (file)
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
        SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
        for (index = 0; index < 16; index++)
                checksum = checksum ^ (*(ptr + index));
-       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+       SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
 }
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
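For reference, the descriptor checksum scheme in the hunk above is: clear the checksum field, XOR the first 16 little-endian halfwords of the descriptor together, then store the result back (as little-endian, which is what the cpu_to_le16() fix guarantees). A standalone sketch of that arithmetic, not the driver's SET_TX_DESC_* macros:

#include <stdint.h>
#include <stdio.h>

/* XOR the first 16 little-endian halfwords of a TX descriptor */
static uint16_t txdesc_checksum(const uint8_t *txdesc)
{
	uint16_t checksum = 0;

	for (int i = 0; i < 16; i++) {
		uint16_t w = (uint16_t)txdesc[2 * i] |
			     ((uint16_t)txdesc[2 * i + 1] << 8);
		checksum ^= w;
	}
	return checksum;
}

int main(void)
{
	uint8_t desc[40] = { 0x01, 0x02, 0x03, 0x04 };  /* dummy descriptor */

	printf("checksum = 0x%04x\n", (unsigned)txdesc_checksum(desc));
	return 0;
}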
index 54cb8a6..e956fa7 100644 (file)
 #include "usb.h"
 #include "base.h"
 #include "ps.h"
+#include "rtl8192c/fw_common.h"
 
 #define        REALTEK_USB_VENQT_READ                  0xC0
 #define        REALTEK_USB_VENQT_WRITE                 0x40
 #define REALTEK_USB_VENQT_CMD_REQ              0x05
 #define        REALTEK_USB_VENQT_CMD_IDX               0x00
 
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE         254
+#define MAX_USBCTRL_VENDORREQ_TIMES            10
 
 static void usbctrl_async_callback(struct urb *urb)
 {
@@ -82,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
        dr->wValue = cpu_to_le16(value);
        dr->wIndex = cpu_to_le16(index);
        dr->wLength = cpu_to_le16(len);
+       /* data are already in little-endian order */
        memcpy(buf, pdata, len);
        usb_fill_control_urb(urb, udev, pipe,
                             (unsigned char *)dr, buf, len,
@@ -100,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
        unsigned int pipe;
        int status;
        u8 reqtype;
+       int vendorreq_times = 0;
+       static int count;
 
        pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
        reqtype =  REALTEK_USB_VENQT_READ;
 
-       status = usb_control_msg(udev, pipe, request, reqtype, value, index,
-                                pdata, len, 0); /* max. timeout */
+       do {
+               status = usb_control_msg(udev, pipe, request, reqtype, value,
+                                        index, pdata, len, 0); /*max. timeout*/
+               if (status < 0) {
+                       /* firmware download is checksummed, don't retry */
+                       if ((value >= FW_8192C_START_ADDRESS &&
+                           value <= FW_8192C_END_ADDRESS))
+                               break;
+               } else {
+                       break;
+               }
+       } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
 
-       if (status < 0)
+       if (status < 0 && count++ < 4)
                pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
-                      value, status, *(u32 *)pdata);
+                      value, status, le32_to_cpu(*(u32 *)pdata));
        return status;
 }
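A minimal sketch of the bounded-retry pattern introduced above: retry a failing transfer up to a fixed count, but bail out immediately for address ranges where a retry would be wrong, since the firmware download is verified by its own checksum. do_transfer(), FW_START and FW_END are hypothetical stand-ins for the USB control transfer and the 8192C firmware window:

#include <stdio.h>

#define MAX_RETRIES	10
#define FW_START	0x1000          /* invented firmware address range */
#define FW_END		0x5FFF

static int do_transfer(unsigned int addr)
{
	static int calls;

	(void)addr;
	return (++calls < 3) ? -1 : 0;  /* pretend the first two attempts fail */
}

static int transfer_with_retry(unsigned int addr)
{
	int status, tries = 0;

	do {
		status = do_transfer(addr);
		if (status < 0) {
			if (addr >= FW_START && addr <= FW_END)
				break;          /* never retry a fw download */
		} else {
			break;                  /* success */
		}
	} while (++tries < MAX_RETRIES);

	return status;
}

int main(void)
{
	printf("status = %d\n", transfer_with_retry(0x0080));
	return 0;
}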
 
@@ -129,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
 
        wvalue = (u16)addr;
        _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
-       ret = *data;
+       ret = le32_to_cpu(*data);
        kfree(data);
        return ret;
 }
@@ -161,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
        u8 request;
        u16 wvalue;
        u16 index;
-       u32 data;
+       __le32 data;
 
        request = REALTEK_USB_VENQT_CMD_REQ;
        index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
        wvalue = (u16)(addr&0x0000ffff);
-       data = val;
+       data = cpu_to_le32(val);
        _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
                                       len);
 }
@@ -192,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
        _usb_write_async(to_usb_device(dev), addr, val, 4);
 }
 
+static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+                            u16 len)
+{
+       struct device *dev = rtlpriv->io.dev;
+       struct usb_device *udev = to_usb_device(dev);
+       u8 request = REALTEK_USB_VENQT_CMD_REQ;
+       u8 reqtype =  REALTEK_USB_VENQT_WRITE;
+       u16 wvalue;
+       u16 index = REALTEK_USB_VENQT_CMD_IDX;
+       int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+       u8 *buffer;
+       dma_addr_t dma_addr;
+
+       wvalue = (u16)(addr&0x0000ffff);
+       buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+       if (!buffer)
+               return;
+       memcpy(buffer, data, len);
+       usb_control_msg(udev, pipe, request, reqtype, wvalue,
+                       index, buffer, len, 50);
+
+       usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+}
+
 static void _rtl_usb_io_handler_init(struct device *dev,
                                     struct ieee80211_hw *hw)
 {
@@ -205,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
        rtlpriv->io.read8_sync          = _usb_read8_sync;
        rtlpriv->io.read16_sync         = _usb_read16_sync;
        rtlpriv->io.read32_sync         = _usb_read32_sync;
+       rtlpriv->io.writeN_sync         = _usb_writeN_sync;
 }
 
 static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
index 713c7dd..f3c132b 100644 (file)
@@ -63,6 +63,7 @@
 #define AC_MAX                                 4
 #define QOS_QUEUE_NUM                          4
 #define RTL_MAC80211_NUM_QUEUE                 5
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE         254
 
 #define QBSS_LOAD_SIZE                         5
 #define MAX_WMMELE_LENGTH                      64
@@ -943,8 +944,10 @@ struct rtl_io {
        unsigned long pci_base_addr;    /*device I/O address */
 
        void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
-       void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
-       void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
+       void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+       void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+       void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
+                            u16 len);
 
        u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
        u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
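These hunks grow the function-pointer I/O abstraction: the core fills a table of read/write callbacks and the bus back-end (PCI or USB) supplies its own implementations, now including a bulk writeN operation. The struct and callbacks below are a simplified, self-contained illustration of that ops-table pattern, not the rtl_io definition from wifi.h:

#include <stdint.h>
#include <stdio.h>

struct io_ops {
	void (*write32)(uint32_t addr, uint32_t val);
	void (*writeN)(uint32_t addr, const void *buf, uint16_t len);
};

static void usb_write32(uint32_t addr, uint32_t val)
{
	printf("write32 0x%04x <- 0x%08x\n", addr, val);
}

static void usb_writeN(uint32_t addr, const void *buf, uint16_t len)
{
	(void)buf;
	printf("writeN  0x%04x <- %u bytes\n", addr, (unsigned)len);
}

int main(void)
{
	/* the bus back-end registers its callbacks once at init time */
	struct io_ops io = { .write32 = usb_write32, .writeN = usb_writeN };
	uint8_t blob[16] = { 0 };

	io.write32(0x0100, 0xdeadbeef);
	io.writeN(0x0200, blob, sizeof(blob));
	return 0;
}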
index eaa5f95..6248c35 100644 (file)
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
 static struct spi_driver wl1251_spi_driver = {
        .driver = {
                .name           = DRIVER_NAME,
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index 3fe388b..af08c86 100644 (file)
@@ -42,16 +42,6 @@ config WL12XX_SDIO
          If you choose to build a module, it'll be called wl12xx_sdio.
          Say N if unsure.
 
-config WL12XX_SDIO_TEST
-       tristate "TI wl12xx SDIO testing support"
-       depends on WL12XX && MMC && WL12XX_SDIO
-       default n
-       ---help---
-         This module adds support for the SDIO bus testing with the
-         TI wl12xx chipsets.  You probably don't want this unless you are
-         testing a new hardware platform.  Select this if you want to test the
-         SDIO bus which is connected to the wl12xx chip.
-
 config WL12XX_PLATFORM_DATA
        bool
        depends on WL12XX_SDIO != n || WL1251_SDIO != n
index 621b348..fe67262 100644 (file)
@@ -3,14 +3,11 @@ wl12xx-objs           = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
 
 wl12xx_spi-objs        = spi.o
 wl12xx_sdio-objs       = sdio.o
-wl12xx_sdio_test-objs  = sdio_test.o
 
 wl12xx-$(CONFIG_NL80211_TESTMODE)      += testmode.o
 obj-$(CONFIG_WL12XX)                   += wl12xx.o
 obj-$(CONFIG_WL12XX_SPI)               += wl12xx_spi.o
 obj-$(CONFIG_WL12XX_SDIO)              += wl12xx_sdio.o
 
-obj-$(CONFIG_WL12XX_SDIO_TEST)         += wl12xx_sdio_test.o
-
 # small builtin driver bit
 obj-$(CONFIG_WL12XX_PLATFORM_DATA)     += wl12xx_platform_data.o
index ca044a7..bde1d86 100644 (file)
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "reg.h"
 #include "ps.h"
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_wake_up_condition *wake_up;
        int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
                goto out;
        }
 
-       wake_up->role_id = wl->role_id;
+       wake_up->role_id = wlvif->role_id;
        wake_up->wake_up_event = wl->conf.conn.wake_up_event;
        wake_up->listen_interval = wl->conf.conn.listen_interval;
 
@@ -84,7 +85,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       int power)
 {
        struct acx_current_tx_power *acx;
        int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->current_tx_power = power * 10;
 
        ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_feature_config *feature;
        int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
        }
 
        /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
-       feature->role_id = wl->role_id;
+       feature->role_id = wlvif->role_id;
        feature->data_flow_options = 0;
        feature->options = 0;
 
@@ -210,7 +212,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                   enum acx_slot_type slot_time)
 {
        struct acx_slot *slot;
        int ret;
@@ -223,7 +226,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
                goto out;
        }
 
-       slot->role_id = wl->role_id;
+       slot->role_id = wlvif->role_id;
        slot->wone_index = STATION_WONE_INDEX;
        slot->slot_time = slot_time;
 
@@ -238,8 +241,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
-                                void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable, void *mc_list, u32 mc_list_len)
 {
        struct acx_dot11_grp_addr_tbl *acx;
        int ret;
@@ -253,7 +256,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
        }
 
        /* MAC filtering */
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->enabled = enable;
        acx->num_groups = mc_list_len;
        memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +273,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif)
 {
        struct acx_rx_timeout *rx_timeout;
        int ret;
@@ -283,7 +287,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_ACX, "acx service period timeout");
 
-       rx_timeout->role_id = wl->role_id;
+       rx_timeout->role_id = wlvif->role_id;
        rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
        rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
 
@@ -300,7 +304,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u32 rts_threshold)
 {
        struct acx_rts_threshold *rts;
        int ret;
@@ -320,7 +325,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
                goto out;
        }
 
-       rts->role_id = wl->role_id;
+       rts->role_id = wlvif->role_id;
        rts->threshold = cpu_to_le16((u16)rts_threshold);
 
        ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +368,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable_filter)
 {
        struct acx_beacon_filter_option *beacon_filter = NULL;
        int ret = 0;
@@ -380,7 +386,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
                goto out;
        }
 
-       beacon_filter->role_id = wl->role_id;
+       beacon_filter->role_id = wlvif->role_id;
        beacon_filter->enable = enable_filter;
 
        /*
@@ -401,7 +407,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif)
 {
        struct acx_beacon_filter_ie_table *ie_table;
        int i, idx = 0;
@@ -417,7 +424,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
        }
 
        /* configure default beacon pass-through rules */
-       ie_table->role_id = wl->role_id;
+       ie_table->role_id = wlvif->role_id;
        ie_table->num_ie = 0;
        for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
                struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +465,8 @@ out:
 
 #define ACX_CONN_MONIT_DISABLE_VALUE  0xffffffff
 
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable)
 {
        struct acx_conn_monit_params *acx;
        u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +487,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
                timeout = wl->conf.conn.bss_lose_timeout;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->synch_fail_thold = cpu_to_le32(threshold);
        acx->bss_lose_timeout = cpu_to_le32(timeout);
 
@@ -582,7 +590,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_beacon_broadcast *bb;
        int ret;
@@ -595,7 +603,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
                goto out;
        }
 
-       bb->role_id = wl->role_id;
+       bb->role_id = wlvif->role_id;
        bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
        bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
        bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +620,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
 {
        struct acx_aid *acx_aid;
        int ret;
@@ -625,7 +633,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
                goto out;
        }
 
-       acx_aid->role_id = wl->role_id;
+       acx_aid->role_id = wlvif->role_id;
        acx_aid->aid = cpu_to_le16(aid);
 
        ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +676,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           enum acx_preamble_type preamble)
 {
        struct acx_preamble *acx;
        int ret;
@@ -681,7 +690,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->preamble = preamble;
 
        ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +704,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                           enum acx_ctsprotect_type ctsprotect)
 {
        struct acx_ctsprotect *acx;
@@ -709,7 +718,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->ctsprotect = ctsprotect;
 
        ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +748,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
        return 0;
 }
 
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct acx_rate_policy *acx;
        struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +764,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
        }
 
        wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
-               wl->basic_rate, wl->rate_set);
+               wlvif->basic_rate, wlvif->rate_set);
 
        /* configure one basic rate class */
-       acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
-       acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+       acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+       acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
        acx->rate_policy.short_retry_limit = c->short_retry_limit;
        acx->rate_policy.long_retry_limit = c->long_retry_limit;
        acx->rate_policy.aflags = c->aflags;
@@ -771,8 +780,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
        }
 
        /* configure one AP supported rate class */
-       acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
-       acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+       acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+       acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
        acx->rate_policy.short_retry_limit = c->short_retry_limit;
        acx->rate_policy.long_retry_limit = c->long_retry_limit;
        acx->rate_policy.aflags = c->aflags;
@@ -788,7 +797,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
         * (p2p packets should always go out with OFDM rates, even
         * if we are currently connected to 11b AP)
         */
-       acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+       acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
        acx->rate_policy.enabled_rates =
                                cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
        acx->rate_policy.short_retry_limit = c->short_retry_limit;
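What the rate-policy change above buys: instead of one fixed set of global policy slots (ACX_TX_BASIC_RATE and friends, removed further down), each vif owns its own slot indices, so two interfaces can program different basic/AP/P2P rate classes side by side. A simplified stand-in, not the acx_rate_policy layout:

#include <stdint.h>
#include <stdio.h>

struct vif_rates {
	uint32_t basic_rate_idx, ap_rate_idx, p2p_rate_idx;
	uint32_t basic_rate, rate_set;
};

static void program_policy(uint32_t idx, uint32_t rates)
{
	printf("policy slot %u -> rates 0x%x\n", idx, rates);
}

static void configure_vif_rates(const struct vif_rates *v)
{
	program_policy(v->basic_rate_idx, v->basic_rate);
	program_policy(v->ap_rate_idx, v->rate_set);
	program_policy(v->p2p_rate_idx, 0x1e0);  /* hypothetical OFDM-only mask */
}

int main(void)
{
	struct vif_rates sta = { 0, 1, 2, 0x3, 0xfff };
	struct vif_rates p2p = { 3, 4, 5, 0x1e0, 0xff0 };

	configure_vif_rates(&sta);
	configure_vif_rates(&p2p);
	return 0;
}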
@@ -839,8 +848,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
-                     u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
 {
        struct acx_ac_cfg *acx;
        int ret = 0;
@@ -855,7 +864,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->ac = ac;
        acx->cw_min = cw_min;
        acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +882,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 queue_id, u8 channel_type,
                       u8 tsid, u8 ps_scheme, u8 ack_policy,
                       u32 apsd_conf0, u32 apsd_conf1)
 {
@@ -889,7 +899,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->queue_id = queue_id;
        acx->channel_type = channel_type;
        acx->tsid = tsid;
@@ -1098,7 +1108,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         bool enable)
 {
        struct wl1271_acx_bet_enable *acx = NULL;
        int ret = 0;
@@ -1114,7 +1125,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
        acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
 
@@ -1129,7 +1140,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u8 enable, __be32 address)
 {
        struct wl1271_acx_arp_filter *acx;
        int ret;
@@ -1142,7 +1154,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->version = ACX_IPV4_VERSION;
        acx->enable = enable;
 
@@ -1189,7 +1201,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              bool enable)
 {
        struct wl1271_acx_keep_alive_mode *acx = NULL;
        int ret = 0;
@@ -1202,7 +1215,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->enabled = enable;
 
        ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1229,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                u8 index, u8 tpl_valid)
 {
        struct wl1271_acx_keep_alive_config *acx = NULL;
        int ret = 0;
@@ -1229,7 +1243,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
        acx->index = index;
        acx->tpl_validation = tpl_valid;
@@ -1247,8 +1261,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
-                               s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               bool enable, s16 thold, u8 hyst)
 {
        struct wl1271_acx_rssi_snr_trigger *acx = NULL;
        int ret = 0;
@@ -1261,9 +1275,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
                goto out;
        }
 
-       wl->last_rssi_event = -1;
+       wlvif->last_rssi_event = -1;
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
        acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
        acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1302,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
        struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1317,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->rssi_beacon = c->avg_weight_rssi_beacon;
        acx->rssi_data = c->avg_weight_rssi_data;
        acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1382,7 @@ out:
 }
 
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif,
                                   u16 ht_operation_mode)
 {
        struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1396,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
                goto out;
        }
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->ht_protection =
                (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
        acx->rifs_mode = 0;
@@ -1402,7 +1418,8 @@ out:
 }
 
 /* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_ba_initiator_policy *acx;
        int ret;
@@ -1416,7 +1433,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
        }
 
        /* set for the current role */
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
        acx->win_size = wl->conf.ht.tx_ba_win_size;
        acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1511,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              bool enable)
 {
        struct wl1271_acx_ps_rx_streaming *rx_streaming;
        u32 conf_queues, enable_queues;
@@ -1523,7 +1541,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
                if (!(conf_queues & BIT(i)))
                        continue;
 
-               rx_streaming->role_id = wl->role_id;
+               rx_streaming->role_id = wlvif->role_id;
                rx_streaming->tid = i;
                rx_streaming->enable = enable_queues & BIT(i);
                rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1560,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_ap_max_tx_retry *acx = NULL;
        int ret;
@@ -1553,7 +1571,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
        if (!acx)
                return -ENOMEM;
 
-       acx->role_id = wl->role_id;
+       acx->role_id = wlvif->role_id;
        acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
 
        ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1585,7 @@ out:
        return ret;
 }
 
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl1271_acx_config_ps *config_ps;
        int ret;
@@ -1582,7 +1600,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
 
        config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
        config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
-       config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+       config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
 
        ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
                                   sizeof(*config_ps));
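The pattern running through all of these wl12xx hunks is the same: per-interface state (role_id, rate masks, and so on) moves out of the global device struct into a per-vif struct that every helper now takes explicitly, so several virtual interfaces can coexist on one device. A bare-bones sketch of that split, with invented struct names and fields rather than the real wl1271/wl12xx_vif definitions:

#include <stdint.h>
#include <stdio.h>

struct device_state {            /* shared, per-chip configuration */
	int listen_interval;
};

struct vif_state {               /* per virtual interface */
	uint8_t role_id;
	uint32_t basic_rate;
};

/* helpers take both: shared config from dev, identity from the vif */
static void configure_wakeup(const struct device_state *dev,
			     const struct vif_state *vif)
{
	printf("role %u: listen_interval=%d\n",
	       vif->role_id, dev->listen_interval);
}

int main(void)
{
	struct device_state dev = { .listen_interval = 10 };
	struct vif_state sta = { .role_id = 0, .basic_rate = 0x2 };
	struct vif_state ap  = { .role_id = 1, .basic_rate = 0xf };

	configure_wakeup(&dev, &sta);
	configure_wakeup(&dev, &ap);
	return 0;
}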
index e3f93b4..b2d85be 100644 (file)
@@ -654,11 +654,6 @@ struct acx_rate_class {
        u8 reserved;
 };
 
-#define ACX_TX_BASIC_RATE      0
-#define ACX_TX_AP_FULL_RATE    1
-#define ACX_TX_BASIC_RATE_P2P  2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
 struct acx_rate_policy {
        struct acx_header header;
 
@@ -1234,39 +1229,49 @@ enum {
 };
 
 
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif);
 int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_mem_map(struct wl1271 *wl,
                       struct acx_header *mem_map, size_t len);
 int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
 int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
-                                void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                   enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u32 rts_threshold);
 int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                bool enable);
 int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
 int wl12xx_acx_sg_cfg(struct wl1271 *wl);
 int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
 int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                           enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                           enum acx_ctsprotect_type ctsprotect);
 int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
                      u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
-                     u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 queue_id, u8 channel_type,
                       u8 tsid, u8 ps_scheme, u8 ack_policy,
                       u32 apsd_conf0, u32 apsd_conf1);
 int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1281,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
 int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
 int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u8 enable, __be32 address);
 int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
-                               s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
+                              bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif);
 int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
                                    struct ieee80211_sta_ht_cap *ht_cap,
                                    bool allow_ht_operation, u8 hlid);
 int wl1271_acx_set_ht_information(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif,
                                   u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif);
 int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
                                       u16 ssn, bool enable, u8 peer_hlid);
 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
index 6813379..8f9cf5a 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/wl12xx.h>
 #include <linux/export.h>
 
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
 #include "boot.h"
@@ -347,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
                nvs_ptr += 3;
 
                for (i = 0; i < burst_len; i++) {
+                       if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+                               goto out_badnvs;
+
                        val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
                               | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
 
@@ -358,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
                        nvs_ptr += 4;
                        dest_addr += 4;
                }
+
+               if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+                       goto out_badnvs;
        }
 
        /*
@@ -369,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
         */
        nvs_ptr = (u8 *)wl->nvs +
                        ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+       if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+               goto out_badnvs;
+
        nvs_len -= nvs_ptr - (u8 *)wl->nvs;
 
        /* Now we must set the partition correctly */
@@ -384,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
 
        kfree(nvs_aligned);
        return 0;
+
+out_badnvs:
+       wl1271_error("nvs data is malformed");
+       return -EILSEQ;
 }
 
 static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
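The NVS-upload hunks above add bounds checks so the parser never reads past the end of the NVS blob: before each multi-byte read or cursor advance it verifies the access stays inside the buffer and otherwise bails out with -EILSEQ. A self-contained sketch of that defensive parsing style on an invented TLV format (the real NVS layout is more involved):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static int parse_blob(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *end = buf + len;

	while (p < end) {
		/* need at least a 1-byte tag and a 1-byte length */
		if (p + 2 > end)
			goto out_bad;

		uint8_t tag = p[0], plen = p[1];
		p += 2;

		if (p + plen > end)             /* payload must fit too */
			goto out_bad;

		printf("tag 0x%02x, %u payload bytes\n", tag, plen);
		p += plen;
	}
	return 0;

out_bad:
	fprintf(stderr, "blob is malformed\n");
	return -1;
}

int main(void)
{
	uint8_t good[] = { 0x01, 0x02, 0xaa, 0xbb, 0x02, 0x00 };
	uint8_t bad[]  = { 0x01, 0x08, 0xaa };  /* claims 8 bytes, has 1 */

	parse_blob(good, sizeof(good));
	return parse_blob(bad, sizeof(bad)) ? 1 : 0;
}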
index a52299e..e0d2179 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "reg.h"
 #include "io.h"
 #include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
        if (!wl->nvs)
                return -ENODEV;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from INI out of bounds");
+               return -EINVAL;
+       }
+
        gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
        if (!gen_parms)
                return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
        gp->tx_bip_fem_manufacturer =
                gen_parms->general_params.tx_bip_fem_manufacturer;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from FW out of bounds");
+               ret = -EINVAL;
+               goto out;
+       }
+
        wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
                     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
 
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
        if (!wl->nvs)
                return -ENODEV;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from INI out of bounds");
+               return -EINVAL;
+       }
+
        gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
        if (!gen_parms)
                return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
        gp->tx_bip_fem_manufacturer =
                gen_parms->general_params.tx_bip_fem_manufacturer;
 
+       if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+               wl1271_warning("FEM index from FW out of bounds");
+               ret = -EINVAL;
+               goto out;
+       }
+
        wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
                     answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
 
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
        return 0;
 }
 
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+                          u8 *role_id)
 {
        struct wl12xx_cmd_role_enable *cmd;
        int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
                goto out_free;
        }
 
-       memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+       memcpy(cmd->mac_address, addr, ETH_ALEN);
        cmd->role_type = role_type;
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
        return ret;
 }
 
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
        u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
        if (link >= WL12XX_MAX_LINKS)
                return -EBUSY;
 
        __set_bit(link, wl->links_map);
+       __set_bit(link, wlvif->links_map);
        *hlid = link;
        return 0;
 }
 
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
        if (*hlid == WL12XX_INVALID_LINK_ID)
                return;
 
        __clear_bit(*hlid, wl->links_map);
+       __clear_bit(*hlid, wlvif->links_map);
        *hlid = WL12XX_INVALID_LINK_ID;
 }
 
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif)
 {
-       if (wl->session_counter >= SESSION_COUNTER_MAX)
-               wl->session_counter = 0;
+       if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+               wlvif->session_counter = 0;
 
-       wl->session_counter++;
+       wlvif->session_counter++;
 
-       return wl->session_counter;
+       return wlvif->session_counter;
 }
 
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_start *cmd;
        int ret;
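The hunk above also shows the link (HLID) allocation scheme: a device-wide bitmap tracks which link IDs are in use, allocation takes the first clear bit, and the new code additionally records the link in a per-vif bitmap so links can be torn down per interface. A small standalone sketch of that bookkeeping (MAX_LINKS and the bitmap width are illustrative, not WL12XX_MAX_LINKS):

#include <stdint.h>
#include <stdio.h>

#define MAX_LINKS	12
#define INVALID_LINK	0xff

static int allocate_link(uint32_t *dev_map, uint32_t *vif_map, uint8_t *hlid)
{
	for (uint8_t link = 0; link < MAX_LINKS; link++) {
		if (!(*dev_map & (1u << link))) {
			*dev_map |= 1u << link;         /* device-wide map */
			*vif_map |= 1u << link;         /* per-vif map */
			*hlid = link;
			return 0;
		}
	}
	return -1;                                      /* all links busy */
}

static void free_link(uint32_t *dev_map, uint32_t *vif_map, uint8_t *hlid)
{
	if (*hlid == INVALID_LINK)
		return;
	*dev_map &= ~(1u << *hlid);
	*vif_map &= ~(1u << *hlid);
	*hlid = INVALID_LINK;
}

int main(void)
{
	uint32_t dev_map = 0, vif_map = 0;
	uint8_t hlid = INVALID_LINK;

	if (allocate_link(&dev_map, &vif_map, &hlid) == 0)
		printf("allocated hlid %u\n", hlid);
	free_link(&dev_map, &vif_map, &hlid);
	return 0;
}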
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
 
-       cmd->role_id = wl->dev_role_id;
-       if (wl->band == IEEE80211_BAND_5GHZ)
+       cmd->role_id = wlvif->dev_role_id;
+       if (wlvif->band == IEEE80211_BAND_5GHZ)
                cmd->band = WL12XX_BAND_5GHZ;
-       cmd->channel = wl->channel;
+       cmd->channel = wlvif->channel;
 
-       if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
-               ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+       if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+               ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
                if (ret)
                        goto out_free;
        }
-       cmd->device.hlid = wl->dev_hlid;
-       cmd->device.session = wl->session_counter;
+       cmd->device.hlid = wlvif->dev_hlid;
+       cmd->device.session = wlvif->session_counter;
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
                     cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
 
 err_hlid:
        /* clear links on error */
-       __clear_bit(wl->dev_hlid, wl->links_map);
-       wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+       wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
        kfree(cmd);
@@ -513,12 +539,13 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
 
-       if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+       if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
                return -EINVAL;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_CMD, "cmd role stop dev");
 
-       cmd->role_id = wl->dev_role_id;
+       cmd->role_id = wlvif->dev_role_id;
        cmd->disc_type = DISCONNECT_IMMEDIATE;
        cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
 
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
                goto out_free;
        }
 
-       wl12xx_free_link(wl, &wl->dev_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
 
 out_free:
        kfree(cmd);
@@ -554,8 +581,9 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_cmd_role_start *cmd;
        int ret;
 
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
-       if (wl->band == IEEE80211_BAND_5GHZ)
+       cmd->role_id = wlvif->role_id;
+       if (wlvif->band == IEEE80211_BAND_5GHZ)
                cmd->band = WL12XX_BAND_5GHZ;
-       cmd->channel = wl->channel;
-       cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-       cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->channel = wlvif->channel;
+       cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+       cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
-       cmd->sta.ssid_len = wl->ssid_len;
-       memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
-       memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
-       cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+       cmd->sta.ssid_len = wlvif->ssid_len;
+       memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+       memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+       cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
 
-       if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
-               ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+       if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+               ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
                if (ret)
                        goto out_free;
        }
-       cmd->sta.hlid = wl->sta_hlid;
-       cmd->sta.session = wl12xx_get_new_session_id(wl);
-       cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+       cmd->sta.hlid = wlvif->sta.hlid;
+       cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+       cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
                     "basic_rate_set: 0x%x, remote_rates: 0x%x",
-                    wl->role_id, cmd->sta.hlid, cmd->sta.session,
-                    wl->basic_rate_set, wl->rate_set);
+                    wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+                    wlvif->basic_rate_set, wlvif->rate_set);
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
 
 err_hlid:
        /* clear links on error. */
-       wl12xx_free_link(wl, &wl->sta_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
        kfree(cmd);
@@ -613,12 +641,12 @@ out:
 }
 
 /* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
 
-       if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+       if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
                return -EINVAL;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
+       cmd->role_id = wlvif->role_id;
        cmd->disc_type = DISCONNECT_IMMEDIATE;
        cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
 
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
                goto out_free;
        }
 
-       wl12xx_free_link(wl, &wl->sta_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
        kfree(cmd);
@@ -648,16 +676,17 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_start *cmd;
-       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        int ret;
 
-       wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
 
        /* trying to use hidden SSID with an old hostapd version */
-       if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+       if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
                wl1271_error("got a null SSID from beacon/bss");
                ret = -EINVAL;
                goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
                goto out;
        }
 
-       ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+       ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
        if (ret < 0)
                goto out_free;
 
-       ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+       ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
        if (ret < 0)
                goto out_free_global;
 
-       cmd->role_id = wl->role_id;
+       cmd->role_id = wlvif->role_id;
        cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
        cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
-       cmd->ap.global_hlid = wl->ap_global_hlid;
-       cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
-       cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-       cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->ap.global_hlid = wlvif->ap.global_hlid;
+       cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+       cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+       cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->ap.dtim_interval = bss_conf->dtim_period;
        cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
-       cmd->channel = wl->channel;
+       cmd->channel = wlvif->channel;
 
        if (!bss_conf->hidden_ssid) {
                /* take the SSID from the beacon for backward compatibility */
                cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
-               cmd->ap.ssid_len = wl->ssid_len;
-               memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+               cmd->ap.ssid_len = wlvif->ssid_len;
+               memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
        } else {
                cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
                cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
 
        cmd->ap.local_rates = cpu_to_le32(0xffffffff);
 
-       switch (wl->band) {
+       switch (wlvif->band) {
        case IEEE80211_BAND_2GHZ:
                cmd->band = RADIO_BAND_2_4GHZ;
                break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
                cmd->band = RADIO_BAND_5GHZ;
                break;
        default:
-               wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+               wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
                cmd->band = RADIO_BAND_2_4GHZ;
                break;
        }
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
        goto out_free;
 
 out_free_bcast:
-       wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
 
 out_free_global:
-       wl12xx_free_link(wl, &wl->ap_global_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
 
 out_free:
        kfree(cmd);
@@ -735,7 +764,7 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl12xx_cmd_role_stop *cmd;
        int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
+       cmd->role_id = wlvif->role_id;
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
                goto out_free;
        }
 
-       wl12xx_free_link(wl, &wl->ap_bcast_hlid);
-       wl12xx_free_link(wl, &wl->ap_global_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
 
 out_free:
        kfree(cmd);
@@ -766,10 +795,11 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_cmd_role_start *cmd;
-       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        int ret;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
                goto out;
        }
 
-       wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+       wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
 
-       cmd->role_id = wl->role_id;
-       if (wl->band == IEEE80211_BAND_5GHZ)
+       cmd->role_id = wlvif->role_id;
+       if (wlvif->band == IEEE80211_BAND_5GHZ)
                cmd->band = WL12XX_BAND_5GHZ;
-       cmd->channel = wl->channel;
-       cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
-       cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+       cmd->channel = wlvif->channel;
+       cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+       cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
        cmd->ibss.dtim_interval = bss_conf->dtim_period;
        cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
-       cmd->ibss.ssid_len = wl->ssid_len;
-       memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
-       memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
-       cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+       cmd->ibss.ssid_len = wlvif->ssid_len;
+       memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+       memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+       cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
 
-       if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
-               ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+       if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+               ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
                if (ret)
                        goto out_free;
        }
-       cmd->ibss.hlid = wl->sta_hlid;
-       cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+       cmd->ibss.hlid = wlvif->sta.hlid;
+       cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
 
        wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
                     "basic_rate_set: 0x%x, remote_rates: 0x%x",
-                    wl->role_id, cmd->sta.hlid, cmd->sta.session,
-                    wl->basic_rate_set, wl->rate_set);
+                    wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+                    wlvif->basic_rate_set, wlvif->rate_set);
 
-       wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+       wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+                    vif->bss_conf.bssid);
 
        ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
 
 err_hlid:
        /* clear links on error. */
-       wl12xx_free_link(wl, &wl->sta_hlid);
+       wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
 
 out_free:
        kfree(cmd);
@@ -962,7 +993,8 @@ out:
        return ret;
 }
 
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 ps_mode)
 {
        struct wl1271_cmd_ps_params *ps_params = NULL;
        int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
                goto out;
        }
 
-       ps_params->role_id = wl->role_id;
+       ps_params->role_id = wlvif->role_id;
        ps_params->ps_mode = ps_mode;
 
        ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
        return ret;
 }
 
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct sk_buff *skb = NULL;
        int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
        int ret = -ENOMEM;
 
 
-       if (wl->bss_type == BSS_TYPE_IBSS) {
+       if (wlvif->bss_type == BSS_TYPE_IBSS) {
                size = sizeof(struct wl12xx_null_data_template);
                ptr = NULL;
        } else {
-               skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+               skb = ieee80211_nullfunc_get(wl->hw,
+                                            wl12xx_wlvif_to_vif(wlvif));
                if (!skb)
                        goto out;
                size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
        }
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
-                                     wl->basic_rate);
+                                     wlvif->basic_rate);
 
 out:
        dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
 
 }
 
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb = NULL;
        int ret = -ENOMEM;
 
-       skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+       skb = ieee80211_nullfunc_get(wl->hw, vif);
        if (!skb)
                goto out;
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
                                      skb->data, skb->len,
                                      CMD_TEMPL_KLV_IDX_NULL_DATA,
-                                     wl->basic_rate);
+                                     wlvif->basic_rate);
 
 out:
        dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
 
 }
 
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u16 aid)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb;
        int ret = 0;
 
-       skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+       skb = ieee80211_pspoll_get(wl->hw, vif);
        if (!skb)
                goto out;
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
-                                     skb->len, 0, wl->basic_rate_set);
+                                     skb->len, 0, wlvif->basic_rate_set);
 
 out:
        dev_kfree_skb(skb);
        return ret;
 }
 
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               const u8 *ssid, size_t ssid_len,
                               const u8 *ie, size_t ie_len, u8 band)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb;
        int ret;
        u32 rate;
 
-       skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+       skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
                                     ie, ie_len);
        if (!skb) {
                ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
 
        wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
        if (band == IEEE80211_BAND_2GHZ)
                ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
                                              skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
 }
 
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif,
                                              struct sk_buff *skb)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        int ret;
        u32 rate;
 
        if (!skb)
-               skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+               skb = ieee80211_ap_probereq_get(wl->hw, vif);
        if (!skb)
                goto out;
 
        wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
-       if (wl->band == IEEE80211_BAND_2GHZ)
+       rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+       if (wlvif->band == IEEE80211_BAND_2GHZ)
                ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
                                              skb->data, skb->len, 0, rate);
        else
@@ -1159,9 +1199,11 @@ out:
        return skb;
 }
 
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            __be32 ip_addr)
 {
        int ret;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct wl12xx_arp_rsp_template tmpl;
        struct ieee80211_hdr_3addr *hdr;
        struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
        hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                         IEEE80211_STYPE_DATA |
                                         IEEE80211_FCTL_TODS);
-       memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
-       memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+       memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+       memcpy(hdr->addr2, vif->addr, ETH_ALEN);
        memset(hdr->addr3, 0xff, ETH_ALEN);
 
        /* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
        arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
 
        /* arp payload */
-       memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+       memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
        tmpl.sender_ip = ip_addr;
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
                                      &tmpl, sizeof(tmpl), 0,
-                                     wl->basic_rate);
+                                     wlvif->basic_rate);
 
        return ret;
 }
 
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct ieee80211_qos_hdr template;
 
        memset(&template, 0, sizeof(template));
 
-       memcpy(template.addr1, wl->bssid, ETH_ALEN);
-       memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
-       memcpy(template.addr3, wl->bssid, ETH_ALEN);
+       memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+       memcpy(template.addr2, vif->addr, ETH_ALEN);
+       memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
 
        template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
                                             IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
 
        return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
                                       sizeof(template), 0,
-                                      wl->basic_rate);
+                                      wlvif->basic_rate);
 }
 
 int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
        return ret;
 }
 
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u16 action, u8 id, u8 key_type,
                       u8 key_size, const u8 *key, const u8 *addr,
                       u32 tx_seq_32, u16 tx_seq_16)
 {
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
        int ret = 0;
 
        /* hlid might have already been deleted */
-       if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+       if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
                return 0;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                goto out;
        }
 
-       cmd->hlid = wl->sta_hlid;
+       cmd->hlid = wlvif->sta.hlid;
 
        if (key_type == KEY_WEP)
                cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
  * TODO: merge with sta/ibss into 1 set_key function.
  * note there are slight diffs
  */
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
-                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
-                       u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u16 action, u8 id, u8 key_type,
+                         u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+                         u16 tx_seq_16)
 {
        struct wl1271_cmd_set_keys *cmd;
        int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
        if (!cmd)
                return -ENOMEM;
 
-       if (hlid == wl->ap_bcast_hlid) {
+       if (hlid == wlvif->ap.bcast_hlid) {
                if (key_type == KEY_WEP)
                        lid_type = WEP_DEFAULT_LID_TYPE;
                else
@@ -1411,7 +1456,8 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       struct ieee80211_sta *sta, u8 hlid)
 {
        struct wl12xx_cmd_add_peer *cmd;
        int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
                else
                        cmd->psd_type[i] = WL1271_PSD_LEGACY;
 
-       sta_rates = sta->supp_rates[wl->band];
+       sta_rates = sta->supp_rates[wlvif->band];
        if (sta->ht_cap.ht_supported)
                sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
 
        cmd->supported_rates =
                cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
-                                                       wl->band));
+                                                       wlvif->band));
 
        wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
                     cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
        return ret;
 }
 
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u8 role_id)
 {
        struct wl12xx_cmd_roc *cmd;
        int ret = 0;
 
-       wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+       wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
 
        if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
                return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
        }
 
        cmd->role_id = role_id;
-       cmd->channel = wl->channel;
-       switch (wl->band) {
+       cmd->channel = wlvif->channel;
+       switch (wlvif->band) {
        case IEEE80211_BAND_2GHZ:
                cmd->band = RADIO_BAND_2_4GHZ;
                break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
                cmd->band = RADIO_BAND_5GHZ;
                break;
        default:
-               wl1271_error("roc - unknown band: %d", (int)wl->band);
+               wl1271_error("roc - unknown band: %d", (int)wlvif->band);
                ret = -EINVAL;
                goto out_free;
        }
@@ -1657,14 +1704,14 @@ out:
        return ret;
 }
 
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
 {
        int ret = 0;
 
        if (WARN_ON(test_bit(role_id, wl->roc_map)))
                return 0;
 
-       ret = wl12xx_cmd_roc(wl, role_id);
+       ret = wl12xx_cmd_roc(wl, wlvif, role_id);
        if (ret < 0)
                goto out;
 
@@ -1753,3 +1800,50 @@ out_free:
 out:
        return ret;
 }
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
+
+       if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+                     wlvif->bss_type == BSS_TYPE_IBSS)))
+               return -EINVAL;
+
+       ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+       if (ret < 0)
+               goto out;
+
+       ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+       if (ret < 0)
+               goto out_stop;
+
+       return 0;
+
+out_stop:
+       wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+       return ret;
+}
+
+/* croc dev hlid, and stop the role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
+
+       if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+                     wlvif->bss_type == BSS_TYPE_IBSS)))
+               return -EINVAL;
+
+       if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+               ret = wl12xx_croc(wl, wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+       if (ret < 0)
+               goto out;
+out:
+       return ret;
+}
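
Running through all of these hunks is the same conversion: per-interface state moves out of struct wl1271 into struct wl12xx_vif, so every command helper that used to read wl->role_id, wl->band and friends now takes an explicit wlvif. The vif <-> wlvif conversion helpers used above are not defined in this patch; the sketch below is only an assumption about their shape, inferred from the container_of() expression that appears in the event.c hunks further down.

/* Assumed helpers -- the real definitions live elsewhere (wl12xx.h) and
 * may differ.  Per-vif driver state sits in the mac80211 drv_priv area. */
static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
{
	return (struct wl12xx_vif *)vif->drv_priv;
}

static inline struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
{
	/* inverse mapping, matching the container_of() in the pspoll work */
	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
}
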
index b7bd427..3f7d0b9 100644
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
 int wl1271_cmd_radio_parms(struct wl1271 *wl);
 int wl128x_cmd_radio_parms(struct wl1271 *wl);
 int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+                          u8 *role_id);
 int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
 int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
 int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u8 ps_mode);
 int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
                           size_t len);
 int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
                            void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               const u8 *ssid, size_t ssid_len,
                               const u8 *ie, size_t ie_len, u8 band);
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif,
                                              struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+                                  struct wl12xx_vif *wlvif);
 int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u16 action, u8 id, u8 key_type,
                           u8 key_size, const u8 *key, const u8 *addr,
                           u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u16 action, u8 id, u8 key_type,
                          u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
                          u16 tx_seq_16);
 int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                       struct ieee80211_sta *sta, u8 hlid);
 int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_channel_switch(struct wl1271 *wl,
                              struct ieee80211_channel_switch *ch_switch);
 int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                        u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
 
 enum wl1271_commands {
        CMD_INTERROGATE     = 1,    /*use this to read information elements*/
index 04bb8fb..1bcfb01 100644
@@ -440,6 +440,10 @@ struct conf_rx_settings {
        CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
        CONF_HW_BIT_RATE_54MBPS)
 
+#define CONF_TX_CCK_RATES  (CONF_HW_BIT_RATE_1MBPS |           \
+       CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS |     \
+       CONF_HW_BIT_RATE_11MBPS)
+
 #define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS |             \
        CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS |      \
        CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS |      \
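
The new CONF_TX_CCK_RATES group collects the 802.11b rates (1, 2, 5.5 and 11 Mbps) next to the existing OFDM group, so band-dependent rate masks can be composed from the two. A minimal illustration of such a composition; the example_* names and the particular unions are hypothetical, not the driver's actual defaults.

/* Illustration only: 2.4 GHz typically allows CCK + OFDM, 5 GHz is OFDM only. */
static const u32 example_rates_2ghz = CONF_TX_CCK_RATES | CONF_TX_OFDM_RATES;
static const u32 example_rates_5ghz = CONF_TX_OFDM_RATES;
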
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 0000000..b85fd8c
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+       DEBUG_NONE      = 0,
+       DEBUG_IRQ       = BIT(0),
+       DEBUG_SPI       = BIT(1),
+       DEBUG_BOOT      = BIT(2),
+       DEBUG_MAILBOX   = BIT(3),
+       DEBUG_TESTMODE  = BIT(4),
+       DEBUG_EVENT     = BIT(5),
+       DEBUG_TX        = BIT(6),
+       DEBUG_RX        = BIT(7),
+       DEBUG_SCAN      = BIT(8),
+       DEBUG_CRYPT     = BIT(9),
+       DEBUG_PSM       = BIT(10),
+       DEBUG_MAC80211  = BIT(11),
+       DEBUG_CMD       = BIT(12),
+       DEBUG_ACX       = BIT(13),
+       DEBUG_SDIO      = BIT(14),
+       DEBUG_FILTERS   = BIT(15),
+       DEBUG_ADHOC     = BIT(16),
+       DEBUG_AP        = BIT(17),
+       DEBUG_MASTER    = (DEBUG_ADHOC | DEBUG_AP),
+       DEBUG_ALL       = ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+       pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+       pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+       do { \
+               if (level & wl12xx_debug_level) \
+                       pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+       } while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len)   \
+       do { \
+               if (level & wl12xx_debug_level) \
+                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+                                      DUMP_PREFIX_OFFSET, 16, 1,       \
+                                      buf,                             \
+                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+                                      0);                              \
+       } while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len)     \
+       do { \
+               if (level & wl12xx_debug_level) \
+                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+                                      DUMP_PREFIX_OFFSET, 16, 1,       \
+                                      buf,                             \
+                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+                                      true);                           \
+       } while (0)
+
+#endif /* __DEBUG_H__ */
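
The wl1271_debug()/wl1271_dump() call sites in the hunks above compile against this header; output is gated at run time by the wl12xx_debug_level bitmask, so a category can be enabled without rebuilding. A short usage sketch, wrapped in a hypothetical function so it stands on its own (both calls mirror ones that already appear in cmd.c above):

static void example_debug_usage(struct wl12xx_vif *wlvif, struct sk_buff *skb)
{
	/* emitted only when DEBUG_CMD is set in wl12xx_debug_level */
	wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);

	/* hex dump of the frame, truncated to DEBUG_DUMP_LIMIT bytes */
	wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
}
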
index 3999fd5..2e14b43 100644
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "ps.h"
 #include "io.h"
@@ -346,29 +347,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
        DRIVER_STATE_PRINT_INT(tx_results_count);
        DRIVER_STATE_PRINT_LHEX(flags);
        DRIVER_STATE_PRINT_INT(tx_blocks_freed);
-       DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
        DRIVER_STATE_PRINT_INT(rx_counter);
-       DRIVER_STATE_PRINT_INT(session_counter);
        DRIVER_STATE_PRINT_INT(state);
-       DRIVER_STATE_PRINT_INT(bss_type);
        DRIVER_STATE_PRINT_INT(channel);
-       DRIVER_STATE_PRINT_HEX(rate_set);
-       DRIVER_STATE_PRINT_HEX(basic_rate_set);
-       DRIVER_STATE_PRINT_HEX(basic_rate);
        DRIVER_STATE_PRINT_INT(band);
-       DRIVER_STATE_PRINT_INT(beacon_int);
-       DRIVER_STATE_PRINT_INT(psm_entry_retry);
-       DRIVER_STATE_PRINT_INT(ps_poll_failures);
        DRIVER_STATE_PRINT_INT(power_level);
-       DRIVER_STATE_PRINT_INT(rssi_thold);
-       DRIVER_STATE_PRINT_INT(last_rssi_event);
        DRIVER_STATE_PRINT_INT(sg_enabled);
        DRIVER_STATE_PRINT_INT(enable_11a);
        DRIVER_STATE_PRINT_INT(noise);
-       DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
-       DRIVER_STATE_PRINT_INT(last_tx_hlid);
-       DRIVER_STATE_PRINT_INT(ba_support);
-       DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
        DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
        DRIVER_STATE_PRINT_LHEX(ap_ps_map);
        DRIVER_STATE_PRINT_HEX(quirks);
@@ -399,6 +385,115 @@ static const struct file_operations driver_state_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
+       int ret, res = 0;
+       const int buf_size = 4096;
+       char *buf;
+       char tmp_buf[64];
+
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt)                                \
+       (res += scnprintf(buf + res, buf_size - res,    \
+                         #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x)  VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x)   VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x)   VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x)  VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x)   VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len)                           \
+       do {                                                    \
+               memset(tmp_buf, 0, sizeof(tmp_buf));            \
+               memcpy(tmp_buf, wlvif->x,                       \
+                      min_t(u8, len, sizeof(tmp_buf) - 1));    \
+               res += scnprintf(buf + res, buf_size - res,     \
+                                #x " = %s\n", tmp_buf);        \
+       } while (0)
+
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               VIF_STATE_PRINT_INT(role_id);
+               VIF_STATE_PRINT_INT(bss_type);
+               VIF_STATE_PRINT_LHEX(flags);
+               VIF_STATE_PRINT_INT(p2p);
+               VIF_STATE_PRINT_INT(dev_role_id);
+               VIF_STATE_PRINT_INT(dev_hlid);
+
+               if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+                   wlvif->bss_type == BSS_TYPE_IBSS) {
+                       VIF_STATE_PRINT_INT(sta.hlid);
+                       VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+                       VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+                       VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+                       VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+               } else {
+                       VIF_STATE_PRINT_INT(ap.global_hlid);
+                       VIF_STATE_PRINT_INT(ap.bcast_hlid);
+                       VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+                       VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+                       VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+                       VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+               }
+               VIF_STATE_PRINT_INT(last_tx_hlid);
+               VIF_STATE_PRINT_LHEX(links_map[0]);
+               VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+               VIF_STATE_PRINT_INT(band);
+               VIF_STATE_PRINT_INT(channel);
+               VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+               VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+               VIF_STATE_PRINT_HEX(basic_rate_set);
+               VIF_STATE_PRINT_HEX(basic_rate);
+               VIF_STATE_PRINT_HEX(rate_set);
+               VIF_STATE_PRINT_INT(beacon_int);
+               VIF_STATE_PRINT_INT(default_key);
+               VIF_STATE_PRINT_INT(aid);
+               VIF_STATE_PRINT_INT(session_counter);
+               VIF_STATE_PRINT_INT(ps_poll_failures);
+               VIF_STATE_PRINT_INT(psm_entry_retry);
+               VIF_STATE_PRINT_INT(power_level);
+               VIF_STATE_PRINT_INT(rssi_thold);
+               VIF_STATE_PRINT_INT(last_rssi_event);
+               VIF_STATE_PRINT_INT(ba_support);
+               VIF_STATE_PRINT_INT(ba_allowed);
+               VIF_STATE_PRINT_LLHEX(tx_security_seq);
+               VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+       }
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+       mutex_unlock(&wl->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+       kfree(buf);
+       return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+       .read = vifs_state_read,
+       .open = wl1271_open_file_generic,
+       .llseek = default_llseek,
+};
+
 static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
 {
@@ -520,6 +615,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
                           size_t count, loff_t *ppos)
 {
        struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
        unsigned long value;
        int ret;
 
@@ -543,7 +639,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
        if (ret < 0)
                goto out;
 
-       wl1271_recalc_rx_streaming(wl);
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               wl1271_recalc_rx_streaming(wl, wlvif);
+       }
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -572,6 +670,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
                           size_t count, loff_t *ppos)
 {
        struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
        unsigned long value;
        int ret;
 
@@ -595,7 +694,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
        if (ret < 0)
                goto out;
 
-       wl1271_recalc_rx_streaming(wl);
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               wl1271_recalc_rx_streaming(wl, wlvif);
+       }
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -624,6 +725,7 @@ static ssize_t beacon_filtering_write(struct file *file,
                                      size_t count, loff_t *ppos)
 {
        struct wl1271 *wl = file->private_data;
+       struct wl12xx_vif *wlvif;
        char buf[10];
        size_t len;
        unsigned long value;
@@ -646,7 +748,9 @@ static ssize_t beacon_filtering_write(struct file *file,
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+       }
 
        wl1271_ps_elp_sleep(wl);
 out:
@@ -770,6 +874,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
        DEBUGFS_ADD(gpio_power, rootdir);
        DEBUGFS_ADD(start_recovery, rootdir);
        DEBUGFS_ADD(driver_state, rootdir);
+       DEBUGFS_ADD(vifs_state, rootdir);
        DEBUGFS_ADD(dtim_interval, rootdir);
        DEBUGFS_ADD(beacon_interval, rootdir);
        DEBUGFS_ADD(beacon_filtering, rootdir);
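
The new vifs_state entry iterates every active interface and dumps the per-vif counterparts of the fields removed from driver_state above. Each VIF_STATE_PRINT helper is a thin scnprintf() wrapper; VIF_STATE_PRINT_INT(role_id), for instance, expands to roughly the following (shown only to illustrate the macro, not as additional driver code):

	res += scnprintf(buf + res, buf_size - res,
			 "role_id = %d\n", wlvif->role_id);
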
index 674ad2a..00ce794 100644
@@ -22,6 +22,7 @@
  */
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "reg.h"
 #include "io.h"
 #include "event.h"
 
 void wl1271_pspoll_work(struct work_struct *work)
 {
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        struct delayed_work *dwork;
        struct wl1271 *wl;
        int ret;
 
        dwork = container_of(work, struct delayed_work, work);
-       wl = container_of(dwork, struct wl1271, pspoll_work);
+       wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+       vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+       wl = wlvif->wl;
 
        wl1271_debug(DEBUG_EVENT, "pspoll work");
 
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+       if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
                goto out;
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out;
 
        /*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
        if (ret < 0)
                goto out;
 
-       wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+       wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+                          wlvif->basic_rate, true);
 
        wl1271_ps_elp_sleep(wl);
 out:
        mutex_unlock(&wl->mutex);
 };
 
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif)
 {
        int delay = wl->conf.conn.ps_poll_recovery_period;
        int ret;
 
-       wl->ps_poll_failures++;
-       if (wl->ps_poll_failures == 1)
+       wlvif->ps_poll_failures++;
+       if (wlvif->ps_poll_failures == 1)
                wl1271_info("AP with dysfunctional ps-poll, "
                            "trying to work around it.");
 
        /* force active mode receive data from the AP */
-       if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
-               ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                        wl->basic_rate, true);
+       if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+               ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+                                        wlvif->basic_rate, true);
                if (ret < 0)
                        return;
-               set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
-               ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+               set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+               ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
                                             msecs_to_jiffies(delay));
        }
 
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
 }
 
 static int wl1271_event_ps_report(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif,
                                  struct event_mailbox *mbox,
                                  bool *beacon_loss)
 {
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
        case EVENT_ENTER_POWER_SAVE_FAIL:
                wl1271_debug(DEBUG_PSM, "PSM entry failed");
 
-               if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+               if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
                        /* remain in active mode */
-                       wl->psm_entry_retry = 0;
+                       wlvif->psm_entry_retry = 0;
                        break;
                }
 
-               if (wl->psm_entry_retry < total_retries) {
-                       wl->psm_entry_retry++;
-                       ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-                                                wl->basic_rate, true);
+               if (wlvif->psm_entry_retry < total_retries) {
+                       wlvif->psm_entry_retry++;
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_POWER_SAVE_MODE,
+                                                wlvif->basic_rate, true);
                } else {
                        wl1271_info("No ack to nullfunc from AP.");
-                       wl->psm_entry_retry = 0;
+                       wlvif->psm_entry_retry = 0;
                        *beacon_loss = true;
                }
                break;
        case EVENT_ENTER_POWER_SAVE_SUCCESS:
-               wl->psm_entry_retry = 0;
-
-               /* enable beacon filtering */
-               ret = wl1271_acx_beacon_filter_opt(wl, true);
-               if (ret < 0)
-                       break;
+               wlvif->psm_entry_retry = 0;
 
                /*
                 * BET has only a minor effect in 5GHz and masks
                 * channel switch IEs, so we only enable BET on 2.4GHz
                */
-               if (wl->band == IEEE80211_BAND_2GHZ)
+               if (wlvif->band == IEEE80211_BAND_2GHZ)
                        /* enable beacon early termination */
-                       ret = wl1271_acx_bet_enable(wl, true);
+                       ret = wl1271_acx_bet_enable(wl, wlvif, true);
 
-               if (wl->ps_compl) {
-                       complete(wl->ps_compl);
-                       wl->ps_compl = NULL;
+               if (wlvif->ps_compl) {
+                       complete(wlvif->ps_compl);
+                       wlvif->ps_compl = NULL;
                }
                break;
        default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
 }
 
 static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif,
                                      struct event_mailbox *mbox)
 {
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        enum nl80211_cqm_rssi_threshold_event event;
        s8 metric = mbox->rssi_snr_trigger_metric[0];
 
        wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
 
-       if (metric <= wl->rssi_thold)
+       if (metric <= wlvif->rssi_thold)
                event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
        else
                event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
 
-       if (event != wl->last_rssi_event)
-               ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
-       wl->last_rssi_event = event;
+       if (event != wlvif->last_rssi_event)
+               ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+       wlvif->last_rssi_event = event;
 }
 
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-       if (wl->bss_type != BSS_TYPE_AP_BSS) {
-               if (!wl->ba_rx_bitmap)
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+               if (!wlvif->sta.ba_rx_bitmap)
                        return;
-               ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
-                                            wl->bssid);
+               ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+                                            vif->bss_conf.bssid);
        } else {
-               int i;
+               u8 hlid;
                struct wl1271_link *lnk;
-               for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
-                       lnk = &wl->links[i];
-                       if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+               for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+                                WL12XX_MAX_LINKS) {
+                       lnk = &wl->links[hlid];
+                       if (!lnk->ba_bitmap)
                                continue;
 
-                       ieee80211_stop_rx_ba_session(wl->vif,
+                       ieee80211_stop_rx_ba_session(vif,
                                                     lnk->ba_bitmap,
                                                     lnk->addr);
                }
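
The AP branch now walks the per-vif ap.sta_hlid_map with for_each_set_bit() instead of scanning a fixed HLID range and filtering through wl1271_is_active_sta() (whose prototype is dropped from event.h below). The two iteration styles are equivalent; a self-contained sketch of the new form, with walk_links() as a hypothetical stand-in:

#include <linux/bitops.h>
#include <linux/printk.h>

static void walk_links(const unsigned long *map, unsigned int nbits)
{
	unsigned int h;

	/* visits exactly the set bits, in ascending order; replaces the
	 * find_first_bit()/find_next_bit() loop used previously */
	for_each_set_bit(h, map, nbits)
		pr_debug("active link %u\n", h);
}
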
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
 static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
                                               u8 enable)
 {
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
+
        if (enable) {
                /* disable dynamic PS when requested by the firmware */
-               ieee80211_disable_dyn_ps(wl->vif);
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_disable_dyn_ps(vif);
+               }
                set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
        } else {
-               ieee80211_enable_dyn_ps(wl->vif);
                clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
-               wl1271_recalc_rx_streaming(wl);
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_enable_dyn_ps(vif);
+                       wl1271_recalc_rx_streaming(wl, wlvif);
+               }
        }
 
 }
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
 
 static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
 {
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        int ret;
        u32 vector;
        bool beacon_loss = false;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
        bool disconnect_sta = false;
        unsigned long sta_bitmap = 0;
 
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                wl1271_debug(DEBUG_EVENT, "status: 0x%x",
                             mbox->scheduled_scan_status);
 
-               wl1271_scan_stm(wl);
+               wl1271_scan_stm(wl, wl->scan_vif);
        }
 
        if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -253,8 +272,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
-           wl->bss_type == BSS_TYPE_STA_BSS)
+       if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
                wl12xx_event_soft_gemini_sense(wl,
                                               mbox->soft_gemini_sense_info);
 
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
         * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
         *
         */
-       if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+       if (vector & BSS_LOSE_EVENT_ID) {
+               /* TODO: check for multi-role */
                wl1271_info("Beacon loss detected.");
 
                /* indicate to the stack, that beacons have been lost */
                beacon_loss = true;
        }
 
-       if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+       if (vector & PS_REPORT_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
-               ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
-               if (ret < 0)
-                       return ret;
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       ret = wl1271_event_ps_report(wl, wlvif,
+                                                    mbox, &beacon_loss);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
-       if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
-               wl1271_event_pspoll_delivery_fail(wl);
+       if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       wl1271_event_pspoll_delivery_fail(wl, wlvif);
+               }
 
        if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+               /* TODO: check actual multi-role support */
                wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
-               if (wl->vif)
-                       wl1271_event_rssi_trigger(wl, mbox);
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       wl1271_event_rssi_trigger(wl, wlvif, mbox);
+               }
        }
 
-       if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+       if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+               u8 role_id = mbox->role_id;
                wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
-                            "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+                            "ba_allowed = 0x%x, role_id=%d",
+                            mbox->rx_ba_allowed, role_id);
 
-               wl->ba_allowed = !!mbox->rx_ba_allowed;
+               wl12xx_for_each_wlvif(wl, wlvif) {
+                       if (role_id != 0xff && role_id != wlvif->role_id)
+                               continue;
 
-               if (wl->vif && !wl->ba_allowed)
-                       wl1271_stop_ba_event(wl);
+                       wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+                       if (!wlvif->ba_allowed)
+                               wl1271_stop_ba_event(wl, wlvif);
+               }
        }
 
-       if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+       if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
                                          "status = 0x%x",
                                          mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                 * 1) channel switch complete with status=0
                 * 2) channel switch failed status=1
                 */
-               if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
-                   (wl->vif))
-                       ieee80211_chswitch_done(wl->vif,
-                               mbox->channel_switch_status ? false : true);
+
+               /* TODO: configure only the relevant vif */
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+                       bool success;
+
+                       if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+                                               &wl->flags))
+                               continue;
+
+                       success = mbox->channel_switch_status ? false : true;
+                       ieee80211_chswitch_done(vif, success);
+               }
        }
 
        if ((vector & DUMMY_PACKET_EVENT_ID)) {
                wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
-               if (wl->vif)
-                       wl1271_tx_dummy_packet(wl);
+               wl1271_tx_dummy_packet(wl);
        }
 
        /*
         * "TX retries exceeded" has a different meaning according to mode.
         * In AP mode the offending station is disconnected.
         */
-       if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+       if (vector & MAX_TX_RETRY_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
                sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
                disconnect_sta = true;
        }
 
-       if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+       if (vector & INACTIVE_STA_EVENT_ID) {
                wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
                sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
                disconnect_sta = true;
        }
 
-       if (is_ap && disconnect_sta) {
+       if (disconnect_sta) {
                u32 num_packets = wl->conf.tx.max_tx_retries;
                struct ieee80211_sta *sta;
                const u8 *addr;
                int h;
 
-               for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
-                    h < AP_MAX_LINKS;
-                    h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
-                       if (!wl1271_is_active_sta(wl, h))
+               for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+                       bool found = false;
+                       /* find the ap vif connected to this sta */
+                       wl12xx_for_each_wlvif_ap(wl, wlvif) {
+                               if (!test_bit(h, wlvif->ap.sta_hlid_map))
+                                       continue;
+                               found = true;
+                               break;
+                       }
+                       if (!found)
                                continue;
 
+                       vif = wl12xx_wlvif_to_vif(wlvif);
                        addr = wl->links[h].addr;
 
                        rcu_read_lock();
-                       sta = ieee80211_find_sta(wl->vif, addr);
+                       sta = ieee80211_find_sta(vif, addr);
                        if (sta) {
                                wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
                                ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
                }
        }
 
-       if (wl->vif && beacon_loss)
-               ieee80211_connection_loss(wl->vif);
+       if (beacon_loss)
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_connection_loss(vif);
+               }
 
        return 0;
 }
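
Event handling no longer assumes a single interface: each event is fanned out with the wl12xx_for_each_wlvif*() iterators, with the _sta and _ap variants restricting the walk by role type. Those iterators are not defined in this patch; the sketch below is purely an assumption about their shape (the example_* name and the wlvif_list member are hypothetical):

#include <linux/list.h>

/* Assumption: wl keeps its active wl12xx_vif instances on a list, and the
 * filtered variants simply skip interfaces with the wrong bss_type. */
#define example_for_each_wlvif(wl, wlvif) \
	list_for_each_entry(wlvif, &(wl)->wlvif_list, list)
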
index 49c1a0e..1d878ba 100644
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
 int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
 void wl1271_pspoll_work(struct work_struct *work);
 
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
 #endif
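
With station links now tracked per interface in ap.sta_hlid_map, the cross-file wl1271_is_active_sta() helper becomes redundant: membership in the map is the activity test, as the MAX_TX_RETRY/INACTIVE_STA handling above shows. A one-line equivalent, stated as an assumption rather than driver code:

static inline bool example_is_active_sta(struct wl12xx_vif *wlvif, u8 hlid)
{
	/* a link is active when its HLID is set in the per-vif AP link map */
	return test_bit(hlid, wlvif->ap.sta_hlid_map);
}
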
index 04db64c..88891cd 100644
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
+#include "debug.h"
 #include "init.h"
 #include "wl12xx_80211.h"
 #include "acx.h"
@@ -33,7 +34,7 @@
 #include "tx.h"
 #include "io.h"
 
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
 {
        int ret, i;
 
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
 
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
                                      sizeof
-                                     (struct wl12xx_qos_null_data_template),
+                                     (struct ieee80211_qos_hdr),
                                      0, WL1271_RATE_AUTOMATIC);
        if (ret < 0)
                return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
+       /*
+        * Put very large empty placeholders for all templates. These
+        * reserve memory for later.
+        */
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+                                     WL1271_CMD_TEMPL_MAX_SIZE,
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+                                     WL1271_CMD_TEMPL_MAX_SIZE,
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
+       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+                                     sizeof
+                                     (struct wl12xx_disconn_template),
+                                     0, WL1271_RATE_AUTOMATIC);
+       if (ret < 0)
+               return ret;
+
        for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
                ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
-                                             WL1271_CMD_TEMPL_DFLT_SIZE, i,
-                                             WL1271_RATE_AUTOMATIC);
+                                             sizeof(struct ieee80211_qos_hdr),
+                                             i, WL1271_RATE_AUTOMATIC);
                if (ret < 0)
                        return ret;
        }
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+                                         struct wl12xx_vif *wlvif)
 {
        struct wl12xx_disconn_template *tmpl;
        int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
        tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                             IEEE80211_STYPE_DEAUTH);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
                                      tmpl, sizeof(*tmpl), 0, rate);
 
@@ -123,8 +148,10 @@ out:
        return ret;
 }
 
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+                                       struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct ieee80211_hdr_3addr *nullfunc;
        int ret;
        u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
 
        /* nullfunc->addr1 is filled by FW */
 
-       memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
-       memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+       memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+       memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
                                      sizeof(*nullfunc), 0, rate);
 
@@ -153,8 +180,10 @@ out:
        return ret;
 }
 
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+                                           struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct ieee80211_qos_hdr *qosnull;
        int ret;
        u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
 
        /* qosnull->addr1 is filled by FW */
 
-       memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
-       memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+       memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+       memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
 
-       rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
                                      sizeof(*qosnull), 0, rate);
 
@@ -183,93 +212,59 @@ out:
        return ret;
 }
 
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+static int wl12xx_init_rx_config(struct wl1271 *wl)
 {
        int ret;
 
-       /*
-        * Put very large empty placeholders for all templates. These
-        * reserve memory for later.
-        */
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
-                                     WL1271_CMD_TEMPL_MAX_SIZE,
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
-                                     WL1271_CMD_TEMPL_MAX_SIZE,
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
-                                     sizeof
-                                     (struct wl12xx_disconn_template),
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
-                                     sizeof(struct wl12xx_null_data_template),
-                                     0, WL1271_RATE_AUTOMATIC);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
-                                     sizeof
-                                     (struct wl12xx_qos_null_data_template),
-                                     0, WL1271_RATE_AUTOMATIC);
+       ret = wl1271_acx_rx_msdu_life_time(wl);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl12xx_init_rx_config(struct wl1271 *wl)
+int wl1271_init_phy_config(struct wl1271 *wl)
 {
        int ret;
 
-       ret = wl1271_acx_rx_msdu_life_time(wl);
+       ret = wl1271_acx_pd_threshold(wl);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+                                           struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       ret = wl1271_acx_pd_threshold(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+       ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_service_period_timeout(wl);
+       ret = wl1271_acx_service_period_timeout(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+       ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+                                        struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       /* disable beacon filtering at this stage */
-       ret = wl1271_acx_beacon_filter_opt(wl, false);
+       ret = wl1271_acx_beacon_filter_table(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_acx_beacon_filter_table(wl);
+       /* enable beacon filtering */
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
        if (ret < 0)
                return ret;
 
@@ -302,11 +297,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+                                       struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       ret = wl1271_acx_bcn_dtim_options(wl);
+       ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
        if (ret < 0)
                return ret;
 
@@ -327,7 +323,8 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
@@ -338,25 +335,7 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
        }
 
        /* PS config */
-       ret = wl1271_acx_config_ps(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_sta_init_templates_config(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
-       if (ret < 0)
-               return ret;
-
-       /* Initialize connection monitoring thresholds */
-       ret = wl1271_acx_conn_monit_params(wl, false);
-       if (ret < 0)
-               return ret;
-
-       /* Beacon filtering */
-       ret = wl1271_init_beacon_filter(wl);
+       ret = wl12xx_acx_config_ps(wl, wlvif);
        if (ret < 0)
                return ret;
 
@@ -365,103 +344,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       /* Beacons and broadcast settings */
-       ret = wl1271_init_beacon_broadcast(wl);
-       if (ret < 0)
-               return ret;
-
-       /* Configure for ELP power saving */
-       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
-       if (ret < 0)
-               return ret;
-
-       /* Configure rssi/snr averaging weights */
-       ret = wl1271_acx_rssi_snr_avg_weights(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_sta_rate_policies(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl12xx_acx_mem_cfg(wl);
-       if (ret < 0)
-               return ret;
-
-       /* Configure the FW logger */
-       ret = wl12xx_init_fwlog(wl);
+       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+                                      struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret, i;
 
        /* disable all keep-alive templates */
        for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
-               ret = wl1271_acx_keep_alive_config(wl, i,
+               ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
                                                   ACX_KEEP_ALIVE_TPL_INVALID);
                if (ret < 0)
                        return ret;
        }
 
        /* disable the keep-alive feature */
-       ret = wl1271_acx_keep_alive_mode(wl, false);
+       ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       ret = wl1271_ap_init_templates_config(wl);
-       if (ret < 0)
-               return ret;
-
-       /* Configure for power always on */
-       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_init_ap_rates(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl1271_acx_ap_max_tx_retry(wl);
-       if (ret < 0)
-               return ret;
-
-       ret = wl12xx_acx_mem_cfg(wl);
-       if (ret < 0)
-               return ret;
-
-       /* initialize Tx power */
-       ret = wl1271_acx_tx_power(wl, wl->power_level);
+       ret = wl1271_init_ap_rates(wl, wlvif);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
 
-       ret = wl1271_ap_init_deauth_template(wl);
+       ret = wl1271_ap_init_deauth_template(wl, wlvif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_ap_init_null_template(wl);
+       ret = wl1271_ap_init_null_template(wl, vif);
        if (ret < 0)
                return ret;
 
-       ret = wl1271_ap_init_qos_null_template(wl);
+       ret = wl1271_ap_init_qos_null_template(wl, vif);
        if (ret < 0)
                return ret;
 
@@ -469,43 +406,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
         * when operating as AP we want to receive external beacons for
         * configuring ERP protection.
         */
-       ret = wl1271_acx_beacon_filter_opt(wl, false);
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+                                     struct ieee80211_vif *vif)
 {
-       return wl1271_ap_init_templates(wl);
+       return wl1271_ap_init_templates(wl, vif);
 }
 
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i, ret;
        struct conf_tx_rate_class rc;
        u32 supported_rates;
 
-       wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+       wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+                    wlvif->basic_rate_set);
 
-       if (wl->basic_rate_set == 0)
+       if (wlvif->basic_rate_set == 0)
                return -EINVAL;
 
-       rc.enabled_rates = wl->basic_rate_set;
+       rc.enabled_rates = wlvif->basic_rate_set;
        rc.long_retry_limit = 10;
        rc.short_retry_limit = 10;
        rc.aflags = 0;
-       ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+       ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
        if (ret < 0)
                return ret;
 
        /* use the min basic rate for AP broadcast/multicast */
-       rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+       rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
        rc.short_retry_limit = 10;
        rc.long_retry_limit = 10;
        rc.aflags = 0;
-       ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+       ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
        if (ret < 0)
                return ret;
 
@@ -513,7 +452,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
         * If the basic rates contain OFDM rates, use OFDM only
         * rates for unicast TX as well. Else use all supported rates.
         */
-       if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+       if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
                supported_rates = CONF_TX_OFDM_RATES;
        else
                supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +466,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
                rc.short_retry_limit = 10;
                rc.long_retry_limit = 10;
                rc.aflags = 0;
-               ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+               ret = wl1271_acx_ap_rate_policy(wl, &rc,
+                                               wlvif->ap.ucast_rate_idx[i]);
                if (ret < 0)
                        return ret;
        }
@@ -535,24 +475,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
        return 0;
 }
 
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        /* Reset the BA RX indicators */
-       wl->ba_rx_bitmap = 0;
-       wl->ba_allowed = true;
+       wlvif->ba_allowed = true;
        wl->ba_rx_session_count = 0;
 
        /* BA is supported in STA/AP modes */
-       if (wl->bss_type != BSS_TYPE_AP_BSS &&
-           wl->bss_type != BSS_TYPE_STA_BSS) {
-               wl->ba_support = false;
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+           wlvif->bss_type != BSS_TYPE_STA_BSS) {
+               wlvif->ba_support = false;
                return 0;
        }
 
-       wl->ba_support = true;
+       wlvif->ba_support = true;
 
        /* 802.11n initiator BA session setting */
-       return wl12xx_acx_set_ba_initiator_policy(wl);
+       return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
 }
 
 int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +501,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
        if (wl->chip.id == CHIP_ID_1283_PG20) {
                u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
 
-               if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+               if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
                        /* Enable SDIO padding */
                        host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
 
@@ -575,13 +514,150 @@ out:
        return ret;
 }
 
+/* vif-specific initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
 
-int wl1271_hw_init(struct wl1271 *wl)
+       ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+       if (ret < 0)
+               return ret;
+
+       /* Initialize connection monitoring thresholds */
+       ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+       if (ret < 0)
+               return ret;
+
+       /* Beacon filtering */
+       ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* Beacons and broadcast settings */
+       ret = wl1271_init_beacon_broadcast(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* Configure rssi/snr averaging weights */
+       ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* vif-specific initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       int ret;
+
+       ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* initialize Tx power */
+       ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct conf_tx_ac_category *conf_ac;
        struct conf_tx_tid *conf_tid;
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret, i;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+       /*
+        * consider all existing roles before configuring psm.
+        * TODO: reconfigure on interface removal.
+        */
+       if (!wl->ap_count) {
+               if (is_ap) {
+                       /* Configure for power always on */
+                       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+                       if (ret < 0)
+                               return ret;
+               } else if (!wl->sta_count) {
+                       /* Configure for ELP power saving */
+                       ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       /* Mode specific init */
+       if (is_ap) {
+               ret = wl1271_ap_hw_init(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+
+               ret = wl12xx_init_ap_role(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+       } else {
+               ret = wl1271_sta_hw_init(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+
+               ret = wl12xx_init_sta_role(wl, wlvif);
+               if (ret < 0)
+                       return ret;
+       }
+
+       wl12xx_init_phy_vif_config(wl, wlvif);
+
+       /* Default TID/AC configuration */
+       BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+       for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+               conf_ac = &wl->conf.tx.ac_conf[i];
+               ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+                                       conf_ac->cw_min, conf_ac->cw_max,
+                                       conf_ac->aifsn, conf_ac->tx_op_limit);
+               if (ret < 0)
+                       return ret;
+
+               conf_tid = &wl->conf.tx.tid_conf[i];
+               ret = wl1271_acx_tid_cfg(wl, wlvif,
+                                        conf_tid->queue_id,
+                                        conf_tid->channel_type,
+                                        conf_tid->tsid,
+                                        conf_tid->ps_scheme,
+                                        conf_tid->ack_policy,
+                                        conf_tid->apsd_conf[0],
+                                        conf_tid->apsd_conf[1]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* Configure HW encryption */
+       ret = wl1271_acx_feature_cfg(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       /* Mode specific init - post mem init */
+       if (is_ap)
+               ret = wl1271_ap_hw_init_post_mem(wl, vif);
+       else
+               ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+       if (ret < 0)
+               return ret;
+
+       /* Configure initiator BA sessions policies */
+       ret = wl1271_set_ba_policies(wl, wlvif);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+int wl1271_hw_init(struct wl1271 *wl)
+{
+       int ret;
 
        if (wl->chip.id == CHIP_ID_1283_PG20)
                ret = wl128x_cmd_general_parms(wl);
@@ -602,12 +678,17 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       /* Mode specific init */
-       if (is_ap)
-               ret = wl1271_ap_hw_init(wl);
-       else
-               ret = wl1271_sta_hw_init(wl);
+       /* Init templates */
+       ret = wl1271_init_templates_config(wl);
+       if (ret < 0)
+               return ret;
+
+       ret = wl12xx_acx_mem_cfg(wl);
+       if (ret < 0)
+               return ret;
 
+       /* Configure the FW logger */
+       ret = wl12xx_init_fwlog(wl);
        if (ret < 0)
                return ret;
 
@@ -655,61 +736,20 @@ int wl1271_hw_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Default TID/AC configuration */
-       BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
-       for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
-               conf_ac = &wl->conf.tx.ac_conf[i];
-               ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
-                                       conf_ac->cw_max, conf_ac->aifsn,
-                                       conf_ac->tx_op_limit);
-               if (ret < 0)
-                       goto out_free_memmap;
-
-               conf_tid = &wl->conf.tx.tid_conf[i];
-               ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
-                                        conf_tid->channel_type,
-                                        conf_tid->tsid,
-                                        conf_tid->ps_scheme,
-                                        conf_tid->ack_policy,
-                                        conf_tid->apsd_conf[0],
-                                        conf_tid->apsd_conf[1]);
-               if (ret < 0)
-                       goto out_free_memmap;
-       }
-
        /* Enable data path */
        ret = wl1271_cmd_data_path(wl, 1);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure HW encryption */
-       ret = wl1271_acx_feature_cfg(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* configure PM */
        ret = wl1271_acx_pm_config(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Mode specific init - post mem init */
-       if (is_ap)
-               ret = wl1271_ap_hw_init_post_mem(wl);
-       else
-               ret = wl1271_sta_hw_init_post_mem(wl);
-
-       if (ret < 0)
-               goto out_free_memmap;
-
        ret = wl12xx_acx_set_rate_mgmt_params(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Configure initiator BA sessions policies */
-       ret = wl1271_set_ba_policies(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        /* configure hangover */
        ret = wl12xx_acx_config_hangover(wl);
        if (ret < 0)
index 3a3c230..81140b8 100644
 #include "wl12xx.h"
 
 int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
 int wl1271_init_phy_config(struct wl1271 *wl);
 int wl1271_init_pta(struct wl1271 *wl);
 int wl1271_init_energy_detection(struct wl1271 *wl);
 int wl1271_chip_specific_init(struct wl1271 *wl);
 int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
 
 #endif
index c2da66f..079ad38 100644
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
+#include <linux/interrupt.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "io.h"
 #include "tx.h"
@@ -46,7 +48,7 @@
 bool wl1271_set_block_size(struct wl1271 *wl)
 {
        if (wl->if_ops->set_block_size) {
-               wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+               wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
                return true;
        }
 
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
 
 void wl1271_disable_interrupts(struct wl1271 *wl)
 {
-       wl->if_ops->disable_irq(wl);
+       disable_irq(wl->irq);
 }
 
 void wl1271_enable_interrupts(struct wl1271 *wl)
 {
-       wl->if_ops->enable_irq(wl);
+       enable_irq(wl->irq);
 }
 
 /* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
 void wl1271_io_reset(struct wl1271 *wl)
 {
        if (wl->if_ops->reset)
-               wl->if_ops->reset(wl);
+               wl->if_ops->reset(wl->dev);
 }
 
 void wl1271_io_init(struct wl1271 *wl)
 {
        if (wl->if_ops->init)
-               wl->if_ops->init(wl);
+               wl->if_ops->init(wl->dev);
 }
 
 void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
index e839341..d398cbc 100644
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
 void wl1271_io_reset(struct wl1271 *wl);
 void wl1271_io_init(struct wl1271 *wl);
 
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
-       return wl->if_ops->dev(wl);
-}
-
-
 /* Raw target IO, address is not translated */
 static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
                                    size_t len, bool fixed)
 {
-       wl->if_ops->write(wl, addr, buf, len, fixed);
+       wl->if_ops->write(wl->dev, addr, buf, len, fixed);
 }
 
 static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
                                   size_t len, bool fixed)
 {
-       wl->if_ops->read(wl, addr, buf, len, fixed);
+       wl->if_ops->read(wl->dev, addr, buf, len, fixed);
 }
 
 static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
 
 static inline void wl1271_power_off(struct wl1271 *wl)
 {
-       wl->if_ops->power(wl, false);
+       wl->if_ops->power(wl->dev, false);
        clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 }
 
 static inline int wl1271_power_on(struct wl1271 *wl)
 {
-       int ret = wl->if_ops->power(wl, true);
+       int ret = wl->if_ops->power(wl->dev, true);
        if (ret == 0)
                set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
 
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
 int wl1271_set_partition(struct wl1271 *wl,
                         struct wl1271_partition_set *p);
 
+bool wl1271_set_block_size(struct wl1271 *wl);
+
 /* Functions from wl1271_main.c */
 
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
 int wl1271_tx_dummy_packet(struct wl1271 *wl);
 
 #endif
index 884f82b..2f7bfa8 100644
 #include <linux/slab.h>
 #include <linux/wl12xx.h>
 #include <linux/sched.h>
+#include <linux/interrupt.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "wl12xx_80211.h"
 #include "reg.h"
 #include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
 static bool bug_on_recovery;
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
+                                        struct ieee80211_vif *vif,
                                         bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
-       .name           = "wl1271",
-       .id             = -1,
-
-       /* device model insists to have a release function */
-       .dev            = {
-               .release = wl1271_device_release,
-       },
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 static DEFINE_MUTEX(wl_list_mutex);
 static LIST_HEAD(wl_list);
 
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                 unsigned char operstate)
 {
        int ret;
+
        if (operstate != IF_OPER_UP)
                return 0;
 
-       if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+       if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
                return 0;
 
-       ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+       ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
        if (ret < 0)
                return ret;
 
-       wl12xx_croc(wl, wl->role_id);
+       wl12xx_croc(wl, wlvif->role_id);
 
        wl1271_info("Association completed.");
        return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
        struct ieee80211_hw *hw;
        struct wl1271 *wl;
        struct wl1271 *wl_temp;
+       struct wl12xx_vif *wlvif;
        int ret = 0;
 
        /* Check that this notification is for us. */
@@ -459,17 +450,18 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
        if (wl->state == WL1271_STATE_OFF)
                goto out;
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-               goto out;
-
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out;
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+                       continue;
 
-       wl1271_check_operstate(wl, dev->operstate);
+               ret = wl1271_ps_elp_wakeup(wl);
+               if (ret < 0)
+                       goto out;
 
-       wl1271_ps_elp_sleep(wl);
+               wl1271_check_operstate(wl, wlvif, dev->operstate);
 
+               wl1271_ps_elp_sleep(wl);
+       }
 out:
        mutex_unlock(&wl->mutex);
 
@@ -498,19 +490,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
        return 0;
 }
 
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                  bool enable)
 {
        int ret = 0;
 
        /* we should hold wl->mutex */
-       ret = wl1271_acx_ps_rx_streaming(wl, enable);
+       ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
        if (ret < 0)
                goto out;
 
        if (enable)
-               set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+               set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
        else
-               clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+               clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
 out:
        return ret;
 }
@@ -519,25 +512,25 @@ out:
  * this function is being called when the rx_streaming interval
  * has been changed or rx_streaming should be disabled
  */
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret = 0;
        int period = wl->conf.rx_streaming.interval;
 
        /* don't reconfigure if rx_streaming is disabled */
-       if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                goto out;
 
        /* reconfigure/disable according to new streaming_period */
        if (period &&
-           test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+           test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
            (wl->conf.rx_streaming.always ||
             test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
-               ret = wl1271_set_rx_streaming(wl, true);
+               ret = wl1271_set_rx_streaming(wl, wlvif, true);
        else {
-               ret = wl1271_set_rx_streaming(wl, false);
+               ret = wl1271_set_rx_streaming(wl, wlvif, false);
                /* don't cancel_work_sync since we might deadlock */
-               del_timer_sync(&wl->rx_streaming_timer);
+               del_timer_sync(&wlvif->rx_streaming_timer);
        }
 out:
        return ret;
@@ -546,13 +539,14 @@ out:
 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
 {
        int ret;
-       struct wl1271 *wl =
-               container_of(work, struct wl1271, rx_streaming_enable_work);
+       struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+                                               rx_streaming_enable_work);
+       struct wl1271 *wl = wlvif->wl;
 
        mutex_lock(&wl->mutex);
 
-       if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
-           !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+       if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+           !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
            (!wl->conf.rx_streaming.always &&
             !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
                goto out;
@@ -564,12 +558,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
        if (ret < 0)
                goto out;
 
-       ret = wl1271_set_rx_streaming(wl, true);
+       ret = wl1271_set_rx_streaming(wl, wlvif, true);
        if (ret < 0)
                goto out_sleep;
 
        /* stop it after some time of inactivity */
-       mod_timer(&wl->rx_streaming_timer,
+       mod_timer(&wlvif->rx_streaming_timer,
                  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
 
 out_sleep:
@@ -581,19 +575,20 @@ out:
 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
 {
        int ret;
-       struct wl1271 *wl =
-               container_of(work, struct wl1271, rx_streaming_disable_work);
+       struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+                                               rx_streaming_disable_work);
+       struct wl1271 *wl = wlvif->wl;
 
        mutex_lock(&wl->mutex);
 
-       if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_set_rx_streaming(wl, false);
+       ret = wl1271_set_rx_streaming(wl, wlvif, false);
        if (ret)
                goto out_sleep;
 
@@ -605,8 +600,9 @@ out:
 
 static void wl1271_rx_streaming_timer(unsigned long data)
 {
-       struct wl1271 *wl = (struct wl1271 *)data;
-       ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+       struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+       struct wl1271 *wl = wlvif->wl;
+       ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 }
 
 static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +641,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
 
 static int wl1271_plt_init(struct wl1271 *wl)
 {
-       struct conf_tx_ac_category *conf_ac;
-       struct conf_tx_tid *conf_tid;
-       int ret, i;
+       int ret;
 
        if (wl->chip.id == CHIP_ID_1283_PG20)
                ret = wl128x_cmd_general_parms(wl);
@@ -676,10 +670,6 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
-       ret = wl1271_sta_init_templates_config(wl);
-       if (ret < 0)
-               return ret;
-
        ret = wl1271_acx_init_mem_config(wl);
        if (ret < 0)
                return ret;
@@ -689,61 +679,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
        if (ret < 0)
                goto out_free_memmap;
 
-       ret = wl1271_acx_dco_itrim_params(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Initialize connection monitoring thresholds */
-       ret = wl1271_acx_conn_monit_params(wl, false);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Bluetooth WLAN coexistence */
-       ret = wl1271_init_pta(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* FM WLAN coexistence */
-       ret = wl1271_acx_fm_coex(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Energy detection */
-       ret = wl1271_init_energy_detection(wl);
-       if (ret < 0)
-               goto out_free_memmap;
-
        ret = wl12xx_acx_mem_cfg(wl);
        if (ret < 0)
                goto out_free_memmap;
 
-       /* Default fragmentation threshold */
-       ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
-       if (ret < 0)
-               goto out_free_memmap;
-
-       /* Default TID/AC configuration */
-       BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
-       for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
-               conf_ac = &wl->conf.tx.ac_conf[i];
-               ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
-                                       conf_ac->cw_max, conf_ac->aifsn,
-                                       conf_ac->tx_op_limit);
-               if (ret < 0)
-                       goto out_free_memmap;
-
-               conf_tid = &wl->conf.tx.tid_conf[i];
-               ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
-                                        conf_tid->channel_type,
-                                        conf_tid->tsid,
-                                        conf_tid->ps_scheme,
-                                        conf_tid->ack_policy,
-                                        conf_tid->apsd_conf[0],
-                                        conf_tid->apsd_conf[1]);
-               if (ret < 0)
-                       goto out_free_memmap;
-       }
-
        /* Enable data path */
        ret = wl1271_cmd_data_path(wl, 1);
        if (ret < 0)
@@ -768,14 +707,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
        return ret;
 }
 
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+                                       struct wl12xx_vif *wlvif,
+                                       u8 hlid, u8 tx_pkts)
 {
        bool fw_ps, single_sta;
 
-       /* only regulate station links */
-       if (hlid < WL1271_AP_STA_HLID_START)
-               return;
-
        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        single_sta = (wl->active_sta_count == 1);
 
@@ -784,7 +721,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
         * packets in FW or if the STA is awake.
         */
        if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
-               wl1271_ps_link_end(wl, hlid);
+               wl12xx_ps_link_end(wl, wlvif, hlid);
 
        /*
         * Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +729,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
         * case FW-memory congestion is not a problem.
         */
        else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-               wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
-       int id;
-
-       /* global/broadcast "stations" are always active */
-       if (hlid < WL1271_AP_STA_HLID_START)
-               return true;
-
-       id = hlid - WL1271_AP_STA_HLID_START;
-       return test_bit(id, wl->ap_hlid_map);
+               wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+                                          struct wl12xx_vif *wlvif,
                                           struct wl12xx_fw_status *status)
 {
+       struct wl1271_link *lnk;
        u32 cur_fw_ps_map;
        u8 hlid, cnt;
 
@@ -825,25 +752,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                wl->ap_fw_ps_map = cur_fw_ps_map;
        }
 
-       for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
-               if (!wl1271_is_active_sta(wl, hlid))
-                       continue;
-
-               cnt = status->tx_lnk_free_pkts[hlid] -
-                     wl->links[hlid].prev_freed_pkts;
+       for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+               lnk = &wl->links[hlid];
+               cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
 
-               wl->links[hlid].prev_freed_pkts =
-                       status->tx_lnk_free_pkts[hlid];
-               wl->links[hlid].allocated_pkts -= cnt;
+               lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+               lnk->allocated_pkts -= cnt;
 
-               wl12xx_irq_ps_regulate_link(wl, hlid,
-                                           wl->links[hlid].allocated_pkts);
+               wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+                                           lnk->allocated_pkts);
        }
 }
 
 static void wl12xx_fw_status(struct wl1271 *wl,
                             struct wl12xx_fw_status *status)
 {
+       struct wl12xx_vif *wlvif;
        struct timespec ts;
        u32 old_tx_blk_count = wl->tx_blocks_available;
        int avail, freed_blocks;
@@ -898,8 +822,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
                clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 
        /* for AP update num of allocated TX blocks per link and ps status */
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               wl12xx_irq_update_links_status(wl, status);
+       wl12xx_for_each_wlvif_ap(wl, wlvif) {
+               wl12xx_irq_update_links_status(wl, wlvif, status);
+       }
 
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
@@ -932,7 +857,7 @@ static void wl1271_netstack_work(struct work_struct *work)
 
 #define WL1271_IRQ_MAX_LOOPS 256
 
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
 {
        int ret;
        u32 intr;
@@ -1054,7 +979,6 @@ out:
 
        return IRQ_HANDLED;
 }
-EXPORT_SYMBOL_GPL(wl1271_irq);
 
 static int wl1271_fetch_firmware(struct wl1271 *wl)
 {
@@ -1069,10 +993,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
 
        wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
 
-       ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+       ret = request_firmware(&fw, fw_name, wl->dev);
 
        if (ret < 0) {
-               wl1271_error("could not get firmware: %d", ret);
+               wl1271_error("could not get firmware %s: %d", fw_name, ret);
                return ret;
        }
 
@@ -1107,10 +1031,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
        const struct firmware *fw;
        int ret;
 
-       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
 
        if (ret < 0) {
-               wl1271_error("could not get nvs file: %d", ret);
+               wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+                            ret);
                return ret;
        }
 
@@ -1217,11 +1142,13 @@ static void wl1271_recovery_work(struct work_struct *work)
 {
        struct wl1271 *wl =
                container_of(work, struct wl1271, recovery_work);
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_vif *vif;
 
        mutex_lock(&wl->mutex);
 
        if (wl->state != WL1271_STATE_ON)
-               goto out;
+               goto out_unlock;
 
        /* Avoid a recursive recovery */
        set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1165,12 @@ static void wl1271_recovery_work(struct work_struct *work)
         * in the firmware during recovery. This doesn't hurt if the network is
         * not encrypted.
         */
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-           test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
-               wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+                   test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+                       wlvif->tx_security_seq +=
+                               WL1271_TX_SQN_POST_RECOVERY_PADDING;
+       }
 
        /* Prevent spurious TX during FW restart */
        ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1181,14 @@ static void wl1271_recovery_work(struct work_struct *work)
        }
 
        /* reboot the chipset */
-       __wl1271_op_remove_interface(wl, false);
+       while (!list_empty(&wl->wlvif_list)) {
+               wlvif = list_first_entry(&wl->wlvif_list,
+                                      struct wl12xx_vif, list);
+               vif = wl12xx_wlvif_to_vif(wlvif);
+               __wl1271_op_remove_interface(wl, vif, false);
+       }
+       mutex_unlock(&wl->mutex);
+       wl1271_op_stop(wl->hw);
 
        clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 
@@ -1262,8 +1199,8 @@ static void wl1271_recovery_work(struct work_struct *work)
         * to restart the HW.
         */
        ieee80211_wake_queues(wl->hw);
-
-out:
+       return;
+out_unlock:
        mutex_unlock(&wl->mutex);
 }
 
@@ -1318,7 +1255,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
        /* 0. read chip id from CHIP_ID */
        wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
 
-       /* 1. check if chip id is valid */
+       /*
+        * For wl127x based devices we could use the default block
+        * size (512 bytes), but due to a bug in the sdio driver, we
+        * need to set it explicitly after the chip is powered on.  To
+        * simplify the code and since the performance impact is
+        * negligible, we use the same block size for all different
+        * chip types.
+        */
+       if (!wl1271_set_block_size(wl))
+               wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
 
        switch (wl->chip.id) {
        case CHIP_ID_1271_PG10:
@@ -1328,7 +1274,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                ret = wl1271_setup(wl);
                if (ret < 0)
                        goto out;
+               wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
                break;
+
        case CHIP_ID_1271_PG20:
                wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
                             wl->chip.id);
@@ -1336,7 +1284,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                ret = wl1271_setup(wl);
                if (ret < 0)
                        goto out;
+               wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
                break;
+
        case CHIP_ID_1283_PG20:
                wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
                             wl->chip.id);
@@ -1344,9 +1294,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
                ret = wl1271_setup(wl);
                if (ret < 0)
                        goto out;
-
-               if (wl1271_set_block_size(wl))
-                       wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
                break;
        case CHIP_ID_1283_PG10:
        default:
@@ -1389,8 +1336,6 @@ int wl1271_plt_start(struct wl1271 *wl)
                goto out;
        }
 
-       wl->bss_type = BSS_TYPE_STA_BSS;
-
        while (retries) {
                retries--;
                ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1427,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
 static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
        struct wl1271 *wl = hw->priv;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_vif *vif = info->control.vif;
+       struct wl12xx_vif *wlvif = NULL;
        unsigned long flags;
        int q, mapping;
-       u8 hlid = 0;
+       u8 hlid;
+
+       if (vif)
+               wlvif = wl12xx_vif_to_data(vif);
 
        mapping = skb_get_queue_mapping(skb);
        q = wl1271_tx_get_queue(mapping);
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 
        spin_lock_irqsave(&wl->wl_lock, flags);
 
        /* queue the packet */
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               if (!wl1271_is_active_sta(wl, hlid)) {
-                       wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
-                                    hlid, q);
-                       dev_kfree_skb(skb);
-                       goto out;
-               }
-
-               wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
-               skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
-       } else {
-               skb_queue_tail(&wl->tx_queue[q], skb);
+       if (hlid == WL12XX_INVALID_LINK_ID ||
+           (wlvif && !test_bit(hlid, wlvif->links_map))) {
+               wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+               dev_kfree_skb(skb);
+               goto out;
        }
 
+       wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+       skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
        wl->tx_queue_count[q]++;
 
        /*
@@ -1609,13 +1555,14 @@ static struct notifier_block wl1271_dev_notifier = {
 };
 
 #ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+                                       struct wl12xx_vif *wlvif)
 {
        int ret = 0;
 
        mutex_lock(&wl->mutex);
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out_unlock;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1570,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
                goto out_unlock;
 
        /* enter psm if needed */
-       if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+       if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
                DECLARE_COMPLETION_ONSTACK(compl);
 
-               wl->ps_compl = &compl;
-               ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-                                  wl->basic_rate, true);
+               wlvif->ps_compl = &compl;
+               ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+                                  wlvif->basic_rate, true);
                if (ret < 0)
                        goto out_sleep;
 
@@ -1660,20 +1607,21 @@ out:
 
 }
 
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+                                      struct wl12xx_vif *wlvif)
 {
        int ret = 0;
 
        mutex_lock(&wl->mutex);
 
-       if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
                goto out_unlock;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_unlock;
 
-       ret = wl1271_acx_beacon_filter_opt(wl, true);
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
 
        wl1271_ps_elp_sleep(wl);
 out_unlock:
@@ -1682,20 +1630,22 @@ out_unlock:
 
 }
 
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
-       if (wl->bss_type == BSS_TYPE_STA_BSS)
-               return wl1271_configure_suspend_sta(wl);
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               return wl1271_configure_suspend_ap(wl);
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+               return wl1271_configure_suspend_sta(wl, wlvif);
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               return wl1271_configure_suspend_ap(wl, wlvif);
        return 0;
 }
 
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif)
 {
        int ret;
-       bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
-       bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+       bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+       bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
 
        if (!is_sta && !is_ap)
                return;
@@ -1707,11 +1657,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
 
        if (is_sta) {
                /* exit psm if it wasn't configured */
-               if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
-                       wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                          wl->basic_rate, true);
+               if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+                       wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+                                          wlvif->basic_rate, true);
        } else if (is_ap) {
-               wl1271_acx_beacon_filter_opt(wl, false);
+               wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        }
 
        wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1673,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
                            struct cfg80211_wowlan *wow)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
        WARN_ON(!wow || !wow->any);
 
        wl->wow_enabled = true;
-       ret = wl1271_configure_suspend(wl);
-       if (ret < 0) {
-               wl1271_warning("couldn't prepare device to suspend");
-               return ret;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl1271_configure_suspend(wl, wlvif);
+               if (ret < 0) {
+                       wl1271_warning("couldn't prepare device to suspend");
+                       return ret;
+               }
        }
        /* flush any remaining work */
        wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1704,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 
        wl1271_enable_interrupts(wl);
        flush_work(&wl->tx_work);
-       flush_delayed_work(&wl->pspoll_work);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               flush_delayed_work(&wlvif->pspoll_work);
+       }
        flush_delayed_work(&wl->elp_work);
 
        return 0;
@@ -1760,6 +1715,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 static int wl1271_op_resume(struct ieee80211_hw *hw)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        unsigned long flags;
        bool run_irq_work = false;
 
@@ -1783,7 +1739,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
                wl1271_irq(0, wl);
                wl1271_enable_interrupts(wl);
        }
-       wl1271_configure_resume(wl);
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               wl1271_configure_resume(wl, wlvif);
+       }
        wl->wow_enabled = false;
 
        return 0;
@@ -1810,20 +1768,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
 
 static void wl1271_op_stop(struct ieee80211_hw *hw)
 {
+       struct wl1271 *wl = hw->priv;
+       int i;
+
        wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+       mutex_lock(&wl->mutex);
+       if (wl->state == WL1271_STATE_OFF) {
+               mutex_unlock(&wl->mutex);
+               return;
+       }
+       /*
+        * this must be before the cancel_work calls below, so that the work
+        * functions don't perform further work.
+        */
+       wl->state = WL1271_STATE_OFF;
+       mutex_unlock(&wl->mutex);
+
+       mutex_lock(&wl_list_mutex);
+       list_del(&wl->list);
+       mutex_unlock(&wl_list_mutex);
+
+       wl1271_disable_interrupts(wl);
+       wl1271_flush_deferred_work(wl);
+       cancel_delayed_work_sync(&wl->scan_complete_work);
+       cancel_work_sync(&wl->netstack_work);
+       cancel_work_sync(&wl->tx_work);
+       cancel_delayed_work_sync(&wl->elp_work);
+
+       /* let's notify MAC80211 about the remaining pending TX frames */
+       wl12xx_tx_reset(wl, true);
+       mutex_lock(&wl->mutex);
+
+       wl1271_power_off(wl);
+
+       wl->band = IEEE80211_BAND_2GHZ;
+
+       wl->rx_counter = 0;
+       wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+       wl->tx_blocks_available = 0;
+       wl->tx_allocated_blocks = 0;
+       wl->tx_results_count = 0;
+       wl->tx_packets_count = 0;
+       wl->time_offset = 0;
+       wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+       wl->ap_fw_ps_map = 0;
+       wl->ap_ps_map = 0;
+       wl->sched_scanning = false;
+       memset(wl->roles_map, 0, sizeof(wl->roles_map));
+       memset(wl->links_map, 0, sizeof(wl->links_map));
+       memset(wl->roc_map, 0, sizeof(wl->roc_map));
+       wl->active_sta_count = 0;
+
+       /* The system link is always allocated */
+       __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+       /*
+        * this is performed after the cancel_work calls and the associated
+        * mutex_lock, so that wl1271_op_add_interface does not accidentally
+        * get executed before all these vars have been reset.
+        */
+       wl->flags = 0;
+
+       wl->tx_blocks_freed = 0;
+
+       for (i = 0; i < NUM_TX_QUEUES; i++) {
+               wl->tx_pkts_freed[i] = 0;
+               wl->tx_allocated_pkts[i] = 0;
+       }
+
+       wl1271_debugfs_reset(wl);
+
+       kfree(wl->fw_status);
+       wl->fw_status = NULL;
+       kfree(wl->tx_res_if);
+       wl->tx_res_if = NULL;
+       kfree(wl->target_mem_map);
+       wl->target_mem_map = NULL;
+
+       mutex_unlock(&wl->mutex);
+}
+
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+       u8 policy = find_first_zero_bit(wl->rate_policies_map,
+                                       WL12XX_MAX_RATE_POLICIES);
+       if (policy >= WL12XX_MAX_RATE_POLICIES)
+               return -EBUSY;
+
+       __set_bit(policy, wl->rate_policies_map);
+       *idx = policy;
+       return 0;
 }
 
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
 {
-       switch (wl->bss_type) {
+       if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+               return;
+
+       __clear_bit(*idx, wl->rate_policies_map);
+       *idx = WL12XX_MAX_RATE_POLICIES;
+}
+
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+       switch (wlvif->bss_type) {
        case BSS_TYPE_AP_BSS:
-               if (wl->p2p)
+               if (wlvif->p2p)
                        return WL1271_ROLE_P2P_GO;
                else
                        return WL1271_ROLE_AP;
 
        case BSS_TYPE_STA_BSS:
-               if (wl->p2p)
+               if (wlvif->p2p)
                        return WL1271_ROLE_P2P_CL;
                else
                        return WL1271_ROLE_STA;
@@ -1832,78 +1889,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
                return WL1271_ROLE_IBSS;
 
        default:
-               wl1271_error("invalid bss_type: %d", wl->bss_type);
+               wl1271_error("invalid bss_type: %d", wlvif->bss_type);
        }
        return WL12XX_INVALID_ROLE_TYPE;
 }
 
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
-                                  struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
-       struct wl1271 *wl = hw->priv;
-       struct wiphy *wiphy = hw->wiphy;
-       int retries = WL1271_BOOT_RETRIES;
-       int ret = 0;
-       u8 role_type;
-       bool booted = false;
-
-       wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
-                    ieee80211_vif_type_p2p(vif), vif->addr);
-
-       mutex_lock(&wl->mutex);
-       if (wl->vif) {
-               wl1271_debug(DEBUG_MAC80211,
-                            "multiple vifs are not supported yet");
-               ret = -EBUSY;
-               goto out;
-       }
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int i;
 
-       /*
-        * in some very corner case HW recovery scenarios its possible to
-        * get here before __wl1271_op_remove_interface is complete, so
-        * opt out if that is the case.
-        */
-       if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
-               ret = -EBUSY;
-               goto out;
-       }
+       /* clear everything but the persistent data */
+       memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
 
        switch (ieee80211_vif_type_p2p(vif)) {
        case NL80211_IFTYPE_P2P_CLIENT:
-               wl->p2p = 1;
+               wlvif->p2p = 1;
                /* fall-through */
        case NL80211_IFTYPE_STATION:
-               wl->bss_type = BSS_TYPE_STA_BSS;
-               wl->set_bss_type = BSS_TYPE_STA_BSS;
+               wlvif->bss_type = BSS_TYPE_STA_BSS;
                break;
        case NL80211_IFTYPE_ADHOC:
-               wl->bss_type = BSS_TYPE_IBSS;
-               wl->set_bss_type = BSS_TYPE_STA_BSS;
+               wlvif->bss_type = BSS_TYPE_IBSS;
                break;
        case NL80211_IFTYPE_P2P_GO:
-               wl->p2p = 1;
+               wlvif->p2p = 1;
                /* fall-through */
        case NL80211_IFTYPE_AP:
-               wl->bss_type = BSS_TYPE_AP_BSS;
+               wlvif->bss_type = BSS_TYPE_AP_BSS;
                break;
        default:
-               ret = -EOPNOTSUPP;
-               goto out;
+               wlvif->bss_type = MAX_BSS_TYPE;
+               return -EOPNOTSUPP;
        }
 
-       role_type = wl12xx_get_role_type(wl);
-       if (role_type == WL12XX_INVALID_ROLE_TYPE) {
-               ret = -EINVAL;
-               goto out;
+       wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+       wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+       wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+           wlvif->bss_type == BSS_TYPE_IBSS) {
+               /* init sta/ibss data */
+               wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+               wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+               wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+       } else {
+               /* init ap data */
+               wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+               wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+               wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+               for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+                       wl12xx_allocate_rate_policy(wl,
+                                               &wlvif->ap.ucast_rate_idx[i]);
        }
-       memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
 
-       if (wl->state != WL1271_STATE_OFF) {
-               wl1271_error("cannot start because not in off state: %d",
-                            wl->state);
-               ret = -EBUSY;
-               goto out;
-       }
+       wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+       wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+       wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+       wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+       wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+       wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+       /*
+        * mac80211 configures some values globally, while we treat them
+        * per-interface. thus, on init, we have to copy them from wl
+        */
+       wlvif->band = wl->band;
+       wlvif->channel = wl->channel;
+       wlvif->power_level = wl->power_level;
+
+       INIT_WORK(&wlvif->rx_streaming_enable_work,
+                 wl1271_rx_streaming_enable_work);
+       INIT_WORK(&wlvif->rx_streaming_disable_work,
+                 wl1271_rx_streaming_disable_work);
+       INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+       INIT_LIST_HEAD(&wlvif->list);
+
+       setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+                   (unsigned long) wlvif);
+       return 0;
+}
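
wl12xx_init_vif_data() starts by clearing the per-vif state with memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)): every field laid out before the persistent member is wiped on each (re-)add of the interface, while the tail of the structure survives. A stand-alone sketch of that layout trick, using a made-up struct:

        #include <string.h>
        #include <stddef.h>

        struct vif_state {
                int role_id;                        /* wiped on every re-init */
                int channel;                        /* wiped on every re-init */
                /* everything from here on survives re-initialization */
                struct {
                        unsigned long flags;
                        int counters[4];
                } persistent;
        };

        static void reinit_vif(struct vif_state *v)
        {
                /* clear only the volatile head of the structure */
                memset(v, 0, offsetof(struct vif_state, persistent));
        }
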
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+       int retries = WL1271_BOOT_RETRIES;
+       bool booted = false;
+       struct wiphy *wiphy = wl->hw->wiphy;
+       int ret;
 
        while (retries) {
                retries--;
@@ -1915,25 +1989,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                if (ret < 0)
                        goto power_off;
 
-               if (wl->bss_type == BSS_TYPE_STA_BSS ||
-                   wl->bss_type == BSS_TYPE_IBSS) {
-                       /*
-                        * The device role is a special role used for
-                        * rx and tx frames prior to association (as
-                        * the STA role can get packets only from
-                        * its associated bssid)
-                        */
-                       ret = wl12xx_cmd_role_enable(wl,
-                                                        WL1271_ROLE_DEVICE,
-                                                        &wl->dev_role_id);
-                       if (ret < 0)
-                               goto irq_disable;
-               }
-
-               ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
-               if (ret < 0)
-                       goto irq_disable;
-
                ret = wl1271_hw_init(wl);
                if (ret < 0)
                        goto irq_disable;
@@ -1964,9 +2019,6 @@ power_off:
                goto out;
        }
 
-       wl->vif = vif;
-       wl->state = WL1271_STATE_ON;
-       set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
        wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
 
        /* update hw/fw version info in wiphy struct */
@@ -1984,7 +2036,110 @@ power_off:
        wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
                     wl->enable_11a ? "" : "not ");
 
+       wl->state = WL1271_STATE_ON;
+out:
+       return booted;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int ret = 0;
+       u8 role_type;
+       bool booted = false;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+                    ieee80211_vif_type_p2p(vif), vif->addr);
+
+       mutex_lock(&wl->mutex);
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_unlock;
+
+       if (wl->vif) {
+               wl1271_debug(DEBUG_MAC80211,
+                            "multiple vifs are not supported yet");
+               ret = -EBUSY;
+               goto out;
+       }
+
+       /*
+        * in some rare corner-case HW recovery scenarios it's possible to
+        * get here before __wl1271_op_remove_interface is complete, so
+        * opt out if that is the case.
+        */
+       if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+           test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ret = wl12xx_init_vif_data(wl, vif);
+       if (ret < 0)
+               goto out;
+
+       wlvif->wl = wl;
+       role_type = wl12xx_get_role_type(wl, wlvif);
+       if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /*
+        * TODO: once the nvs issue is solved, move this block
+        * to start(), and make sure the driver is ON here.
+        */
+       if (wl->state == WL1271_STATE_OFF) {
+               /*
+                * we still need this in order to configure the fw
+                * while uploading the nvs
+                */
+               memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+               booted = wl12xx_init_fw(wl);
+               if (!booted) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+           wlvif->bss_type == BSS_TYPE_IBSS) {
+               /*
+                * The device role is a special role used for
+                * rx and tx frames prior to association (as
+                * the STA role can get packets only from
+                * its associated bssid)
+                */
+               ret = wl12xx_cmd_role_enable(wl, vif->addr,
+                                                WL1271_ROLE_DEVICE,
+                                                &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = wl12xx_cmd_role_enable(wl, vif->addr,
+                                    role_type, &wlvif->role_id);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_init_vif_specific(wl, vif);
+       if (ret < 0)
+               goto out;
+
+       wl->vif = vif;
+       list_add(&wlvif->list, &wl->wlvif_list);
+       set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               wl->ap_count++;
+       else
+               wl->sta_count++;
 out:
+       wl1271_ps_elp_sleep(wl);
+out_unlock:
        mutex_unlock(&wl->mutex);
 
        mutex_lock(&wl_list_mutex);
@@ -1996,29 +2151,34 @@ out:
 }
 
 static void __wl1271_op_remove_interface(struct wl1271 *wl,
+                                        struct ieee80211_vif *vif,
                                         bool reset_tx_queues)
 {
-       int ret, i;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int i, ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
 
+       if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+               return;
+
+       wl->vif = NULL;
+
        /* because of hardware recovery, we may get here twice */
        if (wl->state != WL1271_STATE_ON)
                return;
 
        wl1271_info("down");
 
-       mutex_lock(&wl_list_mutex);
-       list_del(&wl->list);
-       mutex_unlock(&wl_list_mutex);
-
        /* enable dyn ps just in case (if left on due to fw crash etc) */
-       if (wl->bss_type == BSS_TYPE_STA_BSS)
-               ieee80211_enable_dyn_ps(wl->vif);
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+               ieee80211_enable_dyn_ps(vif);
 
-       if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+       if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+           wl->scan_vif == vif) {
                wl->scan.state = WL1271_SCAN_STATE_IDLE;
                memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+               wl->scan_vif = NULL;
                wl->scan.req = NULL;
                ieee80211_scan_completed(wl->hw, true);
        }
@@ -2029,13 +2189,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                if (ret < 0)
                        goto deinit;
 
-               if (wl->bss_type == BSS_TYPE_STA_BSS) {
-                       ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+               if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+                       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
                        if (ret < 0)
                                goto deinit;
                }
 
-               ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+               ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
                if (ret < 0)
                        goto deinit;
 
@@ -2043,120 +2203,82 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
        }
 deinit:
        /* clear all hlids (except system_hlid) */
-       wl->sta_hlid = WL12XX_INVALID_LINK_ID;
-       wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-       wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
-       wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+       wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+           wlvif->bss_type == BSS_TYPE_IBSS) {
+               wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+               wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+               wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+       } else {
+               wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+               wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+               wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+               wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+               for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+                       wl12xx_free_rate_policy(wl,
+                                               &wlvif->ap.ucast_rate_idx[i]);
+       }
 
-       /*
-        * this must be before the cancel_work calls below, so that the work
-        * functions don't perform further work.
-        */
-       wl->state = WL1271_STATE_OFF;
+       wl12xx_tx_reset_wlvif(wl, wlvif);
+       wl1271_free_ap_keys(wl, wlvif);
+       if (wl->last_wlvif == wlvif)
+               wl->last_wlvif = NULL;
+       list_del(&wlvif->list);
+       memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+       wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+       wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               wl->ap_count--;
+       else
+               wl->sta_count--;
 
        mutex_unlock(&wl->mutex);
-
-       wl1271_disable_interrupts(wl);
-       wl1271_flush_deferred_work(wl);
-       cancel_delayed_work_sync(&wl->scan_complete_work);
-       cancel_work_sync(&wl->netstack_work);
-       cancel_work_sync(&wl->tx_work);
-       del_timer_sync(&wl->rx_streaming_timer);
-       cancel_work_sync(&wl->rx_streaming_enable_work);
-       cancel_work_sync(&wl->rx_streaming_disable_work);
-       cancel_delayed_work_sync(&wl->pspoll_work);
-       cancel_delayed_work_sync(&wl->elp_work);
+       del_timer_sync(&wlvif->rx_streaming_timer);
+       cancel_work_sync(&wlvif->rx_streaming_enable_work);
+       cancel_work_sync(&wlvif->rx_streaming_disable_work);
+       cancel_delayed_work_sync(&wlvif->pspoll_work);
 
        mutex_lock(&wl->mutex);
-
-       /* let's notify MAC80211 about the remaining pending TX frames */
-       wl1271_tx_reset(wl, reset_tx_queues);
-       wl1271_power_off(wl);
-
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
-       wl->ssid_len = 0;
-       wl->bss_type = MAX_BSS_TYPE;
-       wl->set_bss_type = MAX_BSS_TYPE;
-       wl->p2p = 0;
-       wl->band = IEEE80211_BAND_2GHZ;
-
-       wl->rx_counter = 0;
-       wl->psm_entry_retry = 0;
-       wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-       wl->tx_blocks_available = 0;
-       wl->tx_allocated_blocks = 0;
-       wl->tx_results_count = 0;
-       wl->tx_packets_count = 0;
-       wl->time_offset = 0;
-       wl->session_counter = 0;
-       wl->rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-       wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
-       wl->vif = NULL;
-       wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-       wl1271_free_ap_keys(wl);
-       memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
-       wl->ap_fw_ps_map = 0;
-       wl->ap_ps_map = 0;
-       wl->sched_scanning = false;
-       wl->role_id = WL12XX_INVALID_ROLE_ID;
-       wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
-       memset(wl->roles_map, 0, sizeof(wl->roles_map));
-       memset(wl->links_map, 0, sizeof(wl->links_map));
-       memset(wl->roc_map, 0, sizeof(wl->roc_map));
-       wl->active_sta_count = 0;
-
-       /* The system link is always allocated */
-       __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
-       /*
-        * this is performed after the cancel_work calls and the associated
-        * mutex_lock, so that wl1271_op_add_interface does not accidentally
-        * get executed before all these vars have been reset.
-        */
-       wl->flags = 0;
-
-       wl->tx_blocks_freed = 0;
-
-       for (i = 0; i < NUM_TX_QUEUES; i++) {
-               wl->tx_pkts_freed[i] = 0;
-               wl->tx_allocated_pkts[i] = 0;
-       }
-
-       wl1271_debugfs_reset(wl);
-
-       kfree(wl->fw_status);
-       wl->fw_status = NULL;
-       kfree(wl->tx_res_if);
-       wl->tx_res_if = NULL;
-       kfree(wl->target_mem_map);
-       wl->target_mem_map = NULL;
 }
 
 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct wl12xx_vif *iter;
 
        mutex_lock(&wl->mutex);
+
+       if (wl->state == WL1271_STATE_OFF ||
+           !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+               goto out;
+
        /*
         * wl->vif can be null here if someone shuts down the interface
         * just when hardware recovery has been started.
         */
-       if (wl->vif) {
-               WARN_ON(wl->vif != vif);
-               __wl1271_op_remove_interface(wl, true);
-       }
+       wl12xx_for_each_wlvif(wl, iter) {
+               if (iter != wlvif)
+                       continue;
 
+               __wl1271_op_remove_interface(wl, vif, true);
+               break;
+       }
+       WARN_ON(iter != wlvif);
+out:
        mutex_unlock(&wl->mutex);
        cancel_work_sync(&wl->recovery_work);
 }
 
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         bool set_assoc)
 {
        int ret;
-       bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+       bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
 
        /*
         * One of the side effects of the JOIN command is that it clears
@@ -2167,20 +2289,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
         * Keep the below message for now, unless it starts bothering
         * users who really like to roam a lot :)
         */
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                wl1271_info("JOIN while associated.");
 
        if (set_assoc)
-               set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+               set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
 
        if (is_ibss)
-               ret = wl12xx_cmd_role_start_ibss(wl);
+               ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
        else
-               ret = wl12xx_cmd_role_start_sta(wl);
+               ret = wl12xx_cmd_role_start_sta(wl, wlvif);
        if (ret < 0)
                goto out;
 
-       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
                goto out;
 
        /*
@@ -2189,19 +2311,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
         * the join. The acx_aid starts the keep-alive process, and the order
         * of the commands below is relevant.
         */
-       ret = wl1271_acx_keep_alive_mode(wl, true);
+       ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_aid(wl, wl->aid);
+       ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_cmd_build_klv_null_data(wl);
+       ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+       ret = wl1271_acx_keep_alive_config(wl, wlvif,
+                                          CMD_TEMPL_KLV_IDX_NULL_DATA,
                                           ACX_KEEP_ALIVE_TPL_VALID);
        if (ret < 0)
                goto out;
@@ -2210,34 +2333,34 @@ out:
        return ret;
 }
 
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
-       if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+       if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+               struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
                wl12xx_cmd_stop_channel_switch(wl);
-               ieee80211_chswitch_done(wl->vif, false);
+               ieee80211_chswitch_done(vif, false);
        }
 
        /* to stop listening to a channel, we disconnect */
-       ret = wl12xx_cmd_role_stop_sta(wl);
+       ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
        if (ret < 0)
                goto out;
 
-       memset(wl->bssid, 0, ETH_ALEN);
-
        /* reset TX security counters on a clean disconnect */
-       wl->tx_security_last_seq_lsb = 0;
-       wl->tx_security_seq = 0;
+       wlvif->tx_security_last_seq_lsb = 0;
+       wlvif->tx_security_seq = 0;
 
 out:
        return ret;
 }
 
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
-       wl->basic_rate_set = wl->bitrate_masks[wl->band];
-       wl->rate_set = wl->basic_rate_set;
+       wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+       wlvif->rate_set = wlvif->basic_rate_set;
 }
 
 static bool wl12xx_is_roc(struct wl1271 *wl)
@@ -2251,27 +2374,25 @@ static bool wl12xx_is_roc(struct wl1271 *wl)
        return true;
 }
 
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                 bool idle)
 {
        int ret;
 
        if (idle) {
                /* no need to croc if we weren't busy (e.g. during boot) */
                if (wl12xx_is_roc(wl)) {
-                       ret = wl12xx_croc(wl, wl->dev_role_id);
-                       if (ret < 0)
-                               goto out;
-
-                       ret = wl12xx_cmd_role_stop_dev(wl);
+                       ret = wl12xx_stop_dev(wl, wlvif);
                        if (ret < 0)
                                goto out;
                }
-               wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-               ret = wl1271_acx_sta_rate_policies(wl);
+               wlvif->rate_set =
+                       wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+               ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                if (ret < 0)
                        goto out;
                ret = wl1271_acx_keep_alive_config(
-                       wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+                       wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
                        ACX_KEEP_ALIVE_TPL_INVALID);
                if (ret < 0)
                        goto out;
@@ -2283,75 +2404,32 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
                        ieee80211_sched_scan_stopped(wl->hw);
                }
 
-               ret = wl12xx_cmd_role_start_dev(wl);
-               if (ret < 0)
-                       goto out;
-
-               ret = wl12xx_roc(wl, wl->dev_role_id);
-               if (ret < 0)
-                       goto out;
-               clear_bit(WL1271_FLAG_IDLE, &wl->flags);
-       }
-
-out:
-       return ret;
-}
-
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
-{
-       struct wl1271 *wl = hw->priv;
-       struct ieee80211_conf *conf = &hw->conf;
-       int channel, ret = 0;
-       bool is_ap;
-
-       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
-
-       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
-                    " changed 0x%x",
-                    channel,
-                    conf->flags & IEEE80211_CONF_PS ? "on" : "off",
-                    conf->power_level,
-                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
-                        changed);
-
-       /*
-        * mac80211 will go to idle nearly immediately after transmitting some
-        * frames, such as the deauth. To make sure those frames reach the air,
-        * wait here until the TX queue is fully flushed.
-        */
-       if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
-           (conf->flags & IEEE80211_CONF_IDLE))
-               wl1271_tx_flush(wl);
-
-       mutex_lock(&wl->mutex);
-
-       if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               /* we support configuring the channel and band while off */
-               if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
-                       wl->band = conf->channel->band;
-                       wl->channel = channel;
-               }
-
-               if ((changed & IEEE80211_CONF_CHANGE_POWER))
-                       wl->power_level = conf->power_level;
-
-               goto out;
+               ret = wl12xx_start_dev(wl, wlvif);
+               if (ret < 0)
+                       goto out;
+               clear_bit(WL1271_FLAG_IDLE, &wl->flags);
        }
 
-       is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+out:
+       return ret;
+}
+
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                            struct ieee80211_conf *conf, u32 changed)
+{
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+       int channel, ret;
 
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out;
+       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
 
        /* if the channel changes while joined, join again */
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
-           ((wl->band != conf->channel->band) ||
-            (wl->channel != channel))) {
+           ((wlvif->band != conf->channel->band) ||
+            (wlvif->channel != channel))) {
                /* send all pending packets */
                wl1271_tx_work_locked(wl);
-               wl->band = conf->channel->band;
-               wl->channel = channel;
+               wlvif->band = conf->channel->band;
+               wlvif->channel = channel;
 
                if (!is_ap) {
                        /*
@@ -2360,24 +2438,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                         * possible rate for the band as a fixed rate for
                         * association frames and other control messages.
                         */
-                       if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
-                               wl1271_set_band_rate(wl);
+                       if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+                               wl1271_set_band_rate(wl, wlvif);
 
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                wl1271_warning("rate policy for channel "
                                               "failed %d", ret);
 
-                       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+                       if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+                                    &wlvif->flags)) {
                                if (wl12xx_is_roc(wl)) {
                                        /* roaming */
-                                       ret = wl12xx_croc(wl, wl->dev_role_id);
+                                       ret = wl12xx_croc(wl,
+                                                         wlvif->dev_role_id);
                                        if (ret < 0)
-                                               goto out_sleep;
+                                               return ret;
                                }
-                               ret = wl1271_join(wl, false);
+                               ret = wl1271_join(wl, wlvif, false);
                                if (ret < 0)
                                        wl1271_warning("cmd join on channel "
                                                       "failed %d", ret);
@@ -2389,64 +2470,112 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
                                 */
                                if (wl12xx_is_roc(wl) &&
                                    !(conf->flags & IEEE80211_CONF_IDLE)) {
-                                       ret = wl12xx_croc(wl, wl->dev_role_id);
+                                       ret = wl12xx_stop_dev(wl, wlvif);
                                        if (ret < 0)
-                                               goto out_sleep;
+                                               return ret;
 
-                                       ret = wl12xx_roc(wl, wl->dev_role_id);
+                                       ret = wl12xx_start_dev(wl, wlvif);
                                        if (ret < 0)
-                                               wl1271_warning("roc failed %d",
-                                                              ret);
+                                               return ret;
                                }
                        }
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
-               ret = wl1271_sta_handle_idle(wl,
-                                       conf->flags & IEEE80211_CONF_IDLE);
-               if (ret < 0)
-                       wl1271_warning("idle mode change failed %d", ret);
-       }
-
        /*
         * if mac80211 changes the PSM mode, make sure the mode is not
         * incorrectly changed after the pspoll failure active window.
         */
        if (changed & IEEE80211_CONF_CHANGE_PS)
-               clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
+               clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
 
        if (conf->flags & IEEE80211_CONF_PS &&
-           !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
-               set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+           !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+               set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
 
                /*
                 * We enter PSM only if we're already associated.
                 * If we're not, we'll enter it when joining an SSID,
                 * through the bss_info_changed() hook.
                 */
-               if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+               if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                        wl1271_debug(DEBUG_PSM, "psm enabled");
-                       ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
-                                                wl->basic_rate, true);
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_POWER_SAVE_MODE,
+                                                wlvif->basic_rate, true);
                }
        } else if (!(conf->flags & IEEE80211_CONF_PS) &&
-                  test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+                  test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
                wl1271_debug(DEBUG_PSM, "psm disabled");
 
-               clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+               clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
 
-               if (test_bit(WL1271_FLAG_PSM, &wl->flags))
-                       ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
-                                                wl->basic_rate, true);
+               if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+                       ret = wl1271_ps_set_mode(wl, wlvif,
+                                                STATION_ACTIVE_MODE,
+                                                wlvif->basic_rate, true);
        }
 
-       if (conf->power_level != wl->power_level) {
-               ret = wl1271_acx_tx_power(wl, conf->power_level);
+       if (conf->power_level != wlvif->power_level) {
+               ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
                if (ret < 0)
-                       goto out_sleep;
+                       return ret;
+
+               wlvif->power_level = conf->power_level;
+       }
+
+       return 0;
+}
+
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
+       struct ieee80211_conf *conf = &hw->conf;
+       int channel, ret = 0;
+
+       channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+                    " changed 0x%x",
+                    channel,
+                    conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+                    conf->power_level,
+                    conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+                        changed);
+
+       /*
+        * mac80211 will go to idle nearly immediately after transmitting some
+        * frames, such as the deauth. To make sure those frames reach the air,
+        * wait here until the TX queue is fully flushed.
+        */
+       if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+           (conf->flags & IEEE80211_CONF_IDLE))
+               wl1271_tx_flush(wl);
+
+       mutex_lock(&wl->mutex);
+
+       /* we support configuring the channel and band even while off */
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               wl->band = conf->channel->band;
+               wl->channel = channel;
+       }
 
+       if (changed & IEEE80211_CONF_CHANGE_POWER)
                wl->power_level = conf->power_level;
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       /* configure each interface */
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl12xx_config_vif(wl, wlvif, conf, changed);
+               if (ret < 0)
+                       goto out_sleep;
        }
 
 out_sleep:
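
wl1271_op_config() now applies the per-interface part of the configuration by walking every registered vif instead of touching a single global one; the same wl12xx_for_each_wlvif() walk shows up below in the filter and RTS-threshold paths. The macro itself is not visible in this diff; presumably it is a thin list_for_each_entry() wrapper over wl->wlvif_list, which add_interface populates with list_add() and the remove path trims with list_del(). An assumed sketch:

        /* assumed definition; only the users of the macro appear in this diff */
        #define wl12xx_for_each_wlvif(wl, wlvif) \
                list_for_each_entry(wlvif, &(wl)->wlvif_list, list)

This is also why the helpers above (wl12xx_config_vif(), wl1271_acx_*(), wl1271_ps_set_mode(), ...) now take a (wl, wlvif) pair rather than just wl: all interface-scoped state lives in struct wl12xx_vif.
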
@@ -2509,6 +2638,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
 {
        struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
+
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2657,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS) {
-               if (*total & FIF_ALLMULTI)
-                       ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
-               else if (fp)
-                       ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
-                                                          fp->mc_list,
-                                                          fp->mc_list_length);
-               if (ret < 0)
-                       goto out_sleep;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+                       if (*total & FIF_ALLMULTI)
+                               ret = wl1271_acx_group_address_tbl(wl, wlvif,
+                                                                  false,
+                                                                  NULL, 0);
+                       else if (fp)
+                               ret = wl1271_acx_group_address_tbl(wl, wlvif,
+                                                       fp->enabled,
+                                                       fp->mc_list,
+                                                       fp->mc_list_length);
+                       if (ret < 0)
+                               goto out_sleep;
+               }
        }
 
        /*
@@ -2551,9 +2687,10 @@ out:
        kfree(fp);
 }
 
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
-                       u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
-                       u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                               u8 id, u8 key_type, u8 key_size,
+                               const u8 *key, u8 hlid, u32 tx_seq_32,
+                               u16 tx_seq_16)
 {
        struct wl1271_ap_key *ap_key;
        int i;
@@ -2568,10 +2705,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
         * an existing key.
         */
        for (i = 0; i < MAX_NUM_KEYS; i++) {
-               if (wl->recorded_ap_keys[i] == NULL)
+               if (wlvif->ap.recorded_keys[i] == NULL)
                        break;
 
-               if (wl->recorded_ap_keys[i]->id == id) {
+               if (wlvif->ap.recorded_keys[i]->id == id) {
                        wl1271_warning("trying to record key replacement");
                        return -EINVAL;
                }
@@ -2592,21 +2729,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
        ap_key->tx_seq_32 = tx_seq_32;
        ap_key->tx_seq_16 = tx_seq_16;
 
-       wl->recorded_ap_keys[i] = ap_key;
+       wlvif->ap.recorded_keys[i] = ap_key;
        return 0;
 }
 
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i;
 
        for (i = 0; i < MAX_NUM_KEYS; i++) {
-               kfree(wl->recorded_ap_keys[i]);
-               wl->recorded_ap_keys[i] = NULL;
+               kfree(wlvif->ap.recorded_keys[i]);
+               wlvif->ap.recorded_keys[i] = NULL;
        }
 }
 
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i, ret = 0;
        struct wl1271_ap_key *key;
@@ -2614,15 +2751,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
 
        for (i = 0; i < MAX_NUM_KEYS; i++) {
                u8 hlid;
-               if (wl->recorded_ap_keys[i] == NULL)
+               if (wlvif->ap.recorded_keys[i] == NULL)
                        break;
 
-               key = wl->recorded_ap_keys[i];
+               key = wlvif->ap.recorded_keys[i];
                hlid = key->hlid;
                if (hlid == WL12XX_INVALID_LINK_ID)
-                       hlid = wl->ap_bcast_hlid;
+                       hlid = wlvif->ap.bcast_hlid;
 
-               ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+               ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
                                            key->id, key->key_type,
                                            key->key_size, key->key,
                                            hlid, key->tx_seq_32,
@@ -2635,23 +2772,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
        }
 
        if (wep_key_added) {
-               ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
-                                                    wl->ap_bcast_hlid);
+               ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+                                                    wlvif->ap.bcast_hlid);
                if (ret < 0)
                        goto out;
        }
 
 out:
-       wl1271_free_ap_keys(wl);
+       wl1271_free_ap_keys(wl, wlvif);
        return ret;
 }
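
Keys handed to wl1271_set_key() before the AP role has actually started cannot be programmed into the firmware yet, so wl1271_record_ap_key() parks them in wlvif->ap.recorded_keys[] and wl1271_ap_init_hwenc() replays them (and drops the records) once the AP is up. A stand-alone sketch of that record-then-replay shape, with hypothetical names:

        #include <stdlib.h>
        #include <string.h>

        #define MAX_KEYS 8

        struct pending_key { int id; size_t len; unsigned char key[32]; };
        static struct pending_key *pending[MAX_KEYS];

        /* hardware not ready yet: just remember the key */
        static int record_key(int id, const unsigned char *key, size_t len)
        {
                int i;

                if (len > sizeof(pending[0]->key))
                        return -1;
                for (i = 0; i < MAX_KEYS && pending[i]; i++)
                        if (pending[i]->id == id)
                                return -1;          /* no replacement while parked */
                if (i == MAX_KEYS)
                        return -1;                  /* table full */
                pending[i] = malloc(sizeof(*pending[i]));
                if (!pending[i])
                        return -1;
                pending[i]->id = id;
                pending[i]->len = len;
                memcpy(pending[i]->key, key, len);
                return 0;
        }

        /* bring-up finished: push everything to the hardware, then drop the records */
        static void replay_keys(int (*program_hw)(int id, const unsigned char *key, size_t len))
        {
                int i;

                for (i = 0; i < MAX_KEYS && pending[i]; i++)
                        program_hw(pending[i]->id, pending[i]->key, pending[i]->len);
                for (i = 0; i < MAX_KEYS; i++) {
                        free(pending[i]);
                        pending[i] = NULL;
                }
        }
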
 
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      u16 action, u8 id, u8 key_type,
                       u8 key_size, const u8 *key, u32 tx_seq_32,
                       u16 tx_seq_16, struct ieee80211_sta *sta)
 {
        int ret;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
        if (is_ap) {
                struct wl1271_station *wl_sta;
@@ -2661,10 +2799,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                        wl_sta = (struct wl1271_station *)sta->drv_priv;
                        hlid = wl_sta->hlid;
                } else {
-                       hlid = wl->ap_bcast_hlid;
+                       hlid = wlvif->ap.bcast_hlid;
                }
 
-               if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+               if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
                        /*
                         * We do not support removing keys after AP shutdown.
                         * Pretend we do to make mac80211 happy.
@@ -2672,12 +2810,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                        if (action != KEY_ADD_OR_REPLACE)
                                return 0;
 
-                       ret = wl1271_record_ap_key(wl, id,
+                       ret = wl1271_record_ap_key(wl, wlvif, id,
                                             key_type, key_size,
                                             key, hlid, tx_seq_32,
                                             tx_seq_16);
                } else {
-                       ret = wl1271_cmd_set_ap_key(wl, action,
+                       ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
                                             id, key_type, key_size,
                                             key, hlid, tx_seq_32,
                                             tx_seq_16);
@@ -2718,10 +2856,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
 
                /* don't remove key if hlid was already deleted */
                if (action == KEY_REMOVE &&
-                   wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+                   wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
                        return 0;
 
-               ret = wl1271_cmd_set_sta_key(wl, action,
+               ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
                                             id, key_type, key_size,
                                             key, addr, tx_seq_32,
                                             tx_seq_16);
@@ -2731,8 +2869,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
                /* the default WEP key needs to be configured at least once */
                if (key_type == KEY_WEP) {
                        ret = wl12xx_cmd_set_default_wep_key(wl,
-                                                            wl->default_key,
-                                                            wl->sta_hlid);
+                                                       wlvif->default_key,
+                                                       wlvif->sta.hlid);
                        if (ret < 0)
                                return ret;
                }
@@ -2747,6 +2885,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_key_conf *key_conf)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
        u32 tx_seq_32 = 0;
        u16 tx_seq_16 = 0;
@@ -2782,20 +2921,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                key_type = KEY_TKIP;
 
                key_conf->hw_key_idx = key_conf->keyidx;
-               tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-               tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+               tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+               tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key_type = KEY_AES;
 
-               key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-               tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-               tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+               key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+               tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
                break;
        case WL1271_CIPHER_SUITE_GEM:
                key_type = KEY_GEM;
-               tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
-               tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+               tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+               tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
                break;
        default:
                wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2945,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        switch (cmd) {
        case SET_KEY:
-               ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+               ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
                                 key_conf->keyidx, key_type,
                                 key_conf->keylen, key_conf->key,
                                 tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2956,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                break;
 
        case DISABLE_KEY:
-               ret = wl1271_set_key(wl, KEY_REMOVE,
+               ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
                                     key_conf->keyidx, key_type,
                                     key_conf->keylen, key_conf->key,
                                     0, 0, sta);
@@ -2847,6 +2986,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                             struct cfg80211_scan_request *req)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
        int ret;
        u8 *ssid = NULL;
        size_t len = 0;
@@ -2876,16 +3017,15 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
 
        /* cancel ROC before scanning */
        if (wl12xx_is_roc(wl)) {
-               if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+               if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                        /* don't allow scanning right now */
                        ret = -EBUSY;
                        goto out_sleep;
                }
-               wl12xx_croc(wl, wl->dev_role_id);
-               wl12xx_cmd_role_stop_dev(wl);
+               wl12xx_stop_dev(wl, wlvif);
        }
 
-       ret = wl1271_scan(hw->priv, ssid, len, req);
+       ret = wl1271_scan(hw->priv, vif, ssid, len, req);
 out_sleep:
        wl1271_ps_elp_sleep(wl);
 out:
@@ -2921,6 +3061,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
        }
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+       wl->scan_vif = NULL;
        wl->scan.req = NULL;
        ieee80211_scan_completed(wl->hw, true);
 
@@ -2938,6 +3079,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
                                      struct ieee80211_sched_scan_ies *ies)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3090,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       ret = wl1271_scan_sched_scan_config(wl, req, ies);
+       ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
        if (ret < 0)
                goto out_sleep;
 
-       ret = wl1271_scan_sched_scan_start(wl);
+       ret = wl1271_scan_sched_scan_start(wl, wlvif);
        if (ret < 0)
                goto out_sleep;
 
@@ -3017,6 +3159,7 @@ out:
 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        int ret = 0;
 
        mutex_lock(&wl->mutex);
@@ -3030,10 +3173,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        if (ret < 0)
                goto out;
 
-       ret = wl1271_acx_rts_threshold(wl, value);
-       if (ret < 0)
-               wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+               if (ret < 0)
+                       wl1271_warning("set rts threshold failed: %d", ret);
+       }
        wl1271_ps_elp_sleep(wl);
 
 out:
@@ -3042,9 +3186,10 @@ out:
        return ret;
 }
 
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
                            int offset)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        u8 ssid_len;
        const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
                                         skb->len - offset);
@@ -3060,8 +3205,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
                return -EINVAL;
        }
 
-       wl->ssid_len = ssid_len;
-       memcpy(wl->ssid, ptr+2, ssid_len);
+       wlvif->ssid_len = ssid_len;
+       memcpy(wlvif->ssid, ptr+2, ssid_len);
        return 0;
 }
 
@@ -3096,18 +3241,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
        skb_trim(skb, skb->len - len);
 }
 
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
-                                        u8 *probe_rsp_data,
-                                        size_t probe_rsp_len,
-                                        u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+                                        struct ieee80211_vif *vif)
+{
+       struct sk_buff *skb;
+       int ret;
+
+       skb = ieee80211_proberesp_get(wl->hw, vif);
+       if (!skb)
+               return -EOPNOTSUPP;
+
+       ret = wl1271_cmd_template_set(wl,
+                                     CMD_TEMPL_AP_PROBE_RESPONSE,
+                                     skb->data,
+                                     skb->len, 0,
+                                     rates);
+
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+                                            struct ieee80211_vif *vif,
+                                            u8 *probe_rsp_data,
+                                            size_t probe_rsp_len,
+                                            u32 rates)
 {
-       struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
        int ssid_ie_offset, ie_offset, templ_len;
        const u8 *ptr;
 
        /* no need to change probe response if the SSID is set correctly */
-       if (wl->ssid_len > 0)
+       if (wlvif->ssid_len > 0)
                return wl1271_cmd_template_set(wl,
                                               CMD_TEMPL_AP_PROBE_RESPONSE,
                                               probe_rsp_data,
@@ -3153,16 +3320,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
 }
 
 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+                                      struct ieee80211_vif *vif,
                                       struct ieee80211_bss_conf *bss_conf,
                                       u32 changed)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
                if (bss_conf->use_short_slot)
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+                       ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
                else
-                       ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+                       ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
                if (ret < 0) {
                        wl1271_warning("Set slot time failed %d", ret);
                        goto out;
@@ -3171,16 +3340,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
 
        if (changed & BSS_CHANGED_ERP_PREAMBLE) {
                if (bss_conf->use_short_preamble)
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+                       wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
                else
-                       wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+                       wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
        }
 
        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
                if (bss_conf->use_cts_prot)
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+                       ret = wl1271_acx_cts_protect(wl, wlvif,
+                                                    CTSPROTECT_ENABLE);
                else
-                       ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+                       ret = wl1271_acx_cts_protect(wl, wlvif,
+                                                    CTSPROTECT_DISABLE);
                if (ret < 0) {
                        wl1271_warning("Set ctsprotect failed %d", ret);
                        goto out;
@@ -3196,14 +3367,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                                          struct ieee80211_bss_conf *bss_conf,
                                          u32 changed)
 {
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret = 0;
 
        if ((changed & BSS_CHANGED_BEACON_INT)) {
                wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
                        bss_conf->beacon_int);
 
-               wl->beacon_int = bss_conf->beacon_int;
+               wlvif->beacon_int = bss_conf->beacon_int;
+       }
+
+       if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+               u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+               if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+                       wl1271_debug(DEBUG_AP, "probe response updated");
+                       set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+               }
        }
 
        if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3394,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
                u16 tmpl_id;
 
-               if (!beacon)
+               if (!beacon) {
+                       ret = -EINVAL;
                        goto out;
+               }
 
                wl1271_debug(DEBUG_MASTER, "beacon updated");
 
-               ret = wl1271_ssid_set(wl, beacon, ieoffset);
+               ret = wl1271_ssid_set(vif, beacon, ieoffset);
                if (ret < 0) {
                        dev_kfree_skb(beacon);
                        goto out;
                }
-               min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+               min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
                tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
                                  CMD_TEMPL_BEACON;
                ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3418,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                        goto out;
                }
 
+               /*
+                * In case we already have a probe-resp beacon set explicitly
+                * by usermode, don't use the beacon data.
+                */
+               if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+                       goto end_bcn;
+
                /* remove TIM ie from probe response */
                wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
 
@@ -3254,7 +3443,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                 IEEE80211_STYPE_PROBE_RESP);
                if (is_ap)
-                       ret = wl1271_ap_set_probe_resp_tmpl(wl,
+                       ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
                                                beacon->data,
                                                beacon->len,
                                                min_rate);
@@ -3264,12 +3453,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
                                                beacon->data,
                                                beacon->len, 0,
                                                min_rate);
+end_bcn:
                dev_kfree_skb(beacon);
                if (ret < 0)
                        goto out;
        }
 
 out:
+       if (ret != 0)
+               wl1271_error("beacon info change failed: %d", ret);
        return ret;
 }
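
The legacy probe-response template above is derived from the beacon by stripping the TIM IE with wl12xx_remove_ie(). A rough sketch of what such an IE-stripping helper can look like, built on cfg80211_find_ie(); this is an illustration under those assumptions, not the driver's exact implementation:

static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
	size_t len;
	const u8 *next, *end = skb->data + skb->len;
	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
					skb->len - ieoffset);
	if (!ie)
		return;

	/* drop the IE, close the gap, then shrink the skb accordingly */
	len = ie[1] + 2;
	next = ie + len;
	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);
}
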
 
@@ -3279,23 +3471,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
                                       struct ieee80211_bss_conf *bss_conf,
                                       u32 changed)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
 
        if ((changed & BSS_CHANGED_BASIC_RATES)) {
                u32 rates = bss_conf->basic_rates;
 
-               wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
-                                                                wl->band);
-               wl->basic_rate = wl1271_tx_min_rate_get(wl,
-                                                       wl->basic_rate_set);
+               wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+                                                                wlvif->band);
+               wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+                                                       wlvif->basic_rate_set);
 
-               ret = wl1271_init_ap_rates(wl);
+               ret = wl1271_init_ap_rates(wl, wlvif);
                if (ret < 0) {
                        wl1271_error("AP rate policy change failed %d", ret);
                        goto out;
                }
 
-               ret = wl1271_ap_init_templates(wl);
+               ret = wl1271_ap_init_templates(wl, vif);
                if (ret < 0)
                        goto out;
        }
@@ -3306,38 +3499,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
 
        if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
                if (bss_conf->enable_beacon) {
-                       if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
-                               ret = wl12xx_cmd_role_start_ap(wl);
+                       if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+                               ret = wl12xx_cmd_role_start_ap(wl, wlvif);
                                if (ret < 0)
                                        goto out;
 
-                               ret = wl1271_ap_init_hwenc(wl);
+                               ret = wl1271_ap_init_hwenc(wl, wlvif);
                                if (ret < 0)
                                        goto out;
 
-                               set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
                                wl1271_debug(DEBUG_AP, "started AP");
                        }
                } else {
-                       if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
-                               ret = wl12xx_cmd_role_stop_ap(wl);
+                       if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+                               ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
                                if (ret < 0)
                                        goto out;
 
-                               clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+                               clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+                               clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+                                         &wlvif->flags);
                                wl1271_debug(DEBUG_AP, "stopped AP");
                        }
                }
        }
 
-       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
        if (ret < 0)
                goto out;
 
        /* Handle HT information change */
        if ((changed & BSS_CHANGED_HT) &&
            (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-               ret = wl1271_acx_set_ht_information(wl,
+               ret = wl1271_acx_set_ht_information(wl, wlvif,
                                        bss_conf->ht_operation_mode);
                if (ret < 0) {
                        wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3550,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                                        struct ieee80211_bss_conf *bss_conf,
                                        u32 changed)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        bool do_join = false, set_assoc = false;
-       bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+       bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
        bool ibss_joined = false;
        u32 sta_rate_set = 0;
        int ret;
@@ -3373,14 +3569,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
 
        if (changed & BSS_CHANGED_IBSS) {
                if (bss_conf->ibss_joined) {
-                       set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+                       set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
                        ibss_joined = true;
                } else {
-                       if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
-                                              &wl->flags)) {
-                               wl1271_unjoin(wl);
-                               wl12xx_cmd_role_start_dev(wl);
-                               wl12xx_roc(wl, wl->dev_role_id);
+                       if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+                                              &wlvif->flags)) {
+                               wl1271_unjoin(wl, wlvif);
+                               wl12xx_start_dev(wl, wlvif);
                        }
                }
        }
@@ -3396,46 +3591,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
                             bss_conf->enable_beacon ? "enabled" : "disabled");
 
-               if (bss_conf->enable_beacon)
-                       wl->set_bss_type = BSS_TYPE_IBSS;
-               else
-                       wl->set_bss_type = BSS_TYPE_STA_BSS;
                do_join = true;
        }
 
+       if (changed & BSS_CHANGED_IDLE) {
+               ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+               if (ret < 0)
+                       wl1271_warning("idle mode change failed %d", ret);
+       }
+
        if ((changed & BSS_CHANGED_CQM)) {
                bool enable = false;
                if (bss_conf->cqm_rssi_thold)
                        enable = true;
-               ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+               ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
                                                  bss_conf->cqm_rssi_thold,
                                                  bss_conf->cqm_rssi_hyst);
                if (ret < 0)
                        goto out;
-               wl->rssi_thold = bss_conf->cqm_rssi_thold;
+               wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
        }
 
-       if ((changed & BSS_CHANGED_BSSID) &&
-           /*
-            * Now we know the correct bssid, so we send a new join command
-            * and enable the BSSID filter
-            */
-           memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
-               memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
-               if (!is_zero_ether_addr(wl->bssid)) {
-                       ret = wl1271_cmd_build_null_data(wl);
+       if (changed & BSS_CHANGED_BSSID)
+               if (!is_zero_ether_addr(bss_conf->bssid)) {
+                       ret = wl12xx_cmd_build_null_data(wl, wlvif);
                        if (ret < 0)
                                goto out;
 
-                       ret = wl1271_build_qos_null_data(wl);
+                       ret = wl1271_build_qos_null_data(wl, vif);
                        if (ret < 0)
                                goto out;
 
                        /* Need to update the BSSID (for filtering etc) */
                        do_join = true;
                }
-       }
 
        if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
                rcu_read_lock();
@@ -3459,26 +3648,28 @@ sta_not_found:
                if (bss_conf->assoc) {
                        u32 rates;
                        int ieoffset;
-                       wl->aid = bss_conf->aid;
+                       wlvif->aid = bss_conf->aid;
                        set_assoc = true;
 
-                       wl->ps_poll_failures = 0;
+                       wlvif->ps_poll_failures = 0;
 
                        /*
                         * use basic rates from AP, and determine lowest rate
                         * to use with control frames.
                         */
                        rates = bss_conf->basic_rates;
-                       wl->basic_rate_set =
+                       wlvif->basic_rate_set =
                                wl1271_tx_enabled_rates_get(wl, rates,
-                                                           wl->band);
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+                                                           wlvif->band);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
                        if (sta_rate_set)
-                               wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+                               wlvif->rate_set =
+                                       wl1271_tx_enabled_rates_get(wl,
                                                                sta_rate_set,
-                                                               wl->band);
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                                                               wlvif->band);
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                goto out;
 
@@ -3488,53 +3679,56 @@ sta_not_found:
                         * updates it by itself when the first beacon is
                         * received after a join.
                         */
-                       ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+                       ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
                        if (ret < 0)
                                goto out;
 
                        /*
                         * Get a template for hardware connection maintenance
                         */
-                       dev_kfree_skb(wl->probereq);
-                       wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+                       dev_kfree_skb(wlvif->probereq);
+                       wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+                                                                       wlvif,
+                                                                       NULL);
                        ieoffset = offsetof(struct ieee80211_mgmt,
                                            u.probe_req.variable);
-                       wl1271_ssid_set(wl, wl->probereq, ieoffset);
+                       wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
 
                        /* enable the connection monitoring feature */
-                       ret = wl1271_acx_conn_monit_params(wl, true);
+                       ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
                        if (ret < 0)
                                goto out;
                } else {
                        /* use defaults when not associated */
                        bool was_assoc =
-                           !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
-                                                &wl->flags);
+                           !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+                                                &wlvif->flags);
                        bool was_ifup =
-                           !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
-                                                &wl->flags);
-                       wl->aid = 0;
+                           !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+                                                &wlvif->flags);
+                       wlvif->aid = 0;
 
                        /* free probe-request template */
-                       dev_kfree_skb(wl->probereq);
-                       wl->probereq = NULL;
+                       dev_kfree_skb(wlvif->probereq);
+                       wlvif->probereq = NULL;
 
                        /* re-enable dynamic ps - just in case */
-                       ieee80211_enable_dyn_ps(wl->vif);
+                       ieee80211_enable_dyn_ps(vif);
 
                        /* revert back to minimum rates for the current band */
-                       wl1271_set_band_rate(wl);
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                       wl1271_set_band_rate(wl, wlvif);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                goto out;
 
                        /* disable connection monitor features */
-                       ret = wl1271_acx_conn_monit_params(wl, false);
+                       ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
 
                        /* Disable the keep-alive feature */
-                       ret = wl1271_acx_keep_alive_mode(wl, false);
+                       ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
                        if (ret < 0)
                                goto out;
 
@@ -3546,7 +3740,7 @@ sta_not_found:
                                 * no IF_OPER_UP notification.
                                 */
                                if (!was_ifup) {
-                                       ret = wl12xx_croc(wl, wl->role_id);
+                                       ret = wl12xx_croc(wl, wlvif->role_id);
                                        if (ret < 0)
                                                goto out;
                                }
@@ -3555,17 +3749,16 @@ sta_not_found:
                                 * roaming on the same channel, until we
                                 * have a better flow...)
                                 */
-                               if (test_bit(wl->dev_role_id, wl->roc_map)) {
-                                       ret = wl12xx_croc(wl, wl->dev_role_id);
+                               if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+                                       ret = wl12xx_croc(wl,
+                                                         wlvif->dev_role_id);
                                        if (ret < 0)
                                                goto out;
                                }
 
-                               wl1271_unjoin(wl);
-                               if (!(conf_flags & IEEE80211_CONF_IDLE)) {
-                                       wl12xx_cmd_role_start_dev(wl);
-                                       wl12xx_roc(wl, wl->dev_role_id);
-                               }
+                               wl1271_unjoin(wl, wlvif);
+                               if (!(conf_flags & IEEE80211_CONF_IDLE))
+                                       wl12xx_start_dev(wl, wlvif);
                        }
                }
        }
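
The wl12xx_start_dev()/wl12xx_stop_dev() calls above replace the open-coded role_start_dev + ROC and CROC + role_stop_dev sequences of the old code. A hedged sketch of the intended pairing; the per-vif signatures and error handling here are assumptions based on the calls visible in these hunks:

static int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* bring up the device role, then remain-on-channel on it */
	ret = wl12xx_cmd_role_start_dev(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
	if (ret < 0)
		wl12xx_cmd_role_stop_dev(wl, wlvif);

	return ret;
}

static int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* cancel the ROC before tearing the device role down */
	ret = wl12xx_croc(wl, wlvif->dev_role_id);
	if (ret < 0)
		return ret;

	return wl12xx_cmd_role_stop_dev(wl, wlvif);
}
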
@@ -3576,27 +3769,28 @@ sta_not_found:
 
                if (bss_conf->ibss_joined) {
                        u32 rates = bss_conf->basic_rates;
-                       wl->basic_rate_set =
+                       wlvif->basic_rate_set =
                                wl1271_tx_enabled_rates_get(wl, rates,
-                                                           wl->band);
-                       wl->basic_rate =
-                               wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+                                                           wlvif->band);
+                       wlvif->basic_rate =
+                               wl1271_tx_min_rate_get(wl,
+                                                      wlvif->basic_rate_set);
 
                        /* by default, use 11b + OFDM rates */
-                       wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
-                       ret = wl1271_acx_sta_rate_policies(wl);
+                       wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+                       ret = wl1271_acx_sta_rate_policies(wl, wlvif);
                        if (ret < 0)
                                goto out;
                }
        }
 
-       ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+       ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
        if (ret < 0)
                goto out;
 
        if (changed & BSS_CHANGED_ARP_FILTER) {
                __be32 addr = bss_conf->arp_addr_list[0];
-               WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+               WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
 
                if (bss_conf->arp_addr_cnt == 1 &&
                    bss_conf->arp_filter_enabled) {
@@ -3606,24 +3800,24 @@ sta_not_found:
                         * isn't being set (when sending), so we have to
                         * reconfigure the template upon every ip change.
                         */
-                       ret = wl1271_cmd_build_arp_rsp(wl, addr);
+                       ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
                        if (ret < 0) {
                                wl1271_warning("build arp rsp failed: %d", ret);
                                goto out;
                        }
 
-                       ret = wl1271_acx_arp_ip_filter(wl,
+                       ret = wl1271_acx_arp_ip_filter(wl, wlvif,
                                ACX_ARP_FILTER_ARP_FILTERING,
                                addr);
                } else
-                       ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+                       ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
 
                if (ret < 0)
                        goto out;
        }
 
        if (do_join) {
-               ret = wl1271_join(wl, set_assoc);
+               ret = wl1271_join(wl, wlvif, set_assoc);
                if (ret < 0) {
                        wl1271_warning("cmd join failed %d", ret);
                        goto out;
@@ -3631,35 +3825,31 @@ sta_not_found:
 
                /* ROC until connected (after EAPOL exchange) */
                if (!is_ibss) {
-                       ret = wl12xx_roc(wl, wl->role_id);
+                       ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
                        if (ret < 0)
                                goto out;
 
-                       wl1271_check_operstate(wl,
+                       wl1271_check_operstate(wl, wlvif,
                                               ieee80211_get_operstate(vif));
                }
                /*
                 * stop device role if started (we might already be in
                 * STA role). TODO: make it better.
                 */
-               if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
-                       ret = wl12xx_croc(wl, wl->dev_role_id);
-                       if (ret < 0)
-                               goto out;
-
-                       ret = wl12xx_cmd_role_stop_dev(wl);
+               if (wlvif->dev_role_id != WL12XX_INVALID_ROLE_ID) {
+                       ret = wl12xx_stop_dev(wl, wlvif);
                        if (ret < 0)
                                goto out;
                }
 
                /* If we want to go in PSM but we're not there yet */
-               if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
-                   !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+               if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+                   !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
                        enum wl1271_cmd_ps_mode mode;
 
                        mode = STATION_POWER_SAVE_MODE;
-                       ret = wl1271_ps_set_mode(wl, mode,
-                                                wl->basic_rate,
+                       ret = wl1271_ps_set_mode(wl, wlvif, mode,
+                                                wlvif->basic_rate,
                                                 true);
                        if (ret < 0)
                                goto out;
@@ -3673,7 +3863,7 @@ sta_not_found:
                        ret = wl1271_acx_set_ht_capabilities(wl,
                                                             &sta_ht_cap,
                                                             true,
-                                                            wl->sta_hlid);
+                                                            wlvif->sta.hlid);
                        if (ret < 0) {
                                wl1271_warning("Set ht cap true failed %d",
                                               ret);
@@ -3685,7 +3875,7 @@ sta_not_found:
                        ret = wl1271_acx_set_ht_capabilities(wl,
                                                             &sta_ht_cap,
                                                             false,
-                                                            wl->sta_hlid);
+                                                            wlvif->sta.hlid);
                        if (ret < 0) {
                                wl1271_warning("Set ht cap false failed %d",
                                               ret);
@@ -3697,7 +3887,7 @@ sta_not_found:
        /* Handle HT information change. Done after join. */
        if ((changed & BSS_CHANGED_HT) &&
            (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
-               ret = wl1271_acx_set_ht_information(wl,
+               ret = wl1271_acx_set_ht_information(wl, wlvif,
                                        bss_conf->ht_operation_mode);
                if (ret < 0) {
                        wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3905,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
                                       u32 changed)
 {
        struct wl1271 *wl = hw->priv;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3917,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
+       if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+               goto out;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
@@ -3746,6 +3940,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
                             const struct ieee80211_tx_queue_params *params)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        u8 ps_scheme;
        int ret = 0;
 
@@ -3792,13 +3987,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
         * the txop is configured by mac80211 in units of 32us,
         * but we need it in microseconds
         */
-       ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+       ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
                                params->cw_min, params->cw_max,
                                params->aifs, params->txop << 5);
        if (ret < 0)
                goto out_sleep;
 
-       ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+       ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
                                 CONF_CHANNEL_TYPE_EDCF,
                                 wl1271_tx_get_queue(queue),
                                 ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4056,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
 }
 
 static int wl1271_allocate_sta(struct wl1271 *wl,
-                            struct ieee80211_sta *sta,
-                            u8 *hlid)
+                            struct wl12xx_vif *wlvif,
+                            struct ieee80211_sta *sta)
 {
        struct wl1271_station *wl_sta;
-       int id;
+       int ret;
 
-       id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
-       if (id >= AP_MAX_STATIONS) {
+
+       if (wl->active_sta_count >= AP_MAX_STATIONS) {
                wl1271_warning("could not allocate HLID - too much stations");
                return -EBUSY;
        }
 
        wl_sta = (struct wl1271_station *)sta->drv_priv;
-       set_bit(id, wl->ap_hlid_map);
-       wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
-       *hlid = wl_sta->hlid;
+       ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+       if (ret < 0) {
+               wl1271_warning("could not allocate HLID - too many links");
+               return -EBUSY;
+       }
+
+       set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
        memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
        wl->active_sta_count++;
        return 0;
 }
 
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
-       int id = hlid - WL1271_AP_STA_HLID_START;
-
-       if (hlid < WL1271_AP_STA_HLID_START)
-               return;
-
-       if (!test_bit(id, wl->ap_hlid_map))
+       if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
                return;
 
-       clear_bit(id, wl->ap_hlid_map);
+       clear_bit(hlid, wlvif->ap.sta_hlid_map);
        memset(wl->links[hlid].addr, 0, ETH_ALEN);
        wl->links[hlid].ba_bitmap = 0;
        wl1271_tx_reset_link_queues(wl, hlid);
        __clear_bit(hlid, &wl->ap_ps_map);
        __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+       wl12xx_free_link(wl, wlvif, &hlid);
        wl->active_sta_count--;
 }
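
wl12xx_allocate_link()/wl12xx_free_link() take over the HLID bookkeeping that previously lived in wl->ap_hlid_map. A sketch of the likely shape, assuming a device-wide wl->links_map bitmap plus a per-vif wlvif->links_map (both field names are assumptions here, not taken from this hunk):

static int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				u8 *hlid)
{
	u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);

	if (link >= WL12XX_MAX_LINKS)
		return -EBUSY;

	/* claim the link both globally and for this interface */
	__set_bit(link, wl->links_map);
	__set_bit(link, wlvif->links_map);
	*hlid = link;
	return 0;
}

static void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			     u8 *hlid)
{
	if (*hlid == WL12XX_INVALID_LINK_ID)
		return;

	__clear_bit(*hlid, wl->links_map);
	__clear_bit(*hlid, wlvif->links_map);
	*hlid = WL12XX_INVALID_LINK_ID;
}
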
 
@@ -3906,6 +4101,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
                             struct ieee80211_sta *sta)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       struct wl1271_station *wl_sta;
        int ret = 0;
        u8 hlid;
 
@@ -3914,20 +4111,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS)
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS)
                goto out;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
 
-       ret = wl1271_allocate_sta(wl, sta, &hlid);
+       ret = wl1271_allocate_sta(wl, wlvif, sta);
        if (ret < 0)
                goto out;
 
+       wl_sta = (struct wl1271_station *)sta->drv_priv;
+       hlid = wl_sta->hlid;
+
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out_free_sta;
 
-       ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+       ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
        if (ret < 0)
                goto out_sleep;
 
@@ -3944,7 +4144,7 @@ out_sleep:
 
 out_free_sta:
        if (ret < 0)
-               wl1271_free_sta(wl, hlid);
+               wl1271_free_sta(wl, wlvif, hlid);
 
 out:
        mutex_unlock(&wl->mutex);
@@ -3956,6 +4156,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
                                struct ieee80211_sta *sta)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl1271_station *wl_sta;
        int ret = 0, id;
 
@@ -3964,14 +4165,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
        if (unlikely(wl->state == WL1271_STATE_OFF))
                goto out;
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS)
+       if (wlvif->bss_type != BSS_TYPE_AP_BSS)
                goto out;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
 
        wl_sta = (struct wl1271_station *)sta->drv_priv;
-       id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
-       if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+       id = wl_sta->hlid;
+       if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
                goto out;
 
        ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4183,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out_sleep;
 
-       wl1271_free_sta(wl, wl_sta->hlid);
+       wl1271_free_sta(wl, wlvif, wl_sta->hlid);
 
 out_sleep:
        wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4200,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
                                  u8 buf_size)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
        u8 hlid, *ba_bitmap;
 
@@ -4016,10 +4218,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
                goto out;
        }
 
-       if (wl->bss_type == BSS_TYPE_STA_BSS) {
-               hlid = wl->sta_hlid;
-               ba_bitmap = &wl->ba_rx_bitmap;
-       } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+               hlid = wlvif->sta.hlid;
+               ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+       } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
                struct wl1271_station *wl_sta;
 
                wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4241,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
 
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
-               if (!wl->ba_support || !wl->ba_allowed) {
+               if (!wlvif->ba_support || !wlvif->ba_allowed) {
                        ret = -ENOTSUPP;
                        break;
                }
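
ba_bitmap above points at a per-link byte (wlvif->sta.ba_rx_bitmap for a station interface, the per-link ba_bitmap zeroed in wl1271_free_sta() for AP mode), with one bit per TID. A simplified illustration of how the RX BA start/stop cases would maintain it; the ACX call name below is an assumed placeholder, not confirmed by this hunk:

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* assumed ACX helper that opens an RX BA session in FW */
		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn,
							 true, hlid);
		if (!ret)
			*ba_bitmap |= BIT(tid);	/* remember the open session */
		break;
	case IEEE80211_AMPDU_RX_STOP:
		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0,
							 false, hlid);
		if (!ret)
			*ba_bitmap &= ~BIT(tid);
		break;
	default:
		ret = -EINVAL;
	}
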
@@ -4108,8 +4310,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
                                   const struct cfg80211_bitrate_mask *mask)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl1271 *wl = hw->priv;
-       int i;
+       int i, ret = 0;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
                mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4321,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
 
        for (i = 0; i < IEEE80211_NUM_BANDS; i++)
-               wl->bitrate_masks[i] =
+               wlvif->bitrate_masks[i] =
                        wl1271_tx_enabled_rates_get(wl,
                                                    mask->control[i].legacy,
                                                    i);
+
+       if (unlikely(wl->state == WL1271_STATE_OFF))
+               goto out;
+
+       if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+           !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+               ret = wl1271_ps_elp_wakeup(wl);
+               if (ret < 0)
+                       goto out;
+
+               wl1271_set_band_rate(wl, wlvif);
+               wlvif->basic_rate =
+                       wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+               ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+               wl1271_ps_elp_sleep(wl);
+       }
+out:
        mutex_unlock(&wl->mutex);
 
-       return 0;
+       return ret;
 }
 
 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
                                     struct ieee80211_channel_switch *ch_switch)
 {
        struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif;
        int ret;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4361,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
        mutex_lock(&wl->mutex);
 
        if (unlikely(wl->state == WL1271_STATE_OFF)) {
-               mutex_unlock(&wl->mutex);
-               ieee80211_chswitch_done(wl->vif, false);
-               return;
+               wl12xx_for_each_wlvif_sta(wl, wlvif) {
+                       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+                       ieee80211_chswitch_done(vif, false);
+               }
+               goto out;
        }
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+       /* TODO: change mac80211 to pass vif as param */
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               ret = wl12xx_cmd_channel_switch(wl, ch_switch);
 
-       if (!ret)
-               set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+               if (!ret)
+                       set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+       }
 
        wl1271_ps_elp_sleep(wl);
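
wl12xx_for_each_wlvif_sta() above, and wl12xx_for_each_wlvif() used elsewhere in this patch, walk the new per-device interface list. A plausible sketch, assuming each struct wl12xx_vif carries a "list" node chained on the wl->wlvif_list initialized in wl1271_alloc_hw() below; the exact macros live in wl12xx.h:

#define wl12xx_for_each_wlvif(wl, wlvif) \
	list_for_each_entry(wlvif, &(wl)->wlvif_list, list)

/* restrict the walk to station interfaces */
#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
	wl12xx_for_each_wlvif(wl, wlvif) \
		if ((wlvif)->bss_type == BSS_TYPE_STA_BSS)
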
 
@@ -4170,10 +4398,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
 
        /* packets are considered pending if in the TX queue or the FW */
        ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
-       /* the above is appropriate for STA mode for PS purposes */
-       WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
 out:
        mutex_unlock(&wl->mutex);
 
@@ -4604,7 +4828,7 @@ static struct bin_attribute fwlog_attr = {
        .read = wl1271_sysfs_read_fwlog,
 };
 
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
 {
        int ret;
 
@@ -4645,9 +4869,8 @@ int wl1271_register_hw(struct wl1271 *wl)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
 
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
 {
        if (wl->state == WL1271_STATE_PLT)
                __wl1271_plt_stop(wl);
@@ -4657,9 +4880,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
        wl->mac80211_registered = false;
 
 }
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
 
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
        static const u32 cipher_suites[] = {
                WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4958,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
 
        wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
 
-       SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+       /* the FW answers probe-requests in AP-mode */
+       wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+       wl->hw->wiphy->probe_resp_offload =
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+       SET_IEEE80211_DEV(wl->hw, wl->dev);
 
        wl->hw->sta_data_size = sizeof(struct wl1271_station);
+       wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
 
        wl->hw->max_rx_aggregation_subframes = 8;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
 
 #define WL1271_DEFAULT_CHANNEL 0
 
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
 {
        struct ieee80211_hw *hw;
-       struct platform_device *plat_dev = NULL;
        struct wl1271 *wl;
        int i, j, ret;
        unsigned int order;
 
-       BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+       BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
 
        hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
        if (!hw) {
@@ -4765,41 +4993,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
                goto err_hw_alloc;
        }
 
-       plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
-       if (!plat_dev) {
-               wl1271_error("could not allocate platform_device");
-               ret = -ENOMEM;
-               goto err_plat_alloc;
-       }
-
        wl = hw->priv;
        memset(wl, 0, sizeof(*wl));
 
        INIT_LIST_HEAD(&wl->list);
+       INIT_LIST_HEAD(&wl->wlvif_list);
 
        wl->hw = hw;
-       wl->plat_dev = plat_dev;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
-               skb_queue_head_init(&wl->tx_queue[i]);
-
-       for (i = 0; i < NUM_TX_QUEUES; i++)
-               for (j = 0; j < AP_MAX_LINKS; j++)
+               for (j = 0; j < WL12XX_MAX_LINKS; j++)
                        skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
        skb_queue_head_init(&wl->deferred_rx_queue);
        skb_queue_head_init(&wl->deferred_tx_queue);
 
        INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
-       INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
        INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
        INIT_WORK(&wl->tx_work, wl1271_tx_work);
        INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
        INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
-       INIT_WORK(&wl->rx_streaming_enable_work,
-                 wl1271_rx_streaming_enable_work);
-       INIT_WORK(&wl->rx_streaming_disable_work,
-                 wl1271_rx_streaming_disable_work);
 
        wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
        if (!wl->freezable_wq) {
@@ -4808,41 +5021,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
        }
 
        wl->channel = WL1271_DEFAULT_CHANNEL;
-       wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
-       wl->default_key = 0;
        wl->rx_counter = 0;
-       wl->psm_entry_retry = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
-       wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
-       wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
-       wl->rate_set = CONF_TX_RATE_MASK_BASIC;
        wl->band = IEEE80211_BAND_2GHZ;
        wl->vif = NULL;
        wl->flags = 0;
        wl->sg_enabled = true;
        wl->hw_pg_ver = -1;
-       wl->bss_type = MAX_BSS_TYPE;
-       wl->set_bss_type = MAX_BSS_TYPE;
-       wl->last_tx_hlid = 0;
        wl->ap_ps_map = 0;
        wl->ap_fw_ps_map = 0;
        wl->quirks = 0;
        wl->platform_quirks = 0;
        wl->sched_scanning = false;
-       wl->tx_security_seq = 0;
-       wl->tx_security_last_seq_lsb = 0;
        wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
-       wl->role_id = WL12XX_INVALID_ROLE_ID;
        wl->system_hlid = WL12XX_SYSTEM_HLID;
-       wl->sta_hlid = WL12XX_INVALID_LINK_ID;
-       wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
-       wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-       wl->session_counter = 0;
-       wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
-       wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
        wl->active_sta_count = 0;
-       setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
-                   (unsigned long) wl);
        wl->fwlog_size = 0;
        init_waitqueue_head(&wl->fwlog_waitq);
 
@@ -4860,8 +5053,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
 
        /* Apply default driver configuration. */
        wl1271_conf_init(wl);
-       wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
-       wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
 
        order = get_order(WL1271_AGGR_BUFFER_SIZE);
        wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5074,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
                goto err_dummy_packet;
        }
 
-       /* Register platform device */
-       ret = platform_device_register(wl->plat_dev);
-       if (ret) {
-               wl1271_error("couldn't register platform device");
-               goto err_fwlog;
-       }
-       dev_set_drvdata(&wl->plat_dev->dev, wl);
-
-       /* Create sysfs file to control bt coex state */
-       ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file bt_coex_state");
-               goto err_platform;
-       }
-
-       /* Create sysfs file to get HW PG version */
-       ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file hw_pg_ver");
-               goto err_bt_coex_state;
-       }
-
-       /* Create sysfs file for the FW log */
-       ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file fwlog");
-               goto err_hw_pg_ver;
-       }
-
        return hw;
 
-err_hw_pg_ver:
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
-       platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
-       free_page((unsigned long)wl->fwlog);
-
 err_dummy_packet:
        dev_kfree_skb(wl->dummy_packet);
 
@@ -4937,18 +5087,14 @@ err_wq:
 
 err_hw:
        wl1271_debugfs_exit(wl);
-       kfree(plat_dev);
-
-err_plat_alloc:
        ieee80211_free_hw(hw);
 
 err_hw_alloc:
 
        return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
 
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
 {
        /* Unblock any fwlog readers */
        mutex_lock(&wl->mutex);
@@ -4956,17 +5102,15 @@ int wl1271_free_hw(struct wl1271 *wl)
        wake_up_interruptible_all(&wl->fwlog_waitq);
        mutex_unlock(&wl->mutex);
 
-       device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+       device_remove_bin_file(wl->dev, &fwlog_attr);
 
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
 
-       device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-       platform_device_unregister(wl->plat_dev);
+       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
        free_page((unsigned long)wl->fwlog);
        dev_kfree_skb(wl->dummy_packet);
        free_pages((unsigned long)wl->aggr_buf,
                        get_order(WL1271_AGGR_BUFFER_SIZE));
-       kfree(wl->plat_dev);
 
        wl1271_debugfs_exit(wl);
 
@@ -4983,7 +5127,174 @@ int wl1271_free_hw(struct wl1271 *wl)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+       struct wl1271 *wl = cookie;
+       unsigned long flags;
+
+       wl1271_debug(DEBUG_IRQ, "IRQ");
+
+       /* complete the ELP completion */
+       spin_lock_irqsave(&wl->wl_lock, flags);
+       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+       if (wl->elp_compl) {
+               complete(wl->elp_compl);
+               wl->elp_compl = NULL;
+       }
+
+       if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+               /* don't enqueue a work right now. mark it as pending */
+               set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+               wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+               disable_irq_nosync(wl->irq);
+               pm_wakeup_event(wl->dev, 0);
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
+               return IRQ_HANDLED;
+       }
+       spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+       return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+       struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+       struct ieee80211_hw *hw;
+       struct wl1271 *wl;
+       unsigned long irqflags;
+       int ret = -ENODEV;
+
+       hw = wl1271_alloc_hw();
+       if (IS_ERR(hw)) {
+               wl1271_error("can't allocate hw");
+               ret = PTR_ERR(hw);
+               goto out;
+       }
+
+       wl = hw->priv;
+       wl->irq = platform_get_irq(pdev, 0);
+       wl->ref_clock = pdata->board_ref_clock;
+       wl->tcxo_clock = pdata->board_tcxo_clock;
+       wl->platform_quirks = pdata->platform_quirks;
+       wl->set_power = pdata->set_power;
+       wl->dev = &pdev->dev;
+       wl->if_ops = pdata->ops;
+
+       platform_set_drvdata(pdev, wl);
+
+       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+               irqflags = IRQF_TRIGGER_RISING;
+       else
+               irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+       ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+                                  irqflags,
+                                  pdev->name, wl);
+       if (ret < 0) {
+               wl1271_error("request_irq() failed: %d", ret);
+               goto out_free_hw;
+       }
+
+       ret = enable_irq_wake(wl->irq);
+       if (!ret) {
+               wl->irq_wake_enabled = true;
+               device_init_wakeup(wl->dev, 1);
+               if (pdata->pwr_in_suspend)
+                       hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+       }
+       disable_irq(wl->irq);
+
+       ret = wl1271_init_ieee80211(wl);
+       if (ret)
+               goto out_irq;
+
+       ret = wl1271_register_hw(wl);
+       if (ret)
+               goto out_irq;
+
+       /* Create sysfs file to control bt coex state */
+       ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file bt_coex_state");
+               goto out_irq;
+       }
+
+       /* Create sysfs file to get HW PG version */
+       ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file hw_pg_ver");
+               goto out_bt_coex_state;
+       }
+
+       /* Create sysfs file for the FW log */
+       ret = device_create_bin_file(wl->dev, &fwlog_attr);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file fwlog");
+               goto out_hw_pg_ver;
+       }
+
+       return 0;
+
+out_hw_pg_ver:
+       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+       free_irq(wl->irq, wl);
+
+out_free_hw:
+       wl1271_free_hw(wl);
+
+out:
+       return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+       struct wl1271 *wl = platform_get_drvdata(pdev);
+
+       if (wl->irq_wake_enabled) {
+               device_init_wakeup(wl->dev, 0);
+               disable_irq_wake(wl->irq);
+       }
+       wl1271_unregister_hw(wl);
+       free_irq(wl->irq, wl);
+       wl1271_free_hw(wl);
+
+       return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+       { "wl12xx", 0 },
+       {  } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+       .probe          = wl12xx_probe,
+       .remove         = __devexit_p(wl12xx_remove),
+       .id_table       = wl12xx_id_table,
+       .driver = {
+               .name   = "wl12xx_driver",
+               .owner  = THIS_MODULE,
+       }
+};
+
+static int __init wl12xx_init(void)
+{
+       return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+       platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
 
 u32 wl12xx_debug_level = DEBUG_NONE;
 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
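
With main.c turned into a platform driver, the bus-specific modules are expected to allocate and register a matching "wl12xx" platform device and hand over their wl12xx_platform_data, including the if_ops pointer read in wl12xx_probe() above. A rough usage sketch under those assumptions; the function name is hypothetical and the real SDIO/SPI glue differs in details:

static int __devinit wlxxx_bus_glue_register(struct wl1271_if_operations *ops)
{
	struct wl12xx_platform_data *pdata = wl12xx_get_platform_data();
	struct platform_device *pdev;
	int ret;

	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	pdata->ops = ops;			/* bus-specific if_operations */

	pdev = platform_device_alloc("wl12xx", -1);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
	if (!ret)
		ret = platform_device_add(pdev);	/* -> wl12xx_probe() */
	if (ret)
		platform_device_put(pdev);
	return ret;
}
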
index c15ebf2..a7a1108 100644 (file)
@@ -25,6 +25,7 @@
 #include "ps.h"
 #include "io.h"
 #include "tx.h"
+#include "debug.h"
 
 #define WL1271_WAKEUP_TIMEOUT 500
 
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
 {
        struct delayed_work *dwork;
        struct wl1271 *wl;
+       struct wl12xx_vif *wlvif;
 
        dwork = container_of(work, struct delayed_work, work);
        wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,15 @@ void wl1271_elp_work(struct work_struct *work)
        if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
                goto out;
 
-       if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
-           (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
-            !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+       if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
                goto out;
 
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+                   !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+                       goto out;
+       }
+
        wl1271_debug(DEBUG_PSM, "chip to elp");
        wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
        set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +71,17 @@ out:
 /* Routines to toggle sleep mode while in ELP */
 void wl1271_ps_elp_sleep(struct wl1271 *wl)
 {
+       struct wl12xx_vif *wlvif;
+
        /* we shouldn't get consecutive sleep requests */
        if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
                return;
 
-       if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
-           !test_bit(WL1271_FLAG_IDLE, &wl->flags))
-               return;
+       wl12xx_for_each_wlvif(wl, wlvif) {
+               if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+                   !test_bit(WL1271_FLAG_IDLE, &wl->flags))
+                       return;
+       }
 
        ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
                                     msecs_to_jiffies(ELP_ENTRY_DELAY));
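
Both wl1271_elp_work() and wl1271_ps_elp_sleep() now apply the same per-vif condition: the chip may enter ELP only if the device is idle or every interface is in PSM. A hypothetical helper that merely restates the shared loop above in one place (not part of the patch):

static bool wl12xx_can_enter_elp(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		/* one awake, non-PSM interface keeps the chip out of ELP */
		if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
		    !test_bit(WL1271_FLAG_IDLE, &wl->flags))
			return false;
	}

	return true;
}
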
@@ -143,8 +153,8 @@ out:
        return 0;
 }
 
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
-                      u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
 {
        int ret;
 
@@ -152,39 +162,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
        case STATION_POWER_SAVE_MODE:
                wl1271_debug(DEBUG_PSM, "entering psm");
 
-               ret = wl1271_acx_wake_up_conditions(wl);
+               ret = wl1271_acx_wake_up_conditions(wl, wlvif);
                if (ret < 0) {
                        wl1271_error("couldn't set wake up conditions");
                        return ret;
                }
 
-               ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+               ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
                if (ret < 0)
                        return ret;
 
-               set_bit(WL1271_FLAG_PSM, &wl->flags);
+               set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
                break;
        case STATION_ACTIVE_MODE:
        default:
                wl1271_debug(DEBUG_PSM, "leaving psm");
 
                /* disable beacon early termination */
-               if (wl->band == IEEE80211_BAND_2GHZ) {
-                       ret = wl1271_acx_bet_enable(wl, false);
+               if (wlvif->band == IEEE80211_BAND_2GHZ) {
+                       ret = wl1271_acx_bet_enable(wl, wlvif, false);
                        if (ret < 0)
                                return ret;
                }
 
-               /* disable beacon filtering */
-               ret = wl1271_acx_beacon_filter_opt(wl, false);
-               if (ret < 0)
-                       return ret;
-
-               ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+               ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
                if (ret < 0)
                        return ret;
 
-               clear_bit(WL1271_FLAG_PSM, &wl->flags);
+               clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
                break;
        }
 
@@ -223,9 +228,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
        wl1271_handle_tx_low_watermark(wl);
 }
 
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u8 hlid, bool clean_queues)
 {
        struct ieee80211_sta *sta;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
        if (test_bit(hlid, &wl->ap_ps_map))
                return;
@@ -235,7 +242,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
                     clean_queues);
 
        rcu_read_lock();
-       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
        if (!sta) {
                wl1271_error("could not find sta %pM for starting ps",
                             wl->links[hlid].addr);
@@ -253,9 +260,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
        __set_bit(hlid, &wl->ap_ps_map);
 }
 
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
 {
        struct ieee80211_sta *sta;
+       struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
        if (!test_bit(hlid, &wl->ap_ps_map))
                return;
@@ -265,7 +273,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
        __clear_bit(hlid, &wl->ap_ps_map);
 
        rcu_read_lock();
-       sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+       sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
        if (!sta) {
                wl1271_error("could not find sta %pM for ending ps",
                             wl->links[hlid].addr);
index 25eb9bc..a12052f 100644 (file)
 #include "wl12xx.h"
 #include "acx.h"
 
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
-                      u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                      enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
 void wl1271_ps_elp_sleep(struct wl1271 *wl);
 int wl1271_ps_elp_wakeup(struct wl1271 *wl);
 void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                         u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
 
 #define WL1271_PS_COMPLETE_TIMEOUT 500
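Taken together with the ps.c hunks above, this header makes the whole power-save path per-interface: every entry point now takes a struct wl12xx_vif, and the PSM bit lives in wlvif->flags rather than the device-global wl->flags. A minimal sketch of a hypothetical caller under that assumption (wl12xx_vif_to_data(), WLVIF_FLAG_PSM and STATION_ACTIVE_MODE all appear in the surrounding diffs; the wrapper function itself is illustrative only):

static int example_force_active(struct wl1271 *wl, struct ieee80211_vif *vif,
                                u32 rates)
{
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

        /* PS state is tracked per interface now, not in the global wl->flags */
        if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
                return 0;

        /* matches the new signature declared above */
        return wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE, rates, true);
}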
 
index 3f570f3..df34d59 100644 (file)
 
 
 /* Firmware image load chunk size */
-#define CHUNK_SIZE          512
+#define CHUNK_SIZE     16384
 
 /* Firmware image header size */
 #define FW_HDR_SIZE 8
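For scale, the 32x larger load chunk mainly cuts per-transfer overhead during firmware download: taking a hypothetical 256 KiB image as a worked example, the old 512-byte chunk needs 256 KiB / 512 B = 512 write transactions, while the new 16 KiB chunk needs 256 KiB / 16 KiB = 16, plus one shorter transfer for any non-multiple remainder.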
index dee4cfe..8c277c0 100644 (file)
 #include <linux/sched.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
 #include "rx.h"
+#include "tx.h"
 #include "io.h"
 
 static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
 }
 
 static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
-                                bool unaligned)
+                                bool unaligned, u8 *hlid)
 {
        struct wl1271_rx_descriptor *desc;
        struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
         * payload aligned to 4 bytes.
         */
        memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+       *hlid = desc->hlid;
 
        hdr = (struct ieee80211_hdr *)skb->data;
        if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
        wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
 
        seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
-       wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+       wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
                     skb->len - desc->pad_len,
                     beacon ? "beacon" : "",
-                    seq_num);
+                    seq_num, *hlid);
 
        skb_trim(skb, skb->len - desc->pad_len);
 
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
 void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
 {
        struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+       unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        u32 buf_size;
        u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
        u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
        u32 mem_block;
        u32 pkt_length;
        u32 pkt_offset;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-       bool had_data = false;
+       u8 hlid;
        bool unaligned = false;
 
        while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,11 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
                         */
                        if (wl1271_rx_handle_data(wl,
                                                  wl->aggr_buf + pkt_offset,
-                                                 pkt_length, unaligned) == 1)
-                               had_data = true;
+                                                 pkt_length, unaligned,
+                                                 &hlid) == 1) {
+                               WARN_ON(hlid >= WL12XX_MAX_LINKS);
+                               __set_bit(hlid, active_hlids);
+                       }
 
                        wl->rx_counter++;
                        drv_rx_counter++;
@@ -270,17 +276,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
        if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
                wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
 
-       if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-           (wl->conf.rx_streaming.always ||
-            test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-               u32 timeout = wl->conf.rx_streaming.duration;
-
-               /* restart rx streaming */
-               if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-                       ieee80211_queue_work(wl->hw,
-                                            &wl->rx_streaming_enable_work);
-
-               mod_timer(&wl->rx_streaming_timer,
-                         jiffies + msecs_to_jiffies(timeout));
-       }
+       wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
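wl12xx_rx() above stops tracking a single had_data flag and instead collects the host link IDs (hlids) that actually received frames into a bitmap, which it hands to wl12xx_rearm_rx_streaming() in one call. The fragment below only sketches that bitmap idiom (wl, buf, len and unaligned stand in for the caller's context); the consumer loop is a hypothetical stand-in, since wl12xx_rearm_rx_streaming() itself is defined outside this hunk:

        unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = { 0 };
        u8 hlid;
        int i;

        /* producer: mark each link that delivered at least one data frame */
        if (wl1271_rx_handle_data(wl, buf, len, unaligned, &hlid) == 1)
                __set_bit(hlid, active_hlids);

        /* consumer: visit only the links that actually saw traffic */
        for_each_set_bit(i, active_hlids, WL12XX_MAX_LINKS)
                rearm_one_link(wl, i);          /* hypothetical helper */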
index fc29c67..8599dab 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ieee80211.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "cmd.h"
 #include "scan.h"
 #include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
 {
        struct delayed_work *dwork;
        struct wl1271 *wl;
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        int ret;
        bool is_sta, is_ibss;
 
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
        if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
                goto out;
 
+       vif = wl->scan_vif;
+       wlvif = wl12xx_vif_to_data(vif);
+
        wl->scan.state = WL1271_SCAN_STATE_IDLE;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
        wl->scan.req = NULL;
+       wl->scan_vif = NULL;
 
        ret = wl1271_ps_elp_wakeup(wl);
        if (ret < 0)
                goto out;
 
-       if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+       if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
                /* restore hardware connection monitoring template */
-               wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+               wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
        }
 
        /* return to ROC if needed */
-       is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
-       is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
-       if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
-            (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
-           !test_bit(wl->dev_role_id, wl->roc_map)) {
+       is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+       is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+       if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+            (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+           !test_bit(wlvif->dev_role_id, wl->roc_map)) {
                /* restore remain on channel */
-               wl12xx_cmd_role_start_dev(wl);
-               wl12xx_roc(wl, wl->dev_role_id);
+               wl12xx_start_dev(wl, wlvif);
        }
        wl1271_ps_elp_sleep(wl);
 
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
 
 #define WL1271_NOTHING_TO_SCAN 1
 
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
-                            bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+                           enum ieee80211_band band,
+                           bool passive, u32 basic_rate)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        struct wl1271_cmd_scan *cmd;
        struct wl1271_cmd_trigger_scan_to *trigger;
        int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
        if (passive)
                scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-       if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+       if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
                goto out;
        }
-       cmd->params.role_id = wl->role_id;
+       cmd->params.role_id = wlvif->role_id;
        cmd->params.scan_options = cpu_to_le16(scan_options);
 
        cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
 
        cmd->params.tx_rate = cpu_to_le32(basic_rate);
        cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
-       cmd->params.tx_rate = cpu_to_le32(basic_rate);
        cmd->params.tid_trigger = 0;
        cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
 
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
                memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
        }
 
-       memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+       memcpy(cmd->addr, vif->addr, ETH_ALEN);
 
-       ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
-                                        wl->scan.req->ie, wl->scan.req->ie_len,
-                                        band);
+       ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+                                        wl->scan.ssid_len, wl->scan.req->ie,
+                                        wl->scan.req->ie_len, band);
        if (ret < 0) {
                wl1271_error("PROBE request template failed");
                goto out;
@@ -241,11 +248,12 @@ out:
        return ret;
 }
 
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
 {
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret = 0;
        enum ieee80211_band band;
-       u32 rate;
+       u32 rate, mask;
 
        switch (wl->scan.state) {
        case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
 
        case WL1271_SCAN_STATE_2GHZ_ACTIVE:
                band = IEEE80211_BAND_2GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, false, rate);
+               mask = wlvif->bitrate_masks[band];
+               if (wl->scan.req->no_cck) {
+                       mask &= ~CONF_TX_CCK_RATES;
+                       if (!mask)
+                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
+               }
+               rate = wl1271_tx_min_rate_get(wl, mask);
+               ret = wl1271_scan_send(wl, vif, band, false, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
 
        case WL1271_SCAN_STATE_2GHZ_PASSIVE:
                band = IEEE80211_BAND_2GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, true, rate);
+               mask = wlvif->bitrate_masks[band];
+               if (wl->scan.req->no_cck) {
+                       mask &= ~CONF_TX_CCK_RATES;
+                       if (!mask)
+                               mask = CONF_TX_RATE_MASK_BASIC_P2P;
+               }
+               rate = wl1271_tx_min_rate_get(wl, mask);
+               ret = wl1271_scan_send(wl, vif, band, true, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        if (wl->enable_11a)
                                wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
                        else
                                wl->scan.state = WL1271_SCAN_STATE_DONE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
 
        case WL1271_SCAN_STATE_5GHZ_ACTIVE:
                band = IEEE80211_BAND_5GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, false, rate);
+               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+               ret = wl1271_scan_send(wl, vif, band, false, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
 
        case WL1271_SCAN_STATE_5GHZ_PASSIVE:
                band = IEEE80211_BAND_5GHZ;
-               rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
-               ret = wl1271_scan_send(wl, band, true, rate);
+               rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+               ret = wl1271_scan_send(wl, vif, band, true, rate);
                if (ret == WL1271_NOTHING_TO_SCAN) {
                        wl->scan.state = WL1271_SCAN_STATE_DONE;
-                       wl1271_scan_stm(wl);
+                       wl1271_scan_stm(wl, vif);
                }
 
                break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
        }
 }
 
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+               const u8 *ssid, size_t ssid_len,
                struct cfg80211_scan_request *req)
 {
        /*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
                wl->scan.ssid_len = 0;
        }
 
+       wl->scan_vif = vif;
        wl->scan.req = req;
        memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
 
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
        ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
                                     msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
 
-       wl1271_scan_stm(wl);
+       wl1271_scan_stm(wl, vif);
 
        return 0;
 }
@@ -550,6 +572,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
                         * so they're used in probe requests.
                         */
                        for (i = 0; i < req->n_ssids; i++) {
+                               if (!req->ssids[i].ssid_len)
+                                       continue;
+
                                for (j = 0; j < cmd->n_ssids; j++)
                                        if (!memcmp(req->ssids[i].ssid,
                                                   cmd->ssids[j].ssid,
@@ -585,6 +610,7 @@ out:
 }
 
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+                                 struct wl12xx_vif *wlvif,
                                  struct cfg80211_sched_scan_request *req,
                                  struct ieee80211_sched_scan_ies *ies)
 {
@@ -631,7 +657,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        }
 
        if (!force_passive && cfg->active[0]) {
-               ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+               ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[IEEE80211_BAND_2GHZ],
                                                 ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +669,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
        }
 
        if (!force_passive && cfg->active[1]) {
-               ret = wl1271_cmd_build_probe_req(wl,  req->ssids[0].ssid,
+               ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
                                                 ies->ie[IEEE80211_BAND_5GHZ],
                                                 ies->len[IEEE80211_BAND_5GHZ],
@@ -667,14 +693,14 @@ out:
        return ret;
 }
 
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        struct wl1271_cmd_sched_scan_start *start;
        int ret = 0;
 
        wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
 
-       if (wl->bss_type != BSS_TYPE_STA_BSS)
+       if (wlvif->bss_type != BSS_TYPE_STA_BSS)
                return -EOPNOTSUPP;
 
        if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
index 9211515..a7ed43d 100644 (file)
 
 #include "wl12xx.h"
 
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+               const u8 *ssid, size_t ssid_len,
                struct cfg80211_scan_request *req);
 int wl1271_scan_stop(struct wl1271 *wl);
 int wl1271_scan_build_probe_req(struct wl1271 *wl,
                                const u8 *ssid, size_t ssid_len,
                                const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
 void wl1271_scan_complete_work(struct work_struct *work);
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+                                    struct wl12xx_vif *wlvif,
                                     struct cfg80211_sched_scan_request *req,
                                     struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
 void wl1271_scan_sched_scan_results(struct wl1271 *wl);
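Scanning is made interface-aware in the same way: wl1271_scan() now receives the requesting vif, stores it in wl->scan_vif, and the completion work and scan state machine recover the per-vif state from it instead of reading device-global fields. A hypothetical mac80211 ->hw_scan() caller, sketched only to show the new calling convention:

static int example_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                           struct cfg80211_scan_request *req)
{
        struct wl1271 *wl = hw->priv;
        const u8 *ssid = NULL;
        size_t ssid_len = 0;

        if (req->n_ssids) {
                ssid = req->ssids[0].ssid;
                ssid_len = req->ssids[0].ssid_len;
        }

        /* the vif now travels with the scan (wl->scan_vif) */
        return wl1271_scan(wl, vif, ssid, ssid_len, req);
}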
 
index 516a898..468a505 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/platform_device.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
 #define SDIO_DEVICE_ID_TI_WL1271       0x4076
 #endif
 
+struct wl12xx_sdio_glue {
+       struct device *dev;
+       struct platform_device *core;
+};
+
 static const struct sdio_device_id wl1271_devices[] __devinitconst = {
        { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
        {}
 };
 MODULE_DEVICE_TABLE(sdio, wl1271_devices);
 
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
-{
-       sdio_claim_host(wl->if_priv);
-       sdio_set_block_size(wl->if_priv, blksz);
-       sdio_release_host(wl->if_priv);
-}
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
-       return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
-       return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+static void wl1271_sdio_set_block_size(struct device *child,
+                                      unsigned int blksz)
 {
-       struct wl1271 *wl = cookie;
-       unsigned long flags;
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
-       wl1271_debug(DEBUG_IRQ, "IRQ");
-
-       /* complete the ELP completion */
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       if (wl->elp_compl) {
-               complete(wl->elp_compl);
-               wl->elp_compl = NULL;
-       }
-
-       if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
-               /* don't enqueue a work right now. mark it as pending */
-               set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
-               wl1271_debug(DEBUG_IRQ, "should not enqueue work");
-               disable_irq_nosync(wl->irq);
-               pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-               return IRQ_HANDLED;
-       }
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-       return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-       disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-       enable_irq(wl->irq);
+       sdio_claim_host(func);
+       sdio_set_block_size(func, blksz);
+       sdio_release_host(func);
 }
 
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
                                 size_t len, bool fixed)
 {
        int ret;
-       struct sdio_func *func = wl_to_func(wl);
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
                ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
-                            addr, ((u8 *)buf)[0]);
+               dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+                       addr, ((u8 *)buf)[0]);
        } else {
                if (fixed)
                        ret = sdio_readsb(func, buf, addr, len);
                else
                        ret = sdio_memcpy_fromio(func, buf, addr, len);
 
-               wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
-                            addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+               dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+                       addr, len);
        }
 
        if (ret)
-               wl1271_error("sdio read failed (%d)", ret);
+               dev_err(child->parent, "sdio read failed (%d)\n", ret);
 }
 
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
                                  size_t len, bool fixed)
 {
        int ret;
-       struct sdio_func *func = wl_to_func(wl);
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
                sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
-                            addr, ((u8 *)buf)[0]);
+               dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+                       addr, ((u8 *)buf)[0]);
        } else {
-               wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
-                            addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+               dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+                       addr, len);
 
                if (fixed)
                        ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
        }
 
        if (ret)
-               wl1271_error("sdio write failed (%d)", ret);
+               dev_err(child->parent, "sdio write failed (%d)\n", ret);
 }
 
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
 {
-       struct sdio_func *func = wl_to_func(wl);
        int ret;
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        /* If enabled, tell runtime PM not to power off the card */
        if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
        return ret;
 }
 
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
 {
-       struct sdio_func *func = wl_to_func(wl);
        int ret;
+       struct sdio_func *func = dev_to_sdio_func(glue->dev);
 
        sdio_disable_func(func);
        sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
        return ret;
 }
 
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
 {
+       struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
        if (enable)
-               return wl1271_sdio_power_on(wl);
+               return wl12xx_sdio_power_on(glue);
        else
-               return wl1271_sdio_power_off(wl);
+               return wl12xx_sdio_power_off(glue);
 }
 
 static struct wl1271_if_operations sdio_ops = {
-       .read           = wl1271_sdio_raw_read,
-       .write          = wl1271_sdio_raw_write,
-       .power          = wl1271_sdio_set_power,
-       .dev            = wl1271_sdio_wl_to_dev,
-       .enable_irq     = wl1271_sdio_enable_interrupts,
-       .disable_irq    = wl1271_sdio_disable_interrupts,
+       .read           = wl12xx_sdio_raw_read,
+       .write          = wl12xx_sdio_raw_write,
+       .power          = wl12xx_sdio_set_power,
        .set_block_size = wl1271_sdio_set_block_size,
 };
 
 static int __devinit wl1271_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
 {
-       struct ieee80211_hw *hw;
-       const struct wl12xx_platform_data *wlan_data;
-       struct wl1271 *wl;
-       unsigned long irqflags;
+       struct wl12xx_platform_data *wlan_data;
+       struct wl12xx_sdio_glue *glue;
+       struct resource res[1];
        mmc_pm_flag_t mmcflags;
-       int ret;
+       int ret = -ENOMEM;
 
        /* We are only able to handle the wlan function */
        if (func->num != 0x02)
                return -ENODEV;
 
-       hw = wl1271_alloc_hw();
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
-
-       wl = hw->priv;
+       glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       if (!glue) {
+               dev_err(&func->dev, "can't allocate glue\n");
+               goto out;
+       }
 
-       wl->if_priv = func;
-       wl->if_ops = &sdio_ops;
+       glue->dev = &func->dev;
 
        /* Grab access to FN0 for ELP reg. */
        func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
        wlan_data = wl12xx_get_platform_data();
        if (IS_ERR(wlan_data)) {
                ret = PTR_ERR(wlan_data);
-               wl1271_error("missing wlan platform data: %d", ret);
-               goto out_free;
+               dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+               goto out_free_glue;
        }
 
-       wl->irq = wlan_data->irq;
-       wl->ref_clock = wlan_data->board_ref_clock;
-       wl->tcxo_clock = wlan_data->board_tcxo_clock;
-       wl->platform_quirks = wlan_data->platform_quirks;
+       /* if sdio can keep power while host is suspended, enable wow */
+       mmcflags = sdio_get_host_pm_caps(func);
+       dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
 
-       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
-               irqflags = IRQF_TRIGGER_RISING;
-       else
-               irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
-
-       ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
-                                  irqflags,
-                                  DRIVER_NAME, wl);
-       if (ret < 0) {
-               wl1271_error("request_irq() failed: %d", ret);
-               goto out_free;
-       }
+       if (mmcflags & MMC_PM_KEEP_POWER)
+               wlan_data->pwr_in_suspend = true;
+
+       wlan_data->ops = &sdio_ops;
 
-       ret = enable_irq_wake(wl->irq);
-       if (!ret) {
-               wl->irq_wake_enabled = true;
-               device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+       sdio_set_drvdata(func, glue);
 
-               /* if sdio can keep power while host is suspended, enable wow */
-               mmcflags = sdio_get_host_pm_caps(func);
-               wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+       /* Tell PM core that we don't need the card to be powered now */
+       pm_runtime_put_noidle(&func->dev);
 
-               if (mmcflags & MMC_PM_KEEP_POWER)
-                       hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+       glue->core = platform_device_alloc("wl12xx", -1);
+       if (!glue->core) {
+               dev_err(glue->dev, "can't allocate platform_device");
+               ret = -ENOMEM;
+               goto out_free_glue;
        }
-       disable_irq(wl->irq);
 
-       ret = wl1271_init_ieee80211(wl);
-       if (ret)
-               goto out_irq;
+       glue->core->dev.parent = &func->dev;
 
-       ret = wl1271_register_hw(wl);
-       if (ret)
-               goto out_irq;
+       memset(res, 0x00, sizeof(res));
 
-       sdio_set_drvdata(func, wl);
+       res[0].start = wlan_data->irq;
+       res[0].flags = IORESOURCE_IRQ;
+       res[0].name = "irq";
 
-       /* Tell PM core that we don't need the card to be powered now */
-       pm_runtime_put_noidle(&func->dev);
+       ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+       if (ret) {
+               dev_err(glue->dev, "can't add resources\n");
+               goto out_dev_put;
+       }
 
+       ret = platform_device_add_data(glue->core, wlan_data,
+                                      sizeof(*wlan_data));
+       if (ret) {
+               dev_err(glue->dev, "can't add platform data\n");
+               goto out_dev_put;
+       }
+
+       ret = platform_device_add(glue->core);
+       if (ret) {
+               dev_err(glue->dev, "can't add platform device\n");
+               goto out_dev_put;
+       }
        return 0;
 
- out_irq:
-       free_irq(wl->irq, wl);
+out_dev_put:
+       platform_device_put(glue->core);
 
- out_free:
-       wl1271_free_hw(wl);
+out_free_glue:
+       kfree(glue);
 
+out:
        return ret;
 }
 
 static void __devexit wl1271_remove(struct sdio_func *func)
 {
-       struct wl1271 *wl = sdio_get_drvdata(func);
+       struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
 
        /* Undo decrement done above in wl1271_probe */
        pm_runtime_get_noresume(&func->dev);
 
-       wl1271_unregister_hw(wl);
-       if (wl->irq_wake_enabled) {
-               device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
-               disable_irq_wake(wl->irq);
-       }
-       free_irq(wl->irq, wl);
-       wl1271_free_hw(wl);
+       platform_device_del(glue->core);
+       platform_device_put(glue->core);
+       kfree(glue);
 }
 
 #ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
        /* Tell MMC/SDIO core it's OK to power down the card
         * (if it isn't already), but not to remove it completely */
        struct sdio_func *func = dev_to_sdio_func(dev);
-       struct wl1271 *wl = sdio_get_drvdata(func);
+       struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+       struct wl1271 *wl = platform_get_drvdata(glue->core);
        mmc_pm_flag_t sdio_flags;
        int ret = 0;
 
-       wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
-                    wl->wow_enabled);
+       dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+               wl->wow_enabled);
 
        /* check whether sdio should keep power */
        if (wl->wow_enabled) {
                sdio_flags = sdio_get_host_pm_caps(func);
 
                if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
-                       wl1271_error("can't keep power while host "
-                                    "is suspended");
+                       dev_err(dev, "can't keep power while host "
+                                    "is suspended\n");
                        ret = -EINVAL;
                        goto out;
                }
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
                /* keep power while host suspended */
                ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
                if (ret) {
-                       wl1271_error("error while trying to keep power");
+                       dev_err(dev, "error while trying to keep power\n");
                        goto out;
                }
 
@@ -367,9 +325,10 @@ out:
 static int wl1271_resume(struct device *dev)
 {
        struct sdio_func *func = dev_to_sdio_func(dev);
-       struct wl1271 *wl = sdio_get_drvdata(func);
+       struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+       struct wl1271 *wl = platform_get_drvdata(glue->core);
 
-       wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+       dev_dbg(dev, "wl1271 resume\n");
        if (wl->wow_enabled) {
                /* claim back host */
                sdio_claim_host(func);
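With these changes the SDIO driver is reduced to a bus-glue layer: probe registers a child "wl12xx" platform device, exports the IRQ as a platform resource, hands the bus accessors over through pdata->ops, and each accessor finds its way back to the SDIO function via dev_get_drvdata(child->parent). A rough sketch of the child-driver side under those assumptions (the core driver's probe is not part of this diff, so the function below is illustrative only):

static int example_core_probe(struct platform_device *pdev)
{
        struct wl12xx_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *irq_res =
                platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        int ret;

        if (!pdata || !pdata->ops || !irq_res)
                return -ENODEV;

        /* every bus access goes back through the glue via the parent device */
        ret = pdata->ops->power(&pdev->dev, true);
        if (ret)
                return ret;

        /* ... later: pdata->ops->read(&pdev->dev, addr, buf, len, false); */

        return 0;
}

Keeping the glue this thin is what lets the same core serve both front ends; the spi.c hunks further below repeat the pattern almost line for line.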
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644 (file)
index f25d5d9..0000000
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI              0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271       0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
-       "This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
-       "This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
-       struct wl1271 wl;
-       struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
-       { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
-       {}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
-       return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
-       return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
-               size_t len, bool fixed)
-{
-       int ret = 0;
-       struct sdio_func *func = wl_to_func(wl);
-
-       if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
-               ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
-                               addr, ((u8 *)buf)[0]);
-       } else {
-               if (fixed)
-                       ret = sdio_readsb(func, buf, addr, len);
-               else
-                       ret = sdio_memcpy_fromio(func, buf, addr, len);
-
-               wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
-                               addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-       }
-
-       if (ret)
-               wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
-               size_t len, bool fixed)
-{
-       int ret = 0;
-       struct sdio_func *func = wl_to_func(wl);
-
-       if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
-               sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
-               wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
-                               addr, ((u8 *)buf)[0]);
-       } else {
-               wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
-                               addr, len);
-               wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
-               if (fixed)
-                       ret = sdio_writesb(func, addr, buf, len);
-               else
-                       ret = sdio_memcpy_toio(func, addr, buf, len);
-       }
-       if (ret)
-               wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
-       struct sdio_func *func = wl_to_func(wl);
-       int ret;
-
-       /* Let the SDIO stack handle wlan_enable control, so we
-        * keep host claimed while wlan is in use to keep wl1271
-        * alive.
-        */
-       if (enable) {
-               /* Power up the card */
-               ret = pm_runtime_get_sync(&func->dev);
-               if (ret < 0)
-                       goto out;
-
-               /* Runtime PM might be disabled, power up the card manually */
-               ret = mmc_power_restore_host(func->card->host);
-               if (ret < 0)
-                       goto out;
-
-               sdio_claim_host(func);
-               sdio_enable_func(func);
-       } else {
-               sdio_disable_func(func);
-               sdio_release_host(func);
-
-               /* Runtime PM might be disabled, power off the card manually */
-               ret = mmc_power_save_host(func->card->host);
-               if (ret < 0)
-                       goto out;
-
-               /* Power down the card */
-               ret = pm_runtime_put_sync(&func->dev);
-       }
-
-out:
-       return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
-       .read           = wl1271_sdio_raw_read,
-       .write          = wl1271_sdio_raw_write,
-       .power          = wl1271_sdio_set_power,
-       .dev            = wl1271_sdio_wl_to_dev,
-       .enable_irq     = wl1271_sdio_enable_interrupts,
-       .disable_irq    = wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
-       u32 elp_reg;
-
-       elp_reg = ELPCTRL_WAKE_UP;
-       wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
-       const struct firmware *fw;
-       int ret;
-
-       if (wl->chip.id == CHIP_ID_1283_PG20)
-               ret = request_firmware(&fw, WL128X_FW_NAME,
-                                      wl1271_wl_to_dev(wl));
-       else
-               ret = request_firmware(&fw, WL127X_FW_NAME,
-                                      wl1271_wl_to_dev(wl));
-
-       if (ret < 0) {
-               wl1271_error("could not get firmware: %d", ret);
-               return ret;
-       }
-
-       if (fw->size % 4) {
-               wl1271_error("firmware size is not multiple of 32 bits: %zu",
-                               fw->size);
-               ret = -EILSEQ;
-               goto out;
-       }
-
-       wl->fw_len = fw->size;
-       wl->fw = vmalloc(wl->fw_len);
-
-       if (!wl->fw) {
-               wl1271_error("could not allocate memory for the firmware");
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       memcpy(wl->fw, fw->data, wl->fw_len);
-
-       ret = 0;
-
-out:
-       release_firmware(fw);
-
-       return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
-       const struct firmware *fw;
-       int ret;
-
-       ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
-       if (ret < 0) {
-               wl1271_error("could not get nvs file: %d", ret);
-               return ret;
-       }
-
-       wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
-       if (!wl->nvs) {
-               wl1271_error("could not allocate memory for the nvs file");
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       wl->nvs_len = fw->size;
-
-out:
-       release_firmware(fw);
-
-       return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
-       struct wl1271_partition_set partition;
-       int ret;
-
-       msleep(WL1271_PRE_POWER_ON_SLEEP);
-       ret = wl1271_power_on(wl);
-       if (ret)
-               return ret;
-
-       msleep(WL1271_POWER_ON_SLEEP);
-
-       /* We don't need a real memory partition here, because we only want
-        * to use the registers at this point. */
-       memset(&partition, 0, sizeof(partition));
-       partition.reg.start = REGISTERS_BASE;
-       partition.reg.size = REGISTERS_DOWN_SIZE;
-       wl1271_set_partition(wl, &partition);
-
-       /* ELP module wake up */
-       wl1271_fw_wakeup(wl);
-
-       /* whal_FwCtrl_BootSm() */
-
-       /* 0. read chip id from CHIP_ID */
-       wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
-       /* 1. check if chip id is valid */
-
-       switch (wl->chip.id) {
-       case CHIP_ID_1271_PG10:
-               wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
-                               wl->chip.id);
-               break;
-       case CHIP_ID_1271_PG20:
-               wl1271_notice("chip id 0x%x (1271 PG20)",
-                               wl->chip.id);
-               break;
-       case CHIP_ID_1283_PG20:
-               wl1271_notice("chip id 0x%x (1283 PG20)",
-                               wl->chip.id);
-               break;
-       case CHIP_ID_1283_PG10:
-       default:
-               wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
-               return -ENODEV;
-       }
-
-       return ret;
-}
-
-static struct wl1271_partition_set part_down = {
-       .mem = {
-               .start = 0x00000000,
-               .size  = 0x000177c0
-       },
-       .reg = {
-               .start = REGISTERS_BASE,
-               .size  = 0x00008800
-       },
-       .mem2 = {
-               .start = 0x00000000,
-               .size  = 0x00000000
-       },
-       .mem3 = {
-               .start = 0x00000000,
-               .size  = 0x00000000
-       },
-};
-
-static int tester(void *data)
-{
-       struct wl1271 *wl = data;
-       struct sdio_func *func = wl_to_func(wl);
-       struct device *pdev = &func->dev;
-       int ret = 0;
-       bool rx_started = 0;
-       bool tx_started = 0;
-       uint8_t *tx_buf, *rx_buf;
-       int test_size = PAGE_SIZE;
-       u32 addr = 0;
-       struct wl1271_partition_set partition;
-
-       /* We assume chip is powered up and firmware fetched */
-
-       memcpy(&partition, &part_down, sizeof(partition));
-       partition.mem.start = addr;
-       wl1271_set_partition(wl, &partition);
-
-       tx_buf = kmalloc(test_size, GFP_KERNEL);
-       rx_buf = kmalloc(test_size, GFP_KERNEL);
-       if (!tx_buf || !rx_buf) {
-               dev_err(pdev,
-                       "Could not allocate memory. Test will not run.\n");
-               ret = -ENOMEM;
-               goto free;
-       }
-
-       memset(tx_buf, 0x5a, test_size);
-
-       /* write something in data area so we can read it back */
-       wl1271_write(wl, addr, tx_buf, test_size, false);
-
-       while (!kthread_should_stop()) {
-               if (rx && !rx_started) {
-                       dev_info(pdev, "starting rx test\n");
-                       rx_started = 1;
-               } else if (!rx && rx_started) {
-                       dev_info(pdev, "stopping rx test\n");
-                       rx_started = 0;
-               }
-
-               if (tx && !tx_started) {
-                       dev_info(pdev, "starting tx test\n");
-                       tx_started = 1;
-               } else if (!tx && tx_started) {
-                       dev_info(pdev, "stopping tx test\n");
-                       tx_started = 0;
-               }
-
-               if (rx_started)
-                       wl1271_read(wl, addr, rx_buf, test_size, false);
-
-               if (tx_started)
-                       wl1271_write(wl, addr, tx_buf, test_size, false);
-
-               if (!rx_started && !tx_started)
-                       msleep(100);
-       }
-
-free:
-       kfree(tx_buf);
-       kfree(rx_buf);
-       return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
-               const struct sdio_device_id *id)
-{
-       const struct wl12xx_platform_data *wlan_data;
-       struct wl1271 *wl;
-       struct wl1271_test *wl_test;
-       int ret = 0;
-
-       /* wl1271 has 2 sdio functions we handle just the wlan part */
-       if (func->num != 0x02)
-               return -ENODEV;
-
-       wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
-       if (!wl_test) {
-               dev_err(&func->dev, "Could not allocate memory\n");
-               return -ENOMEM;
-       }
-
-       wl = &wl_test->wl;
-
-       wl->if_priv = func;
-       wl->if_ops = &sdio_ops;
-
-       /* Grab access to FN0 for ELP reg. */
-       func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
-       /* Use block mode for transferring over one block size of data */
-       func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
-       wlan_data = wl12xx_get_platform_data();
-       if (IS_ERR(wlan_data)) {
-               ret = PTR_ERR(wlan_data);
-               dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
-               goto out_free;
-       }
-
-       wl->irq = wlan_data->irq;
-       wl->ref_clock = wlan_data->board_ref_clock;
-       wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
-       sdio_set_drvdata(func, wl_test);
-
-       /* power up the device */
-       ret = wl1271_chip_wakeup(wl);
-       if (ret) {
-               dev_err(&func->dev, "could not wake up chip\n");
-               goto out_free;
-       }
-
-       if (wl->fw == NULL) {
-               ret = wl1271_fetch_firmware(wl);
-               if (ret < 0) {
-                       dev_err(&func->dev, "firmware fetch error\n");
-                       goto out_off;
-               }
-       }
-
-       /* fetch NVS */
-       if (wl->nvs == NULL) {
-               ret = wl1271_fetch_nvs(wl);
-               if (ret < 0) {
-                       dev_err(&func->dev, "NVS fetch error\n");
-                       goto out_off;
-               }
-       }
-
-       ret = wl1271_load_firmware(wl);
-       if (ret < 0) {
-               dev_err(&func->dev, "firmware load error: %d\n", ret);
-               goto out_free;
-       }
-
-       dev_info(&func->dev, "initialized\n");
-
-       /* I/O testing will be done in the tester thread */
-
-       wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
-       if (IS_ERR(wl_test->test_task)) {
-               dev_err(&func->dev, "unable to create kernel thread\n");
-               ret = PTR_ERR(wl_test->test_task);
-               goto out_free;
-       }
-
-       return 0;
-
-out_off:
-       /* power off the chip */
-       wl1271_power_off(wl);
-
-out_free:
-       kfree(wl_test);
-       return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
-       struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
-       /* stop the I/O test thread */
-       kthread_stop(wl_test->test_task);
-
-       /* power off the chip */
-       wl1271_power_off(&wl_test->wl);
-
-       vfree(wl_test->wl.fw);
-       wl_test->wl.fw = NULL;
-       kfree(wl_test->wl.nvs);
-       wl_test->wl.nvs = NULL;
-
-       kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
-       .name           = "wl12xx_sdio_test",
-       .id_table       = wl1271_devices,
-       .probe          = wl1271_probe,
-       .remove         = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
-       int ret;
-
-       ret = sdio_register_driver(&wl1271_sdio_driver);
-       if (ret < 0)
-               pr_err("failed to register sdio driver: %d\n", ret);
-
-       return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
-       sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
index 0f97186..92caa7c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #include "wl12xx.h"
 
 #define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
 
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
-       return wl->if_priv;
-}
-
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
-{
-       return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
-       disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
-       enable_irq(wl->irq);
-}
+struct wl12xx_spi_glue {
+       struct device *dev;
+       struct platform_device *core;
+};
 
-static void wl1271_spi_reset(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
        u8 *cmd;
        struct spi_transfer t;
        struct spi_message m;
 
        cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
        if (!cmd) {
-               wl1271_error("could not allocate cmd for spi reset");
+               dev_err(child->parent,
+                       "could not allocate cmd for spi reset\n");
                return;
        }
 
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
        t.len = WSPI_INIT_CMD_LEN;
        spi_message_add_tail(&t, &m);
 
-       spi_sync(wl_to_spi(wl), &m);
+       spi_sync(to_spi_device(glue->dev), &m);
 
-       wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
        kfree(cmd);
 }
 
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
        u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
        struct spi_transfer t;
        struct spi_message m;
 
        cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
        if (!cmd) {
-               wl1271_error("could not allocate cmd for spi init");
+               dev_err(child->parent,
+                       "could not allocate cmd for spi init\n");
                return;
        }
 
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
        t.len = WSPI_INIT_CMD_LEN;
        spi_message_add_tail(&t, &m);
 
-       spi_sync(wl_to_spi(wl), &m);
-       wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+       spi_sync(to_spi_device(glue->dev), &m);
        kfree(cmd);
 }
 
 #define WL1271_BUSY_WORD_TIMEOUT 1000
 
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+       struct wl1271 *wl = dev_get_drvdata(child);
        struct spi_transfer t[1];
        struct spi_message m;
        u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
                t[0].len = sizeof(u32);
                t[0].cs_change = true;
                spi_message_add_tail(&t[0], &m);
-               spi_sync(wl_to_spi(wl), &m);
+               spi_sync(to_spi_device(glue->dev), &m);
 
                if (*busy_buf & 0x1)
                        return 0;
        }
 
        /* The SPI bus is unresponsive, the read failed. */
-       wl1271_error("SPI read busy-word timeout!\n");
+       dev_err(child->parent, "SPI read busy-word timeout!\n");
        return -ETIMEDOUT;
 }
 
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
                                size_t len, bool fixed)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+       struct wl1271 *wl = dev_get_drvdata(child);
        struct spi_transfer t[2];
        struct spi_message m;
        u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
                t[1].cs_change = true;
                spi_message_add_tail(&t[1], &m);
 
-               spi_sync(wl_to_spi(wl), &m);
+               spi_sync(to_spi_device(glue->dev), &m);
 
                if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
-                   wl1271_spi_read_busy(wl)) {
+                   wl12xx_spi_read_busy(child)) {
                        memset(buf, 0, chunk_len);
                        return;
                }
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
                t[0].cs_change = true;
                spi_message_add_tail(&t[0], &m);
 
-               spi_sync(wl_to_spi(wl), &m);
-
-               wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
-               wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+               spi_sync(to_spi_device(glue->dev), &m);
 
                if (!fixed)
                        addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
        }
 }
 
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
-                         size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+                                size_t len, bool fixed)
 {
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
        struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
        struct spi_message m;
        u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
                t[i].len = chunk_len;
                spi_message_add_tail(&t[i++], &m);
 
-               wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
-               wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
                if (!fixed)
                        addr += chunk_len;
                buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
                cmd++;
        }
 
-       spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
-       struct wl1271 *wl = cookie;
-       unsigned long flags;
-
-       wl1271_debug(DEBUG_IRQ, "IRQ");
-
-       /* complete the ELP completion */
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       if (wl->elp_compl) {
-               complete(wl->elp_compl);
-               wl->elp_compl = NULL;
-       }
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
-
-       return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
-       if (wl->set_power)
-               wl->set_power(enable);
-
-       return 0;
+       spi_sync(to_spi_device(glue->dev), &m);
 }
 
 static struct wl1271_if_operations spi_ops = {
-       .read           = wl1271_spi_raw_read,
-       .write          = wl1271_spi_raw_write,
-       .reset          = wl1271_spi_reset,
-       .init           = wl1271_spi_init,
-       .power          = wl1271_spi_set_power,
-       .dev            = wl1271_spi_wl_to_dev,
-       .enable_irq     = wl1271_spi_enable_interrupts,
-       .disable_irq    = wl1271_spi_disable_interrupts,
+       .read           = wl12xx_spi_raw_read,
+       .write          = wl12xx_spi_raw_write,
+       .reset          = wl12xx_spi_reset,
+       .init           = wl12xx_spi_init,
        .set_block_size = NULL,
 };
 
 static int __devinit wl1271_probe(struct spi_device *spi)
 {
+       struct wl12xx_spi_glue *glue;
        struct wl12xx_platform_data *pdata;
-       struct ieee80211_hw *hw;
-       struct wl1271 *wl;
-       unsigned long irqflags;
-       int ret;
+       struct resource res[1];
+       int ret = -ENOMEM;
 
        pdata = spi->dev.platform_data;
        if (!pdata) {
-               wl1271_error("no platform data");
+               dev_err(&spi->dev, "no platform data\n");
                return -ENODEV;
        }
 
-       hw = wl1271_alloc_hw();
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
+       pdata->ops = &spi_ops;
 
-       wl = hw->priv;
+       glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+       if (!glue) {
+               dev_err(&spi->dev, "can't allocate glue\n");
+               goto out;
+       }
 
-       dev_set_drvdata(&spi->dev, wl);
-       wl->if_priv = spi;
+       glue->dev = &spi->dev;
 
-       wl->if_ops = &spi_ops;
+       spi_set_drvdata(spi, glue);
 
        /* This is the only SPI value that we need to set here, the rest
         * comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
 
        ret = spi_setup(spi);
        if (ret < 0) {
-               wl1271_error("spi_setup failed");
-               goto out_free;
+               dev_err(glue->dev, "spi_setup failed\n");
+               goto out_free_glue;
        }
 
-       wl->set_power = pdata->set_power;
-       if (!wl->set_power) {
-               wl1271_error("set power function missing in platform data");
-               ret = -ENODEV;
-               goto out_free;
+       glue->core = platform_device_alloc("wl12xx", -1);
+       if (!glue->core) {
+               dev_err(glue->dev, "can't allocate platform_device\n");
+               ret = -ENOMEM;
+               goto out_free_glue;
        }
 
-       wl->ref_clock = pdata->board_ref_clock;
-       wl->tcxo_clock = pdata->board_tcxo_clock;
-       wl->platform_quirks = pdata->platform_quirks;
+       glue->core->dev.parent = &spi->dev;
 
-       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
-               irqflags = IRQF_TRIGGER_RISING;
-       else
-               irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+       memset(res, 0x00, sizeof(res));
 
-       wl->irq = spi->irq;
-       if (wl->irq < 0) {
-               wl1271_error("irq missing in platform data");
-               ret = -ENODEV;
-               goto out_free;
-       }
+       res[0].start = spi->irq;
+       res[0].flags = IORESOURCE_IRQ;
+       res[0].name = "irq";
 
-       ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
-                                  irqflags,
-                                  DRIVER_NAME, wl);
-       if (ret < 0) {
-               wl1271_error("request_irq() failed: %d", ret);
-               goto out_free;
+       ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+       if (ret) {
+               dev_err(glue->dev, "can't add resources\n");
+               goto out_dev_put;
        }
 
-       disable_irq(wl->irq);
-
-       ret = wl1271_init_ieee80211(wl);
-       if (ret)
-               goto out_irq;
+       ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+       if (ret) {
+               dev_err(glue->dev, "can't add platform data\n");
+               goto out_dev_put;
+       }
 
-       ret = wl1271_register_hw(wl);
-       if (ret)
-               goto out_irq;
+       ret = platform_device_add(glue->core);
+       if (ret) {
+               dev_err(glue->dev, "can't register platform device\n");
+               goto out_dev_put;
+       }
 
        return 0;
 
- out_irq:
-       free_irq(wl->irq, wl);
-
- out_free:
-       wl1271_free_hw(wl);
+out_dev_put:
+       platform_device_put(glue->core);
 
+out_free_glue:
+       kfree(glue);
+out:
        return ret;
 }
 
 static int __devexit wl1271_remove(struct spi_device *spi)
 {
-       struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+       struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
 
-       wl1271_unregister_hw(wl);
-       free_irq(wl->irq, wl);
-       wl1271_free_hw(wl);
+       platform_device_del(glue->core);
+       platform_device_put(glue->core);
+       kfree(glue);
 
        return 0;
 }
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
 static struct spi_driver wl1271_spi_driver = {
        .driver = {
                .name           = "wl1271_spi",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
        },
 
index 4ae8eff..978cf2d 100644 (file)
 #include <net/genetlink.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "acx.h"
 #include "reg.h"
+#include "ps.h"
 
 #define WL1271_TM_MAX_DATA_LENGTH 1024
 
@@ -87,31 +89,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
                return -EMSGSIZE;
 
        mutex_lock(&wl->mutex);
-       ret = wl1271_cmd_test(wl, buf, buf_len, answer);
-       mutex_unlock(&wl->mutex);
 
+       if (wl->state == WL1271_STATE_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wl1271_cmd_test(wl, buf, buf_len, answer);
        if (ret < 0) {
                wl1271_warning("testmode cmd test failed: %d", ret);
-               return ret;
+               goto out_sleep;
        }
 
        if (answer) {
                len = nla_total_size(buf_len);
                skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
-               if (!skb)
-                       return -ENOMEM;
+               if (!skb) {
+                       ret = -ENOMEM;
+                       goto out_sleep;
+               }
 
                NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
                ret = cfg80211_testmode_reply(skb);
                if (ret < 0)
-                       return ret;
+                       goto out_sleep;
        }
 
-       return 0;
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+
+       return ret;
 
 nla_put_failure:
        kfree_skb(skb);
-       return -EMSGSIZE;
+       ret = -EMSGSIZE;
+       goto out_sleep;
 }
 
 static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +146,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
 
        ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
 
+       mutex_lock(&wl->mutex);
+
+       if (wl->state == WL1271_STATE_OFF) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       if (!cmd) {
+               ret = -ENOMEM;
+               goto out_sleep;
+       }
 
-       mutex_lock(&wl->mutex);
        ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
-       mutex_unlock(&wl->mutex);
-
        if (ret < 0) {
                wl1271_warning("testmode cmd interrogate failed: %d", ret);
-               kfree(cmd);
-               return ret;
+               goto out_free;
        }
 
        skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
        if (!skb) {
-               kfree(cmd);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free;
        }
 
        NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+       ret = cfg80211_testmode_reply(skb);
+       if (ret < 0)
+               goto out_free;
+
+out_free:
+       kfree(cmd);
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
 
-       return 0;
+       return ret;
 
 nla_put_failure:
        kfree_skb(skb);
-       return -EMSGSIZE;
+       ret = -EMSGSIZE;
+       goto out_free;
 }
 
 static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
index bad9e29..7d727ee 100644 (file)
 #include <linux/etherdevice.h>
 
 #include "wl12xx.h"
+#include "debug.h"
 #include "io.h"
 #include "reg.h"
 #include "ps.h"
 #include "tx.h"
 #include "event.h"
 
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+                                     struct wl12xx_vif *wlvif, u8 id)
 {
        int ret;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
 
        if (is_ap)
                ret = wl12xx_cmd_set_default_wep_key(wl, id,
-                                                    wl->ap_bcast_hlid);
+                                                    wlvif->ap.bcast_hlid);
        else
-               ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+               ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
 
        if (ret < 0)
                return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
 }
 
 static int wl1271_tx_update_filters(struct wl1271 *wl,
-                                                struct sk_buff *skb)
+                                   struct wl12xx_vif *wlvif,
+                                   struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
        int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
        if (!ieee80211_is_auth(hdr->frame_control))
                return 0;
 
-       if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+       if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
                goto out;
 
        wl1271_debug(DEBUG_CMD, "starting device role for roaming");
-       ret = wl12xx_cmd_role_start_dev(wl);
-       if (ret < 0)
-               goto out;
-
-       ret = wl12xx_roc(wl, wl->dev_role_id);
+       ret = wl12xx_start_dev(wl, wlvif);
        if (ret < 0)
                goto out;
 out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
                wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
 }
 
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif,
+                                   u8 hlid)
 {
        bool fw_ps, single_sta;
        u8 tx_pkts;
 
-       /* only regulate station links */
-       if (hlid < WL1271_AP_STA_HLID_START)
+       if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
                return;
 
-       if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
-           return;
-
        fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
        tx_pkts = wl->links[hlid].allocated_pkts;
        single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
         * case FW-memory congestion is not a problem.
         */
        if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
-               wl1271_ps_link_start(wl, hlid, true);
+               wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
        return wl->dummy_packet == skb;
 }
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                        struct sk_buff *skb)
 {
        struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
 
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
        } else {
                struct ieee80211_hdr *hdr;
 
-               if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+               if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
                        return wl->system_hlid;
 
                hdr = (struct ieee80211_hdr *)skb->data;
                if (ieee80211_is_mgmt(hdr->frame_control))
-                       return wl->ap_global_hlid;
+                       return wlvif->ap.global_hlid;
                else
-                       return wl->ap_bcast_hlid;
+                       return wlvif->ap.bcast_hlid;
        }
 }
 
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 
-       if (wl12xx_is_dummy_packet(wl, skb))
+       if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
                return wl->system_hlid;
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               return wl12xx_tx_get_hlid_ap(wl, skb);
+       if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+               return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
 
-       wl1271_tx_update_filters(wl, skb);
+       wl1271_tx_update_filters(wl, wlvif, skb);
 
-       if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
-            test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+       if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+            test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
            !ieee80211_is_auth(hdr->frame_control) &&
            !ieee80211_is_assoc_req(hdr->frame_control))
-               return wl->sta_hlid;
+               return wlvif->sta.hlid;
        else
-               return wl->dev_hlid;
+               return wlvif->dev_hlid;
 }
 
 static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
                                                unsigned int packet_length)
 {
-       if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
-               return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
-       else
+       if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
                return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+       else
+               return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
 }
 
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
-                               u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             struct sk_buff *skb, u32 extra, u32 buf_offset,
+                             u8 hlid)
 {
        struct wl1271_tx_hw_descr *desc;
        u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        u32 total_blocks;
        int id, ret = -EBUSY, ac;
        u32 spare_blocks = wl->tx_spare_blocks;
+       bool is_dummy = false;
 
        if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
                return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        len = wl12xx_calc_packet_alignment(wl, total_len);
 
        /* in case of a dummy packet, use default amount of spare mem blocks */
-       if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+       if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+               is_dummy = true;
                spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+       }
 
        total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
                spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
                ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                wl->tx_allocated_pkts[ac]++;
 
-               if (wl->bss_type == BSS_TYPE_AP_BSS &&
-                   hlid >= WL1271_AP_STA_HLID_START)
+               if (!is_dummy && wlvif &&
+                   wlvif->bss_type == BSS_TYPE_AP_BSS &&
+                   test_bit(hlid, wlvif->ap.sta_hlid_map))
                        wl->links[hlid].allocated_pkts++;
 
                ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
        return ret;
 }
 
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
-                             u32 extra, struct ieee80211_tx_info *control,
-                             u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                              struct sk_buff *skb, u32 extra,
+                              struct ieee80211_tx_info *control, u8 hlid)
 {
        struct timespec ts;
        struct wl1271_tx_hw_descr *desc;
        int aligned_len, ac, rate_idx;
        s64 hosttime;
-       u16 tx_attr;
+       u16 tx_attr = 0;
+       bool is_dummy;
 
        desc = (struct wl1271_tx_hw_descr *) skb->data;
 
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        hosttime = (timespec_to_ns(&ts) >> 10);
        desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
 
-       if (wl->bss_type != BSS_TYPE_AP_BSS)
+       is_dummy = wl12xx_is_dummy_packet(wl, skb);
+       if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
                desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
        else
                desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
        ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
        desc->tid = skb->priority;
 
-       if (wl12xx_is_dummy_packet(wl, skb)) {
+       if (is_dummy) {
                /*
                 * FW expects the dummy packet to have an invalid session id -
                 * any session id that is different than the one set in the join
                 */
-               tx_attr = ((~wl->session_counter) <<
+               tx_attr = (SESSION_COUNTER_INVALID <<
                           TX_HW_ATTR_OFST_SESSION_COUNTER) &
                           TX_HW_ATTR_SESSION_COUNTER;
 
                tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
-       } else {
+       } else if (wlvif) {
                /* configure the tx attributes */
-               tx_attr =
-                       wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+               tx_attr = wlvif->session_counter <<
+                         TX_HW_ATTR_OFST_SESSION_COUNTER;
        }
 
        desc->hlid = hlid;
-
-       if (wl->bss_type != BSS_TYPE_AP_BSS) {
+       if (is_dummy || !wlvif)
+               rate_idx = 0;
+       else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
                /* if the packets are destined for AP (have a STA entry)
                   send them with AP rate policies, otherwise use default
                   basic rates */
-               if (control->control.sta)
-                       rate_idx = ACX_TX_AP_FULL_RATE;
+               if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+                       rate_idx = wlvif->sta.p2p_rate_idx;
+               else if (control->control.sta)
+                       rate_idx = wlvif->sta.ap_rate_idx;
                else
-                       rate_idx = ACX_TX_BASIC_RATE;
+                       rate_idx = wlvif->sta.basic_rate_idx;
        } else {
-               if (hlid == wl->ap_global_hlid)
-                       rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
-               else if (hlid == wl->ap_bcast_hlid)
-                       rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+               if (hlid == wlvif->ap.global_hlid)
+                       rate_idx = wlvif->ap.mgmt_rate_idx;
+               else if (hlid == wlvif->ap.bcast_hlid)
+                       rate_idx = wlvif->ap.bcast_rate_idx;
                else
-                       rate_idx = ac;
+                       rate_idx = wlvif->ap.ucast_rate_idx[ac];
        }
 
        tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
 }
 
 /* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
-                                                       u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                  struct sk_buff *skb, u32 buf_offset)
 {
        struct ieee80211_tx_info *info;
        u32 extra = 0;
        int ret = 0;
        u32 total_len;
        u8 hlid;
+       bool is_dummy;
 
        if (!skb)
                return -EINVAL;
 
        info = IEEE80211_SKB_CB(skb);
 
+       /* TODO: handle dummy packets on multi-vifs */
+       is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
        if (info->control.hw_key &&
            info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
                extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
                is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
                         (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-               if (unlikely(is_wep && wl->default_key != idx)) {
-                       ret = wl1271_set_default_wep_key(wl, idx);
+               if (unlikely(is_wep && wlvif->default_key != idx)) {
+                       ret = wl1271_set_default_wep_key(wl, wlvif, idx);
                        if (ret < 0)
                                return ret;
-                       wl->default_key = idx;
+                       wlvif->default_key = idx;
                }
        }
-
-       hlid = wl1271_tx_get_hlid(wl, skb);
+       hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
        if (hlid == WL12XX_INVALID_LINK_ID) {
                wl1271_error("invalid hlid. dropping skb 0x%p", skb);
                return -EINVAL;
        }
 
-       ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+       ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
        if (ret < 0)
                return ret;
 
-       wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+       wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
+       if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
                wl1271_tx_ap_update_inconnection_sta(wl, skb);
-               wl1271_tx_regulate_link(wl, hlid);
+               wl1271_tx_regulate_link(wl, wlvif, hlid);
        }
 
        /*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
        memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
 
        /* Revert side effects in the dummy packet skb, so it can be reused */
-       if (wl12xx_is_dummy_packet(wl, skb))
+       if (is_dummy)
                skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
 
        return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
        return &queues[q];
 }
 
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+                                             struct wl1271_link *lnk)
 {
-       struct sk_buff *skb = NULL;
+       struct sk_buff *skb;
        unsigned long flags;
        struct sk_buff_head *queue;
 
-       queue = wl1271_select_queue(wl, wl->tx_queue);
+       queue = wl1271_select_queue(wl, lnk->tx_queue);
        if (!queue)
-               goto out;
+               return NULL;
 
        skb = skb_dequeue(queue);
-
-out:
        if (skb) {
                int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
                spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
        return skb;
 }
 
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+                                             struct wl12xx_vif *wlvif)
 {
        struct sk_buff *skb = NULL;
-       unsigned long flags;
        int i, h, start_hlid;
-       struct sk_buff_head *queue;
 
        /* start from the link after the last one */
-       start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+       start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
 
        /* dequeue according to AC, round robin on each link */
-       for (i = 0; i < AP_MAX_LINKS; i++) {
-               h = (start_hlid + i) % AP_MAX_LINKS;
+       for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+               h = (start_hlid + i) % WL12XX_MAX_LINKS;
 
                /* only consider connected stations */
-               if (h >= WL1271_AP_STA_HLID_START &&
-                   !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+               if (!test_bit(h, wlvif->links_map))
                        continue;
 
-               queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
-               if (!queue)
+               skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+               if (!skb)
                        continue;
 
-               skb = skb_dequeue(queue);
-               if (skb)
-                       break;
+               wlvif->last_tx_hlid = h;
+               break;
        }
 
-       if (skb) {
-               int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
-               wl->last_tx_hlid = h;
-               spin_lock_irqsave(&wl->wl_lock, flags);
-               wl->tx_queue_count[q]--;
-               spin_unlock_irqrestore(&wl->wl_lock, flags);
-       } else {
-               wl->last_tx_hlid = 0;
-       }
+       if (!skb)
+               wlvif->last_tx_hlid = 0;
 
        return skb;
 }
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 {
        unsigned long flags;
+       struct wl12xx_vif *wlvif = wl->last_wlvif;
        struct sk_buff *skb = NULL;
 
-       if (wl->bss_type == BSS_TYPE_AP_BSS)
-               skb = wl1271_ap_skb_dequeue(wl);
-       else
-               skb = wl1271_sta_skb_dequeue(wl);
+       if (wlvif) {
+               wl12xx_for_each_wlvif_continue(wl, wlvif) {
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       if (skb) {
+                               wl->last_wlvif = wlvif;
+                               break;
+                       }
+               }
+       }
+
+       /* do another pass */
+       if (!skb) {
+               wl12xx_for_each_wlvif(wl, wlvif) {
+                       skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+                       if (skb) {
+                               wl->last_wlvif = wlvif;
+                               break;
+                       }
+               }
+       }
+
+       if (!skb)
+               skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
 
        if (!skb &&
            test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
        return skb;
 }
 
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                 struct sk_buff *skb)
 {
        unsigned long flags;
        int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 
        if (wl12xx_is_dummy_packet(wl, skb)) {
                set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
-       } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               u8 hlid = wl1271_tx_get_hlid(wl, skb);
+       } else {
+               u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                /* make sure we dequeue the same packet next time */
-               wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
-       } else {
-               skb_queue_head(&wl->tx_queue[q], skb);
+               wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+                                     WL12XX_MAX_LINKS;
        }
 
        spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
        return ieee80211_is_data_present(hdr->frame_control);
 }
 
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+       struct wl12xx_vif *wlvif;
+       u32 timeout;
+       u8 hlid;
+
+       if (!wl->conf.rx_streaming.interval)
+               return;
+
+       if (!wl->conf.rx_streaming.always &&
+           !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+               return;
+
+       timeout = wl->conf.rx_streaming.duration;
+       wl12xx_for_each_wlvif_sta(wl, wlvif) {
+               bool found = false;
+               for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+                       if (test_bit(hlid, wlvif->links_map)) {
+                               found = true;
+                               break;
+                       }
+               }
+
+               if (!found)
+                       continue;
+
+               /* enable rx streaming */
+               if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+                       ieee80211_queue_work(wl->hw,
+                                            &wlvif->rx_streaming_enable_work);
+
+               mod_timer(&wlvif->rx_streaming_timer,
+                         jiffies + msecs_to_jiffies(timeout));
+       }
+}
+
 void wl1271_tx_work_locked(struct wl1271 *wl)
 {
+       struct wl12xx_vif *wlvif;
        struct sk_buff *skb;
+       struct wl1271_tx_hw_descr *desc;
        u32 buf_offset = 0;
        bool sent_packets = false;
-       bool had_data = false;
-       bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+       unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
        int ret;
 
        if (unlikely(wl->state == WL1271_STATE_OFF))
                return;
 
        while ((skb = wl1271_skb_dequeue(wl))) {
-               if (wl1271_tx_is_data_present(skb))
-                       had_data = true;
+               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+               bool has_data = false;
 
-               ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+               wlvif = NULL;
+               if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+                       wlvif = wl12xx_vif_to_data(info->control.vif);
+
+               has_data = wlvif && wl1271_tx_is_data_present(skb);
+               ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
                if (ret == -EAGAIN) {
                        /*
                         * Aggregation buffer is full.
                         * Flush buffer and try again.
                         */
-                       wl1271_skb_queue_head(wl, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb);
                        wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
                                     buf_offset, true);
                        sent_packets = true;
@@ -672,7 +735,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
                         * Firmware buffer is full.
                         * Queue back last skb, and stop aggregating.
                         */
-                       wl1271_skb_queue_head(wl, skb);
+                       wl1271_skb_queue_head(wl, wlvif, skb);
                        /* No work left, avoid scheduling redundant tx work */
                        set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
                        goto out_ack;
@@ -682,6 +745,10 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
                }
                buf_offset += ret;
                wl->tx_packets_count++;
+               if (has_data) {
+                       desc = (struct wl1271_tx_hw_descr *) skb->data;
+                       __set_bit(desc->hlid, active_hlids);
+               }
        }
 
 out_ack:
@@ -701,19 +768,7 @@ out_ack:
 
                wl1271_handle_tx_low_watermark(wl);
        }
-       if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
-           (wl->conf.rx_streaming.always ||
-            test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
-               u32 timeout = wl->conf.rx_streaming.duration;
-
-               /* enable rx streaming */
-               if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
-                       ieee80211_queue_work(wl->hw,
-                                            &wl->rx_streaming_enable_work);
-
-               mod_timer(&wl->rx_streaming_timer,
-                         jiffies + msecs_to_jiffies(timeout));
-       }
+       wl12xx_rearm_rx_streaming(wl, active_hlids);
 }
 
 void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +792,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
                                      struct wl1271_tx_hw_res_descr *result)
 {
        struct ieee80211_tx_info *info;
+       struct ieee80211_vif *vif;
+       struct wl12xx_vif *wlvif;
        struct sk_buff *skb;
        int id = result->id;
        int rate = -1;
@@ -756,11 +813,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
                return;
        }
 
+       /* info->control is valid as long as we don't update info->status */
+       vif = info->control.vif;
+       wlvif = wl12xx_vif_to_data(vif);
+
        /* update the TX status info */
        if (result->status == TX_SUCCESS) {
                if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                        info->flags |= IEEE80211_TX_STAT_ACK;
-               rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+               rate = wl1271_rate_to_idx(result->rate_class_index,
+                                         wlvif->band);
                retries = result->ack_failures;
        } else if (result->status == TX_RETRY_EXCEEDED) {
                wl->stats.excessive_retries++;
@@ -783,14 +845,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
             info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
             info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
                u8 fw_lsb = result->tx_security_sequence_number_lsb;
-               u8 cur_lsb = wl->tx_security_last_seq_lsb;
+               u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
 
                /*
                 * update security sequence number, taking care of potential
                 * wrap-around
                 */
-               wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
-               wl->tx_security_last_seq_lsb = fw_lsb;
+               wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+               wlvif->tx_security_last_seq_lsb = fw_lsb;
        }
 
        /* remove private header from packet */
@@ -886,39 +948,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 }
 
 /* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int i;
-       struct sk_buff *skb;
-       struct ieee80211_tx_info *info;
 
        /* TX failure */
-       if (wl->bss_type == BSS_TYPE_AP_BSS) {
-               for (i = 0; i < AP_MAX_LINKS; i++) {
-                       wl1271_free_sta(wl, i);
-                       wl1271_tx_reset_link_queues(wl, i);
-                       wl->links[i].allocated_pkts = 0;
-                       wl->links[i].prev_freed_pkts = 0;
-               }
-
-               wl->last_tx_hlid = 0;
-       } else {
-               for (i = 0; i < NUM_TX_QUEUES; i++) {
-                       while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
-                               wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
-                                            skb);
-
-                               if (!wl12xx_is_dummy_packet(wl, skb)) {
-                                       info = IEEE80211_SKB_CB(skb);
-                                       info->status.rates[0].idx = -1;
-                                       info->status.rates[0].count = 0;
-                                       ieee80211_tx_status_ni(wl->hw, skb);
-                               }
-                       }
-               }
+       for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+               if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+                       wl1271_free_sta(wl, wlvif, i);
+               else
+                       wlvif->sta.ba_rx_bitmap = 0;
 
-               wl->ba_rx_bitmap = 0;
+               wl1271_tx_reset_link_queues(wl, i);
+               wl->links[i].allocated_pkts = 0;
+               wl->links[i].prev_freed_pkts = 0;
        }
+       wlvif->last_tx_hlid = 0;
+}
+
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+       int i;
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
 
        for (i = 0; i < NUM_TX_QUEUES; i++)
                wl->tx_queue_count[i] = 0;
index dc4f09a..2dbb24e 100644 (file)
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
 void wl1271_tx_work(struct work_struct *work);
 void wl1271_tx_work_locked(struct wl1271 *wl);
 void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
 void wl1271_tx_flush(struct wl1271 *wl);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
                                enum ieee80211_band rate_band);
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                        struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                     struct sk_buff *skb);
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
 
 /* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
 
 #endif
index 1ec90fc..f1c9117 100644 (file)
@@ -35,9 +35,6 @@
 #include "conf.h"
 #include "ini.h"
 
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
 /*
  * FW versions support BA 11n
  * versions marks x.x.x.50-60.x
 #define WL12XX_BA_SUPPORT_FW_COST_VER2_START    50
 #define WL12XX_BA_SUPPORT_FW_COST_VER2_END      60
 
-enum {
-       DEBUG_NONE      = 0,
-       DEBUG_IRQ       = BIT(0),
-       DEBUG_SPI       = BIT(1),
-       DEBUG_BOOT      = BIT(2),
-       DEBUG_MAILBOX   = BIT(3),
-       DEBUG_TESTMODE  = BIT(4),
-       DEBUG_EVENT     = BIT(5),
-       DEBUG_TX        = BIT(6),
-       DEBUG_RX        = BIT(7),
-       DEBUG_SCAN      = BIT(8),
-       DEBUG_CRYPT     = BIT(9),
-       DEBUG_PSM       = BIT(10),
-       DEBUG_MAC80211  = BIT(11),
-       DEBUG_CMD       = BIT(12),
-       DEBUG_ACX       = BIT(13),
-       DEBUG_SDIO      = BIT(14),
-       DEBUG_FILTERS   = BIT(15),
-       DEBUG_ADHOC     = BIT(16),
-       DEBUG_AP        = BIT(17),
-       DEBUG_MASTER    = (DEBUG_ADHOC | DEBUG_AP),
-       DEBUG_ALL       = ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
-       pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
-       pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
-       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
-       pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
-       do { \
-               if (level & wl12xx_debug_level) \
-                       pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
-       } while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len)   \
-       do { \
-               if (level & wl12xx_debug_level) \
-                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
-                                      DUMP_PREFIX_OFFSET, 16, 1,       \
-                                      buf,                             \
-                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
-                                      0);                              \
-       } while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len)     \
-       do { \
-               if (level & wl12xx_debug_level) \
-                       print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
-                                      DUMP_PREFIX_OFFSET, 16, 1,       \
-                                      buf,                             \
-                                      min_t(size_t, len, DEBUG_DUMP_LIMIT), \
-                                      true);                           \
-       } while (0)
-
 #define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
 #define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
 
@@ -142,16 +72,12 @@ extern u32 wl12xx_debug_level;
 #define WL12XX_INVALID_ROLE_ID     0xff
 #define WL12XX_INVALID_LINK_ID     0xff
 
+#define WL12XX_MAX_RATE_POLICIES 16
+
 /* Defined by FW as 0. Will not be freed or allocated. */
 #define WL12XX_SYSTEM_HLID         0
 
 /*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START   3
-
-/*
  * When in AP-mode, we allow (at least) this number of packets
  * to be transmitted to FW for a STA in PS-mode. Only when packets are
  * present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +162,6 @@ struct wl1271_stats {
 
 #define AP_MAX_STATIONS            8
 
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS               (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
 /* FW status registers */
 struct wl12xx_fw_status {
        __le32 intr;
@@ -299,17 +218,14 @@ struct wl1271_scan {
 };
 
 struct wl1271_if_operations {
-       void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+       void (*read)(struct device *child, int addr, void *buf, size_t len,
                     bool fixed);
-       void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+       void (*write)(struct device *child, int addr, void *buf, size_t len,
                     bool fixed);
-       void (*reset)(struct wl1271 *wl);
-       void (*init)(struct wl1271 *wl);
-       int (*power)(struct wl1271 *wl, bool enable);
-       struct device* (*dev)(struct wl1271 *wl);
-       void (*enable_irq)(struct wl1271 *wl);
-       void (*disable_irq)(struct wl1271 *wl);
-       void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+       void (*reset)(struct device *child);
+       void (*init)(struct device *child);
+       int (*power)(struct device *child, bool enable);
+       void (*set_block_size) (struct device *child, unsigned int blksz);
 };
 
 #define MAX_NUM_KEYS 14
@@ -326,29 +242,33 @@ struct wl1271_ap_key {
 };
 
 enum wl12xx_flags {
-       WL1271_FLAG_STA_ASSOCIATED,
-       WL1271_FLAG_IBSS_JOINED,
        WL1271_FLAG_GPIO_POWER,
        WL1271_FLAG_TX_QUEUE_STOPPED,
        WL1271_FLAG_TX_PENDING,
        WL1271_FLAG_IN_ELP,
        WL1271_FLAG_ELP_REQUESTED,
-       WL1271_FLAG_PSM,
-       WL1271_FLAG_PSM_REQUESTED,
        WL1271_FLAG_IRQ_RUNNING,
        WL1271_FLAG_IDLE,
-       WL1271_FLAG_PSPOLL_FAILURE,
-       WL1271_FLAG_STA_STATE_SENT,
        WL1271_FLAG_FW_TX_BUSY,
-       WL1271_FLAG_AP_STARTED,
-       WL1271_FLAG_IF_INITIALIZED,
        WL1271_FLAG_DUMMY_PACKET_PENDING,
        WL1271_FLAG_SUSPENDED,
        WL1271_FLAG_PENDING_WORK,
        WL1271_FLAG_SOFT_GEMINI,
-       WL1271_FLAG_RX_STREAMING_STARTED,
        WL1271_FLAG_RECOVERY_IN_PROGRESS,
-       WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+       WLVIF_FLAG_INITIALIZED,
+       WLVIF_FLAG_STA_ASSOCIATED,
+       WLVIF_FLAG_IBSS_JOINED,
+       WLVIF_FLAG_AP_STARTED,
+       WLVIF_FLAG_PSM,
+       WLVIF_FLAG_PSM_REQUESTED,
+       WLVIF_FLAG_STA_STATE_SENT,
+       WLVIF_FLAG_RX_STREAMING_STARTED,
+       WLVIF_FLAG_PSPOLL_FAILURE,
+       WLVIF_FLAG_CS_PROGRESS,
+       WLVIF_FLAG_AP_PROBE_RESP_SET,
 };
 
 struct wl1271_link {
@@ -366,10 +286,11 @@ struct wl1271_link {
 };
 
 struct wl1271 {
-       struct platform_device *plat_dev;
        struct ieee80211_hw *hw;
        bool mac80211_registered;
 
+       struct device *dev;
+
        void *if_priv;
 
        struct wl1271_if_operations *if_ops;
@@ -399,25 +320,20 @@ struct wl1271 {
 
        s8 hw_pg_ver;
 
-       u8 bssid[ETH_ALEN];
        u8 mac_addr[ETH_ALEN];
-       u8 bss_type;
-       u8 set_bss_type;
-       u8 p2p; /* we are using p2p role */
-       u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
-       u8 ssid_len;
        int channel;
-       u8 role_id;
-       u8 dev_role_id;
        u8 system_hlid;
-       u8 sta_hlid;
-       u8 dev_hlid;
-       u8 ap_global_hlid;
-       u8 ap_bcast_hlid;
 
        unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
        unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
        unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+       unsigned long rate_policies_map[
+                       BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+       struct list_head wlvif_list;
+
+       u8 sta_count;
+       u8 ap_count;
 
        struct wl1271_acx_mem_map *target_mem_map;
 
@@ -440,11 +356,7 @@ struct wl1271 {
        /* Time-offset between host and chipset clocks */
        s64 time_offset;
 
-       /* Session counter for the chipset */
-       int session_counter;
-
        /* Frames scheduled for transmission, not handled yet */
-       struct sk_buff_head tx_queue[NUM_TX_QUEUES];
        int tx_queue_count[NUM_TX_QUEUES];
        long stopped_queues_map;
 
@@ -462,17 +374,6 @@ struct wl1271 {
        struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
        int tx_frames_cnt;
 
-       /*
-        * Security sequence number
-        *     bits 0-15: lower 16 bits part of sequence number
-        *     bits 16-47: higher 32 bits part of sequence number
-        *     bits 48-63: not in use
-        */
-       u64 tx_security_seq;
-
-       /* 8 bits of the last sequence number in use */
-       u8 tx_security_last_seq_lsb;
-
        /* FW Rx counter */
        u32 rx_counter;
 
@@ -507,59 +408,21 @@ struct wl1271 {
        u32 mbox_ptr[2];
 
        /* Are we currently scanning */
+       struct ieee80211_vif *scan_vif;
        struct wl1271_scan scan;
        struct delayed_work scan_complete_work;
 
        bool sched_scanning;
 
-       /* probe-req template for the current AP */
-       struct sk_buff *probereq;
-
-       /* Our association ID */
-       u16 aid;
-
-       /*
-        * currently configured rate set:
-        *      bits  0-15 - 802.11abg rates
-        *      bits 16-23 - 802.11n   MCS index mask
-        * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
-        */
-       u32 basic_rate_set;
-       u32 basic_rate;
-       u32 rate_set;
-       u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
        /* The current band */
        enum ieee80211_band band;
 
-       /* Beaconing interval (needed for ad-hoc) */
-       u32 beacon_int;
-
-       /* Default key (for WEP) */
-       u32 default_key;
-
-       /* Rx Streaming */
-       struct work_struct rx_streaming_enable_work;
-       struct work_struct rx_streaming_disable_work;
-       struct timer_list rx_streaming_timer;
-
        struct completion *elp_compl;
-       struct completion *ps_compl;
        struct delayed_work elp_work;
-       struct delayed_work pspoll_work;
-
-       /* counter for ps-poll delivery failures */
-       int ps_poll_failures;
-
-       /* retry counter for PSM entries */
-       u8 psm_entry_retry;
 
        /* in dBm */
        int power_level;
 
-       int rssi_thold;
-       int last_rssi_event;
-
        struct wl1271_stats stats;
 
        __le32 buffer_32;
@@ -583,20 +446,9 @@ struct wl1271 {
        /* Most recently reported noise in dBm */
        s8 noise;
 
-       /* map for HLIDs of associated stations - when operating in AP mode */
-       unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
-       /* recoreded keys for AP-mode - set here before AP startup */
-       struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
        /* bands supported by this instance of wl12xx */
        struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
 
-       /* RX BA constraint value */
-       bool ba_support;
-       u8 ba_rx_bitmap;
-       bool ba_allowed;
-
        int tcxo_clock;
 
        /*
@@ -610,10 +462,7 @@ struct wl1271 {
         * AP-mode - links indexed by HLID. The global and broadcast links
         * are always active.
         */
-       struct wl1271_link links[AP_MAX_LINKS];
-
-       /* the hlid of the link where the last transmitted skb came from */
-       int last_tx_hlid;
+       struct wl1271_link links[WL12XX_MAX_LINKS];
 
        /* AP-mode - a bitmap of links currently in PS mode according to FW */
        u32 ap_fw_ps_map;
@@ -632,21 +481,173 @@ struct wl1271 {
 
        /* AP-mode - number of currently connected stations */
        int active_sta_count;
+
+       /* last wlvif we transmitted from */
+       struct wl12xx_vif *last_wlvif;
 };
 
 struct wl1271_station {
        u8 hlid;
 };
 
+struct wl12xx_vif {
+       struct wl1271 *wl;
+       struct list_head list;
+       unsigned long flags;
+       u8 bss_type;
+       u8 p2p; /* we are using p2p role */
+       u8 role_id;
+
+       /* sta/ibss specific */
+       u8 dev_role_id;
+       u8 dev_hlid;
+
+       union {
+               struct {
+                       u8 hlid;
+                       u8 ba_rx_bitmap;
+
+                       u8 basic_rate_idx;
+                       u8 ap_rate_idx;
+                       u8 p2p_rate_idx;
+               } sta;
+               struct {
+                       u8 global_hlid;
+                       u8 bcast_hlid;
+
+                       /* HLIDs bitmap of associated stations */
+                       unsigned long sta_hlid_map[BITS_TO_LONGS(
+                                                       WL12XX_MAX_LINKS)];
+
+                       /* recorded keys - set here before AP startup */
+                       struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+                       u8 mgmt_rate_idx;
+                       u8 bcast_rate_idx;
+                       u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+               } ap;
+       };
+
+       /* the hlid of the last transmitted skb */
+       int last_tx_hlid;
+
+       unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+       u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+       u8 ssid_len;
+
+       /* The current band */
+       enum ieee80211_band band;
+       int channel;
+
+       u32 bitrate_masks[IEEE80211_NUM_BANDS];
+       u32 basic_rate_set;
+
+       /*
+        * currently configured rate set:
+        *      bits  0-15 - 802.11abg rates
+        *      bits 16-23 - 802.11n   MCS index mask
+        * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+        */
+       u32 basic_rate;
+       u32 rate_set;
+
+       /* probe-req template for the current AP */
+       struct sk_buff *probereq;
+
+       /* Beaconing interval (needed for ad-hoc) */
+       u32 beacon_int;
+
+       /* Default key (for WEP) */
+       u32 default_key;
+
+       /* Our association ID */
+       u16 aid;
+
+       /* Session counter for the chipset */
+       int session_counter;
+
+       struct completion *ps_compl;
+       struct delayed_work pspoll_work;
+
+       /* counter for ps-poll delivery failures */
+       int ps_poll_failures;
+
+       /* retry counter for PSM entries */
+       u8 psm_entry_retry;
+
+       /* in dBm */
+       int power_level;
+
+       int rssi_thold;
+       int last_rssi_event;
+
+       /* RX BA constraint value */
+       bool ba_support;
+       bool ba_allowed;
+
+       /* Rx Streaming */
+       struct work_struct rx_streaming_enable_work;
+       struct work_struct rx_streaming_disable_work;
+       struct timer_list rx_streaming_timer;
+
+       /*
+        * This struct must be last!
+        * data that has to be saved across reconfigs (e.g. recovery)
+        * should be declared in this struct.
+        */
+       struct {
+               u8 persistent[0];
+               /*
+                * Security sequence number
+                *     bits 0-15: lower 16 bits part of sequence number
+                *     bits 16-47: higher 32 bits part of sequence number
+                *     bits 48-63: not in use
+                */
+               u64 tx_security_seq;
+
+               /* 8 bits of the last sequence number in use */
+               u8 tx_security_last_seq_lsb;
+       };
+};
+
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+       return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+       return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+               list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+               list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type)   \
+               wl12xx_for_each_wlvif(wl, wlvif)                \
+                       if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif)   \
+               wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif)    \
+               wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
+
 int wl1271_plt_start(struct wl1271 *wl);
 int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_queue_recovery_work(struct wl1271 *wl);
 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 
 #define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
 
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
 
 #define WL1271_DEFAULT_POWER_LEVEL 0
 
@@ -669,8 +670,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
 /* Each RX/TX transaction requires an end-of-transaction transfer */
 #define WL12XX_QUIRK_END_OF_TRANSACTION                BIT(0)
 
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT       BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT    BIT(2)
 
 /* Older firmwares did not implement the FW logger over bus feature */
 #define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED     BIT(4)
index f7971d3..8f0ffaf 100644 (file)
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
        u8 ta[ETH_ALEN];
 } __packed;
 
-struct wl12xx_qos_null_data_template {
-       struct ieee80211_header header;
-       __le16 qos_ctl;
-} __packed;
-
 struct wl12xx_arp_rsp_template {
        struct ieee80211_hdr_3addr hdr;
 
index 973b110..3c96b33 100644 (file)
@@ -2,7 +2,7 @@
 #include <linux/err.h>
 #include <linux/wl12xx.h>
 
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
 
 int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 {
@@ -18,7 +18,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
        return 0;
 }
 
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
 {
        if (!platform_data)
                return ERR_PTR(-ENODEV);
index 1825629..0b5c18f 100644 (file)
@@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
        return 0;
 }
 
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct xenvif *vif = netdev_priv(dev);
 
index 15e332d..639cf8a 100644 (file)
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        struct gnttab_copy *copy_gop;
        struct netbk_rx_meta *meta;
        /*
-        * These variables a used iff get_page_ext returns true,
+        * These variables are used iff get_page_ext returns true,
         * in which case they are guaranteed to be initialized.
         */
        unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                if (!page)
                        return NULL;
 
-               netbk->mmap_pages[pending_idx] = page;
-
                gop->source.u.ref = txp->gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txp->offset;
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        continue;
                }
 
-               netbk->mmap_pages[pending_idx] = page;
-
                gop->source.u.ref = txreq.gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txreq.offset;
index 226faab..0a59c57 100644 (file)
@@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev);
 #define xennet_sysfs_delif(dev) do { } while (0)
 #endif
 
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
 {
        return dev->features & NETIF_F_SG;
 }
@@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev)
        gnttab_free_grant_references(np->gref_rx_head);
 }
 
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct netfront_info *np = netdev_priv(dev);
        int val;
@@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
                netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
-       case XenbusStateConnected:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;
@@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
                if (xennet_connect(netdev) != 0)
                        break;
                xenbus_switch_state(dev, XenbusStateConnected);
+               break;
+
+       case XenbusStateConnected:
                netif_notify_peers(netdev);
                break;
 
index 7bcb1fe..dbf214e 100644 (file)
@@ -1339,7 +1339,7 @@ error:
        return 0;
 }
 
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
                                                struct sk_buff *skb,
                                                data_exchange_cb_t cb,
                                                void *cb_context)
index 94f49ff..8af868b 100644 (file)
@@ -263,6 +263,11 @@ error:
        return PTR_ERR(vqs[i]);
 }
 
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+       return "";
+}
+
 /*
  * The config ops structure as defined by virtio config
  */
@@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
        .reset = kvm_reset,
        .find_vqs = kvm_find_vqs,
        .del_vqs = kvm_del_vqs,
+       .bus_name = kvm_bus_name,
 };
 
 /*
index a21ae3d..c4e2004 100644 (file)
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
        spin_unlock_bh(&card->vlanlock);
 }
 
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
 
        QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
        if (!vid)
-               return;
+               return 0;
        if (card->info.type == QETH_CARD_TYPE_OSM) {
                QETH_CARD_TEXT(card, 3, "aidOSM");
-               return;
+               return 0;
        }
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "aidREC");
-               return;
+               return 0;
        }
        id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
        if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
                spin_lock_bh(&card->vlanlock);
                list_add_tail(&id->list, &card->vid_list);
                spin_unlock_bh(&card->vlanlock);
+       } else {
+               return -ENOMEM;
        }
+       return 0;
 }
 
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (card->info.type == QETH_CARD_TYPE_OSM) {
                QETH_CARD_TEXT(card, 3, "kidOSM");
-               return;
+               return 0;
        }
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "kidREC");
-               return;
+               return 0;
        }
        spin_lock_bh(&card->vlanlock);
        list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
                kfree(tmpid);
        }
        qeth_l2_set_multicast_list(card->dev);
+       return 0;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
index 4d5307d..b3b045c 100644 (file)
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
        qeth_l3_free_vlan_addresses6(card, vid);
 }
 
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
 
        set_bit(vid, card->active_vlans);
-       return;
+       return 0;
 }
 
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
        struct qeth_card *card = dev->ml_priv;
        unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "kidREC");
-               return;
+               return 0;
        }
        spin_lock_irqsave(&card->vlanlock, flags);
        /* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        clear_bit(vid, card->active_vlans);
        spin_unlock_irqrestore(&card->vlanlock, flags);
        qeth_l3_set_multicast_list(card->dev);
+       return 0;
 }
 
 static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2759,7 +2760,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
        rcu_read_lock();
        dst = skb_dst(skb);
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
        if (n) {
                cast_type = n->type;
                rcu_read_unlock();
@@ -2855,7 +2856,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
        rcu_read_lock();
        dst = skb_dst(skb);
        if (dst)
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
        if (ipv == 4) {
                /* IPv4 */
                hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -3209,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev)
        return 0;
 }
 
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
 
@@ -3223,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
        return features;
 }
 
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
        u32 changed = dev->features ^ features;
index 000294a..36739da 100644 (file)
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
                csk->saddr.sin_addr.s_addr = chba->ipv4addr;
 
        csk->rss_qid = 0;
-       csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+       csk->l2t = t3_l2t_get(t3dev, dst, ndev);
        if (!csk->l2t) {
                pr_err("NO l2t available.\n");
                return -EINVAL;
index ac7a9b1..5a4a3bf 100644 (file)
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
+       struct neighbour *n;
        unsigned int step;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
 
-       csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+       n = dst_get_neighbour_noref(csk->dst);
+       if (!n) {
+               pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+               goto rel_resource;
+       }
+       csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
index c10f74a..1d25a87 100644 (file)
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        struct net_device *ndev;
        struct cxgbi_device *cdev;
        struct rtable *rt = NULL;
+       struct neighbour *n;
        struct flowi4 fl4;
        struct cxgbi_sock *csk = NULL;
        unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                goto err_out;
        }
        dst = &rt->dst;
-       ndev = dst_get_neighbour(dst)->dev;
+       n = dst_get_neighbour_noref(dst);
+       if (!n) {
+               err = -ENODEV;
+               goto rel_rt;
+       }
+       ndev = n->dev;
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
                ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
                mtu = ndev->mtu;
                pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
-                       dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+                       n->dev->name, ndev->name, mtu);
        }
 
        cdev = cxgbi_device_find_by_netdev(ndev, &port);
index 16a509a..7cdcb63 100644 (file)
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
 static int
 pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 {
-       struct net_device *dev = fp->dev;
        struct page *page;
        int err;
 
-       page = __netdev_alloc_page(dev, gfp_flags);
+       page = alloc_page(gfp_flags);
        if (!page)
                return -ENOMEM;
 
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
 
        err = usb_ep_queue(fp->out_ep, req, gfp_flags);
        if (unlikely(err))
-               netdev_free_page(dev, page);
+               put_page(page);
        return err;
 }
 
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
        }
 
        if (page)
-               netdev_free_page(dev, page);
+               put_page(page);
        if (req)
-               pn_rx_submit(fp, req, GFP_ATOMIC);
+               pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 
                        netif_carrier_on(dev);
                        for (i = 0; i < phonet_rxq_size; i++)
-                               pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+                               pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
                }
                spin_unlock(&port->lock);
                return 0;
index 7317dc2..0269717 100644 (file)
@@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        return 0;
 }
 
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+       struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 
+       return vm_dev->pdev->name;
+}
 
 static struct virtio_config_ops virtio_mmio_config_ops = {
        .get            = vm_get,
@@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
        .del_vqs        = vm_del_vqs,
        .get_features   = vm_get_features,
        .finalize_features = vm_finalize_features,
+       .bus_name       = vm_bus_name,
 };
 
 
index 03d1984..baabb79 100644 (file)
@@ -598,6 +598,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                                  false, false);
 }
 
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       return pci_name(vp_dev->pci_dev);
+}
+
 static struct virtio_config_ops virtio_pci_config_ops = {
        .get            = vp_get,
        .set            = vp_set,
@@ -608,6 +615,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
        .del_vqs        = vp_del_vqs,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
+       .bus_name       = vp_bus_name,
 };
 
 static void virtio_pci_release_dev(struct device *_d)
index 990626e..0b3109e 100644 (file)
@@ -281,7 +281,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
        } else {
                struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
                struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
-               ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
+               ret6->sin6_addr = in6->sin6_addr;
        }
 
        return 0;
index 9a6115e..49c1704 100644 (file)
@@ -64,4 +64,7 @@
 #define SO_DOMAIN              39
 
 #define SO_RXQ_OVFL             40
+
+#define SO_WIFI_STATUS         41
+#define SCM_WIFI_STATUS        SO_WIFI_STATUS
 #endif /* __ASM_GENERIC_SOCKET_H */
index 619b565..0b091b3 100644 (file)
@@ -185,6 +185,7 @@ header-y += if_pppol2tp.h
 header-y += if_pppox.h
 header-y += if_slip.h
 header-y += if_strip.h
+header-y += if_team.h
 header-y += if_tr.h
 header-y += if_tun.h
 header-y += if_tunnel.h
index 49a83ca..f4ff882 100644 (file)
@@ -445,16 +445,6 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
-/*
- * This is approximately the algorithm used by alloc_skb.
- *
- */
-
-static inline int atm_guess_pdu2truesize(int size)
-{
-       return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
-}
-
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644 (file)
index 0000000..7702641
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H_
+#define _CAN_PLATFORM_CC770_H_
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN      0x01    /* Clock Out Enable */
+#define CPUIF_MUX      0x04    /* Multiplex */
+#define CPUIF_SLP      0x08    /* Sleep */
+#define CPUIF_PWD      0x10    /* Power Down Mode */
+#define CPUIF_DMC      0x20    /* Divide Memory Clock */
+#define CPUIF_DSC      0x40    /* Divide System Clock */
+#define CPUIF_RST      0x80    /* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK  0x0f   /* Clock Divider mask */
+#define CLKOUT_SL_MASK 0x30    /* Slew Rate mask */
+#define CLKOUT_SL_SHIFT        4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0     0x01    /* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1     0x02    /* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1     0x08    /* Disconnect TX1 Output */
+#define BUSCFG_POL     0x20    /* Polarity dominant or recessive */
+#define BUSCFG_CBY     0x40    /* Input Comparator Bypass */
+
+struct cc770_platform_data {
+       u32 osc_freq;   /* CAN bus oscillator frequency in Hz */
+
+       u8 cir;         /* CPU Interface Register */
+       u8 cor;         /* Clock Out Register */
+       u8 bcr;         /* Bus Configuration Register */
+};
+
+#endif /* !_CAN_PLATFORM_CC770_H_ */
index ac663c1..0bd390c 100644 (file)
@@ -59,8 +59,16 @@ SUBSYS(net_cls)
 SUBSYS(blkio)
 #endif
 
+/* */
+
 #ifdef CONFIG_CGROUP_PERF
 SUBSYS(perf)
 #endif
 
 /* */
+
+#ifdef CONFIG_NETPRIO_CGROUP
+SUBSYS(net_prio)
+#endif
+
+/* */
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644 (file)
index 0000000..5621547
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue).  Such a queue would have these general properties:
+ *
+ *   1) Objects are queued up to some limit specified as number of objects.
+ *   2) Periodically a completion process executes which retires consumed
+ *      objects.
+ *   3) Starvation occurs when limit has been reached, all queued data has
+ *      actually been consumed, but completion processing has not yet run
+ *      so queuing new data is blocked.
+ *   4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ *    dql_queued - called when objects are enqueued to record number of objects
+ *    dql_avail - returns how many objects are available to be queued based
+ *      on the object limit and how many objects are already enqueued
+ *    dql_completed - called at completion time to indicate how many objects
+ *      were retired from the queue
+ *
+ * The dql implementation does not implement any locking for the dql data
+ * structures; the higher layer should provide this.  dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed.  However, dql_queued and dql_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+       /* Fields accessed in enqueue path (dql_queued) */
+       unsigned int    num_queued;             /* Total ever queued */
+       unsigned int    adj_limit;              /* limit + num_completed */
+       unsigned int    last_obj_cnt;           /* Count at last queuing */
+
+       /* Fields accessed only by completion path (dql_completed) */
+
+       unsigned int    limit ____cacheline_aligned_in_smp; /* Current limit */
+       unsigned int    num_completed;          /* Total ever completed */
+
+       unsigned int    prev_ovlimit;           /* Previous over limit */
+       unsigned int    prev_num_queued;        /* Previous queue total */
+       unsigned int    prev_last_obj_cnt;      /* Previous queuing cnt */
+
+       unsigned int    lowest_slack;           /* Lowest slack found */
+       unsigned long   slack_start_time;       /* Time slacks seen */
+
+       /* Configuration */
+       unsigned int    max_limit;              /* Max limit */
+       unsigned int    min_limit;              /* Minimum limit */
+       unsigned int    slack_hold_time;        /* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+       BUG_ON(count > DQL_MAX_OBJECT);
+
+       dql->num_queued += count;
+       dql->last_obj_cnt = count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+       return dql->adj_limit - dql->num_queued;
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DQL_H */
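The API in the header above boils down to: charge the queue with dql_queued() when objects are posted, retire them with dql_completed() when completions arrive, and gate new work on dql_avail(). A minimal sketch of that pattern follows; the queue structure and the my_stop_queue()/my_wake_queue() helpers are hypothetical stand-ins, only the dql calls come from this patch.

#include <linux/kernel.h>
#include <linux/dynamic_queue_limits.h>

struct my_tx_queue {
	struct dql dql;
	/* ... ring state, protected by the driver's TX lock ... */
};

static void my_stop_queue(struct my_tx_queue *q) { /* e.g. netif_stop_queue() */ }
static void my_wake_queue(struct my_tx_queue *q) { /* e.g. netif_wake_queue() */ }

static int my_tx_queue_setup(struct my_tx_queue *q)
{
	return dql_init(&q->dql, HZ);	/* measure slack over roughly one second */
}

/* Enqueue path (serialized by the TX lock); caller checked dql_avail() first. */
static void my_tx_enqueue(struct my_tx_queue *q, unsigned int bytes)
{
	dql_queued(&q->dql, bytes);
	if (dql_avail(&q->dql) < 0)
		my_stop_queue(q);	/* over the current limit */
}

/* Completion path (also serialized); retiring objects recomputes the limit. */
static void my_tx_complete(struct my_tx_queue *q, unsigned int bytes)
{
	dql_completed(&q->dql, bytes);
	if (dql_avail(&q->dql) >= 0)
		my_wake_queue(q);
}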
index c4627cb..e50f98b 100644 (file)
@@ -33,6 +33,7 @@
 #define PCI_EEPROM_WIDTH_93C86 8
 #define PCI_EEPROM_WIDTH_OPCODE        3
 #define PCI_EEPROM_WRITE_OPCODE        0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
 #define PCI_EEPROM_READ_OPCODE 0x06
 #define PCI_EEPROM_EWDS_OPCODE 0x10
 #define PCI_EEPROM_EWEN_OPCODE 0x13
@@ -46,6 +47,7 @@
  * @register_write(struct eeprom_93cx6 *eeprom): handler to
  * write to the eeprom register by using all reg_* fields.
  * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
  * @reg_data_in: register field to indicate data input
  * @reg_data_out: register field to indicate data output
  * @reg_data_clock: register field to set the data clock
@@ -62,6 +64,7 @@ struct eeprom_93cx6 {
 
        int width;
 
+       char drive_data;
        char reg_data_in;
        char reg_data_out;
        char reg_data_clock;
@@ -72,3 +75,8 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
        const u8 word, u16 *data);
 extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
        const u8 word, __le16 *data, const u16 words);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+                              u8 addr, u16 data);
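A short sketch of how a caller might drive the new write path declared above; only the two exported helpers come from this patch, the wrapper function and its arguments are hypothetical.

#include <linux/eeprom_93cx6.h>

/* 'eeprom' is assumed to be already set up by the driver
 * (register_read/register_write/width filled in). */
static void example_update_word(struct eeprom_93cx6 *eeprom, u8 addr, u16 val)
{
	eeprom_93cx6_wren(eeprom, true);	/* lift write protection */
	eeprom_93cx6_write(eeprom, addr, val);	/* program one 16-bit word */
	eeprom_93cx6_wren(eeprom, false);	/* write-protect again */
}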
index 034072c..fd0628b 100644 (file)
@@ -17,14 +17,15 @@ struct sock_extended_err {
 #define SO_EE_ORIGIN_LOCAL     1
 #define SO_EE_ORIGIN_ICMP      2
 #define SO_EE_ORIGIN_ICMP6     3
-#define SO_EE_ORIGIN_TIMESTAMPING 4
+#define SO_EE_ORIGIN_TXSTATUS  4
+#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
 
 #define SO_EE_OFFENDER(ee)     ((struct sockaddr*)((ee)+1))
 
 #ifdef __KERNEL__
 
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -33,7 +34,7 @@ struct sock_extended_err {
 struct sock_exterr_skb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index de33de1..20db5b2 100644 (file)
@@ -724,9 +724,6 @@ enum ethtool_sfeatures_retval_bits {
 
 #include <linux/rculist.h>
 
-/* needed by dev_disable_lro() */
-extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
-
 extern int __ethtool_get_settings(struct net_device *dev,
                                  struct ethtool_cmd *cmd);
 
@@ -750,19 +747,6 @@ struct net_device;
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_tx_csum(struct net_device *dev);
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
-u32 ethtool_op_get_sg(struct net_device *dev);
-int ethtool_op_set_sg(struct net_device *dev, u32 data);
-u32 ethtool_op_get_tso(struct net_device *dev);
-int ethtool_op_set_tso(struct net_device *dev, u32 data);
-u32 ethtool_op_get_ufo(struct net_device *dev);
-int ethtool_op_set_ufo(struct net_device *dev, u32 data);
-u32 ethtool_op_get_flags(struct net_device *dev);
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
 
 /**
  * struct ethtool_ops - optional netdev operations
@@ -807,22 +791,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
  * @get_pauseparam: Report pause parameters
  * @set_pauseparam: Set pause parameters.  Returns a negative error code
  *     or zero.
- * @get_rx_csum: Deprecated in favour of the netdev feature %NETIF_F_RXCSUM.
- *     Report whether receive checksums are turned on or off.
- * @set_rx_csum: Deprecated in favour of generic netdev features.  Turn
- *     receive checksum on or off.  Returns a negative error code or zero.
- * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
- *     are turned on or off.
- * @set_tx_csum: Deprecated in favour of generic netdev features.  Turn
- *     transmit checksums on or off.  Returns a negative error code or zero.
- * @get_sg: Deprecated as redundant.  Report whether scatter-gather is
- *     enabled.  
- * @set_sg: Deprecated in favour of generic netdev features.  Turn
- *     scatter-gather on or off. Returns a negative error code or zero.
- * @get_tso: Deprecated as redundant.  Report whether TCP segmentation
- *     offload is enabled.
- * @set_tso: Deprecated in favour of generic netdev features.  Turn TCP
- *     segmentation offload on or off.  Returns a negative error code or zero.
  * @self_test: Run specified self-tests
  * @get_strings: Return a set of strings that describe the requested objects
  * @set_phys_id: Identify the physical devices, e.g. by flashing an LED
@@ -844,15 +812,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
  *     negative error code or zero.
  * @complete: Function to be called after any other operation except
  *     @begin.  Will be called even if the other operation failed.
- * @get_ufo: Deprecated as redundant.  Report whether UDP fragmentation
- *     offload is enabled.
- * @set_ufo: Deprecated in favour of generic netdev features.  Turn UDP
- *     fragmentation offload on or off.  Returns a negative error code or zero.
- * @get_flags: Deprecated as redundant.  Report features included in
- *     &enum ethtool_flags that are enabled.  
- * @set_flags: Deprecated in favour of generic netdev features.  Turn
- *     features included in &enum ethtool_flags on or off.  Returns a
- *     negative error code or zero.
  * @get_priv_flags: Report driver-specific feature flags.
  * @set_priv_flags: Set driver-specific feature flags.  Returns a negative
  *     error code or zero.
@@ -917,14 +876,6 @@ struct ethtool_ops {
                                  struct ethtool_pauseparam*);
        int     (*set_pauseparam)(struct net_device *,
                                  struct ethtool_pauseparam*);
-       u32     (*get_rx_csum)(struct net_device *);
-       int     (*set_rx_csum)(struct net_device *, u32);
-       u32     (*get_tx_csum)(struct net_device *);
-       int     (*set_tx_csum)(struct net_device *, u32);
-       u32     (*get_sg)(struct net_device *);
-       int     (*set_sg)(struct net_device *, u32);
-       u32     (*get_tso)(struct net_device *);
-       int     (*set_tso)(struct net_device *, u32);
        void    (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
        void    (*get_strings)(struct net_device *, u32 stringset, u8 *);
        int     (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
@@ -932,10 +883,6 @@ struct ethtool_ops {
                                     struct ethtool_stats *, u64 *);
        int     (*begin)(struct net_device *);
        void    (*complete)(struct net_device *);
-       u32     (*get_ufo)(struct net_device *);
-       int     (*set_ufo)(struct net_device *, u32);
-       u32     (*get_flags)(struct net_device *);
-       int     (*set_flags)(struct net_device *, u32);
        u32     (*get_priv_flags)(struct net_device *);
        int     (*set_priv_flags)(struct net_device *, u32);
        int     (*get_sset_count)(struct net_device *, int);
index 61549b2..73c28de 100644 (file)
@@ -85,6 +85,30 @@ enum {
 /* All generic netlink requests are serialized by a global lock.  */
 extern void genl_lock(void);
 extern void genl_unlock(void);
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_genl_is_held(void);
+#endif
+
+/**
+ * rcu_dereference_genl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check that the caller either holds rcu_read_lock()
+ * or the genl mutex. Note: please prefer genl_dereference() or rcu_dereference()
+ */
+#define rcu_dereference_genl(p)                                        \
+       rcu_dereference_check(p, lockdep_genl_is_held())
+
+/**
+ * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * the caller holds the genl mutex.
+ */
+#define genl_dereference(p)                                    \
+       rcu_dereference_protected(p, lockdep_genl_is_held())
 
 #endif /* __KERNEL__ */
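To illustrate the two accessors added above (a sketch, not taken from the patch): a generic netlink family keeping an RCU-protected pointer picks the macro according to which protection the caller holds. The my_cfg structure and both functions below are hypothetical.

#include <linux/rcupdate.h>
#include <net/genetlink.h>

struct my_cfg {
	int value;
};

static struct my_cfg __rcu *my_cfg_ptr;

/* Fast-path reader: rcu_read_lock() (or the genl mutex) is sufficient. */
static int my_cfg_read(void)
{
	struct my_cfg *cfg;
	int val = 0;

	rcu_read_lock();
	cfg = rcu_dereference_genl(my_cfg_ptr);
	if (cfg)
		val = cfg->value;
	rcu_read_unlock();
	return val;
}

/* Update path: holding the genl mutex alone justifies genl_dereference(). */
static void my_cfg_update(int new_value)
{
	struct my_cfg *cfg;

	genl_lock();
	cfg = genl_dereference(my_cfg_ptr);
	if (cfg)
		cfg->value = new_value;
	genl_unlock();
}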
 
index 48363c3..17f2a76 100644 (file)
 #define IEEE80211_QOS_CTL_ACK_POLICY_NOACK     0x0020
 #define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL   0x0040
 #define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK  0x0060
+#define IEEE80211_QOS_CTL_ACK_POLICY_MASK      0x0060
 /* A-MSDU 802.11n */
 #define IEEE80211_QOS_CTL_A_MSDU_PRESENT       0x0080
 /* Mesh Control 802.11s */
@@ -770,6 +771,9 @@ struct ieee80211_mgmt {
        } u;
 } __attribute__ ((packed));
 
+/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+
 /* mgmt header + 1 byte category code */
 #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
 
@@ -1552,6 +1556,8 @@ enum ieee80211_sa_query_action {
 #define WLAN_CIPHER_SUITE_WEP104       0x000FAC05
 #define WLAN_CIPHER_SUITE_AES_CMAC     0x000FAC06
 
+#define WLAN_CIPHER_SUITE_SMS4         0x00147201
+
 /* AKM suite selectors */
 #define WLAN_AKM_SUITE_8021X           0x000FAC01
 #define WLAN_AKM_SUITE_PSK             0x000FAC02
@@ -1689,6 +1695,23 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
 }
 
 /**
+ * ieee80211_is_public_action - check if frame is a public action frame
+ * @hdr: the frame
+ * @len: length of the frame
+ */
+static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
+                                             size_t len)
+{
+       struct ieee80211_mgmt *mgmt = (void *)hdr;
+
+       if (len < IEEE80211_MIN_ACTION_SIZE)
+               return false;
+       if (!ieee80211_is_action(hdr->frame_control))
+               return false;
+       return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
+}
+
+/**
  * ieee80211_fhss_chan_to_freq - get channel frequency
  * @channel: the FHSS channel
  *
index db20bd4..06b6ef6 100644 (file)
@@ -79,6 +79,7 @@
 #define IFF_TX_SKB_SHARING     0x10000 /* The interface supports sharing
                                         * skbs on transmit */
 #define IFF_UNICAST_FLT        0x20000         /* Supports unicast filtering   */
+#define IFF_TEAM_PORT  0x40000         /* device used as team port */
 
 #define IF_GET_IFACE   0x0001          /* for querying only */
 #define IF_GET_PROTO   0x0002
index e473003..56d907a 100644 (file)
@@ -79,6 +79,7 @@
 #define ETH_P_PAE      0x888E          /* Port Access Entity (IEEE 802.1X) */
 #define ETH_P_AOE      0x88A2          /* ATA over Ethernet            */
 #define ETH_P_8021AD   0x88A8          /* 802.1ad Service VLAN         */
+#define ETH_P_802_EX1  0x88B5          /* 802.1 Local Experimental 1.  */
 #define ETH_P_TIPC     0x88CA          /* TIPC                         */
 #define ETH_P_8021AH   0x88E7          /* 802.1ah Backbone Service Tag */
 #define ETH_P_1588     0x88F7          /* IEEE 1588 Timesync */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
new file mode 100644 (file)
index 0000000..828181f
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * include/linux/if_team.h - Network team device driver header
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_IF_TEAM_H_
+#define _LINUX_IF_TEAM_H_
+
+#ifdef __KERNEL__
+
+struct team_pcpu_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     rx_multicast;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+       u32                     rx_dropped;
+       u32                     tx_dropped;
+};
+
+struct team;
+
+struct team_port {
+       struct net_device *dev;
+       struct hlist_node hlist; /* node in hash list */
+       struct list_head list; /* node in ordinary list */
+       struct team *team;
+       int index;
+
+       /*
+        * A place for storing original values of the device before it
+        * becomes a port.
+        */
+       struct {
+               unsigned char dev_addr[MAX_ADDR_LEN];
+               unsigned int mtu;
+       } orig;
+
+       bool linkup;
+       u32 speed;
+       u8 duplex;
+
+       struct rcu_head rcu;
+};
+
+struct team_mode_ops {
+       int (*init)(struct team *team);
+       void (*exit)(struct team *team);
+       rx_handler_result_t (*receive)(struct team *team,
+                                      struct team_port *port,
+                                      struct sk_buff *skb);
+       bool (*transmit)(struct team *team, struct sk_buff *skb);
+       int (*port_enter)(struct team *team, struct team_port *port);
+       void (*port_leave)(struct team *team, struct team_port *port);
+       void (*port_change_mac)(struct team *team, struct team_port *port);
+};
+
+enum team_option_type {
+       TEAM_OPTION_TYPE_U32,
+       TEAM_OPTION_TYPE_STRING,
+};
+
+struct team_option {
+       struct list_head list;
+       const char *name;
+       enum team_option_type type;
+       int (*getter)(struct team *team, void *arg);
+       int (*setter)(struct team *team, void *arg);
+};
+
+struct team_mode {
+       struct list_head list;
+       const char *kind;
+       struct module *owner;
+       size_t priv_size;
+       const struct team_mode_ops *ops;
+};
+
+#define TEAM_PORT_HASHBITS 4
+#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
+
+#define TEAM_MODE_PRIV_LONGS 4
+#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
+
+struct team {
+       struct net_device *dev; /* associated netdevice */
+       struct team_pcpu_stats __percpu *pcpu_stats;
+
+       struct mutex lock; /* used for overall locking, e.g. port lists write */
+
+       /*
+        * port lists with port count
+        */
+       int port_count;
+       struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES];
+       struct list_head port_list;
+
+       struct list_head option_list;
+
+       const struct team_mode *mode;
+       struct team_mode_ops ops;
+       long mode_priv[TEAM_MODE_PRIV_LONGS];
+};
+
+static inline struct hlist_head *team_port_index_hash(struct team *team,
+                                                     int port_index)
+{
+       return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+}
+
+static inline struct team_port *team_get_port_by_index(struct team *team,
+                                                      int port_index)
+{
+       struct hlist_node *p;
+       struct team_port *port;
+       struct hlist_head *head = team_port_index_hash(team, port_index);
+
+       hlist_for_each_entry(port, p, head, hlist)
+               if (port->index == port_index)
+                       return port;
+       return NULL;
+}
+static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
+                                                          int port_index)
+{
+       struct hlist_node *p;
+       struct team_port *port;
+       struct hlist_head *head = team_port_index_hash(team, port_index);
+
+       hlist_for_each_entry_rcu(port, p, head, hlist)
+               if (port->index == port_index)
+                       return port;
+       return NULL;
+}
+
+extern int team_port_set_team_mac(struct team_port *port);
+extern int team_options_register(struct team *team,
+                                const struct team_option *option,
+                                size_t option_count);
+extern void team_options_unregister(struct team *team,
+                                   const struct team_option *option,
+                                   size_t option_count);
+extern int team_mode_register(struct team_mode *mode);
+extern int team_mode_unregister(struct team_mode *mode);
+
+#endif /* __KERNEL__ */
+
+#define TEAM_STRING_MAX_LEN 32
+
+/**********************************
+ * NETLINK_GENERIC netlink family.
+ **********************************/
+
+enum {
+       TEAM_CMD_NOOP,
+       TEAM_CMD_OPTIONS_SET,
+       TEAM_CMD_OPTIONS_GET,
+       TEAM_CMD_PORT_LIST_GET,
+
+       __TEAM_CMD_MAX,
+       TEAM_CMD_MAX = (__TEAM_CMD_MAX - 1),
+};
+
+enum {
+       TEAM_ATTR_UNSPEC,
+       TEAM_ATTR_TEAM_IFINDEX,         /* u32 */
+       TEAM_ATTR_LIST_OPTION,          /* nest */
+       TEAM_ATTR_LIST_PORT,            /* nest */
+
+       __TEAM_ATTR_MAX,
+       TEAM_ATTR_MAX = __TEAM_ATTR_MAX - 1,
+};
+
+/* Nested layout of get/set msg:
+ *
+ *     [TEAM_ATTR_LIST_OPTION]
+ *             [TEAM_ATTR_ITEM_OPTION]
+ *                     [TEAM_ATTR_OPTION_*], ...
+ *             [TEAM_ATTR_ITEM_OPTION]
+ *                     [TEAM_ATTR_OPTION_*], ...
+ *             ...
+ *     [TEAM_ATTR_LIST_PORT]
+ *             [TEAM_ATTR_ITEM_PORT]
+ *                     [TEAM_ATTR_PORT_*], ...
+ *             [TEAM_ATTR_ITEM_PORT]
+ *                     [TEAM_ATTR_PORT_*], ...
+ *             ...
+ */
+
+enum {
+       TEAM_ATTR_ITEM_OPTION_UNSPEC,
+       TEAM_ATTR_ITEM_OPTION,          /* nest */
+
+       __TEAM_ATTR_ITEM_OPTION_MAX,
+       TEAM_ATTR_ITEM_OPTION_MAX = __TEAM_ATTR_ITEM_OPTION_MAX - 1,
+};
+
+enum {
+       TEAM_ATTR_OPTION_UNSPEC,
+       TEAM_ATTR_OPTION_NAME,          /* string */
+       TEAM_ATTR_OPTION_CHANGED,       /* flag */
+       TEAM_ATTR_OPTION_TYPE,          /* u8 */
+       TEAM_ATTR_OPTION_DATA,          /* dynamic */
+
+       __TEAM_ATTR_OPTION_MAX,
+       TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
+};
+
+enum {
+       TEAM_ATTR_ITEM_PORT_UNSPEC,
+       TEAM_ATTR_ITEM_PORT,            /* nest */
+
+       __TEAM_ATTR_ITEM_PORT_MAX,
+       TEAM_ATTR_ITEM_PORT_MAX = __TEAM_ATTR_ITEM_PORT_MAX - 1,
+};
+
+enum {
+       TEAM_ATTR_PORT_UNSPEC,
+       TEAM_ATTR_PORT_IFINDEX,         /* u32 */
+       TEAM_ATTR_PORT_CHANGED,         /* flag */
+       TEAM_ATTR_PORT_LINKUP,          /* flag */
+       TEAM_ATTR_PORT_SPEED,           /* u32 */
+       TEAM_ATTR_PORT_DUPLEX,          /* u8 */
+
+       __TEAM_ATTR_PORT_MAX,
+       TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
+};
+
+/*
+ * NETLINK_GENERIC related info
+ */
+#define TEAM_GENL_NAME "team"
+#define TEAM_GENL_VERSION 0x1
+#define TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME "change_event"
+
+#endif /* _LINUX_IF_TEAM_H_ */
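As a sketch of how the option plumbing above is meant to be used, a mode could register a private option from its init() callback. The option name, the priv layout behind mode_priv, and the assumption that arg points at a u32 for TEAM_OPTION_TYPE_U32 options are all illustrative; only the declarations in this header are real.

#include <linux/kernel.h>
#include <linux/if_team.h>

struct my_mode_priv {
	u32 threshold;
};

static int threshold_get(struct team *team, void *arg)
{
	struct my_mode_priv *priv = (struct my_mode_priv *) team->mode_priv;

	*(u32 *) arg = priv->threshold;	/* assumes arg is a u32 for U32 options */
	return 0;
}

static int threshold_set(struct team *team, void *arg)
{
	struct my_mode_priv *priv = (struct my_mode_priv *) team->mode_priv;

	priv->threshold = *(u32 *) arg;
	return 0;
}

static const struct team_option my_mode_options[] = {
	{
		.name	= "threshold",
		.type	= TEAM_OPTION_TYPE_U32,
		.getter	= threshold_get,
		.setter	= threshold_set,
	},
};

static int my_mode_init(struct team *team)
{
	return team_options_register(team, my_mode_options,
				     ARRAY_SIZE(my_mode_options));
}

static void my_mode_exit(struct team *team)
{
	team_options_unregister(team, my_mode_options,
				ARRAY_SIZE(my_mode_options));
}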
index 12d5543..13aff1e 100644 (file)
@@ -74,22 +74,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-/* if this changes, algorithm will have to be reworked because this
- * depends on completely exhausting the VLAN identifier space.  Thus
- * it gives constant time look-up, but in many cases it wastes memory.
- */
-#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
-#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
-
-struct vlan_group {
-       struct net_device       *real_dev; /* The ethernet(like) device
-                                           * the vlan is attached to.
-                                           */
-       unsigned int            nr_vlans;
-       struct hlist_node       hlist;  /* linked list */
-       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
-       struct rcu_head         rcu;
-};
+struct vlan_info;
 
 static inline int is_vlan_dev(struct net_device *dev)
 {
@@ -109,6 +94,13 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 
+extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+extern void vlan_vid_del(struct net_device *dev, unsigned short vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+                               const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+                                const struct net_device *by_dev);
 #else
 static inline struct net_device *
 __vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id)
@@ -139,6 +131,26 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb)
 {
        return skb;
 }
+
+static inline int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+       return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+                                      const struct net_device *by_dev)
+{
+       return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+                                       const struct net_device *by_dev)
+{
+}
 #endif
 
 /**
@@ -310,6 +322,40 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 
        return protocol;
 }
+
+static inline void vlan_set_encap_proto(struct sk_buff *skb,
+                                       struct vlan_hdr *vhdr)
+{
+       __be16 proto;
+       unsigned char *rawp;
+
+       /*
+        * This was a VLAN packet, so grab the encapsulated protocol, which is
+        * what the layer-three protocols care about.
+        */
+
+       proto = vhdr->h_vlan_encapsulated_proto;
+       if (ntohs(proto) >= 1536) {
+               skb->protocol = proto;
+               return;
+       }
+
+       rawp = skb->data;
+       if (*(unsigned short *) rawp == 0xFFFF)
+               /*
+                * This is a magic hack to spot IPX packets. Older Novell
+                * breaks the protocol design and runs IPX over 802.3 without
+                * an 802.2 LLC layer. We look for FFFF which isn't a used
+                * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+                * but does for the rest.
+                */
+               skb->protocol = htons(ETH_P_802_3);
+       else
+               /*
+                * Real 802.2 LLC
+                */
+               skb->protocol = htons(ETH_P_802_2);
+}
 #endif /* __KERNEL__ */
 
 /* VLAN IOCTLs are found in sockios.h */
@@ -352,7 +398,7 @@ struct vlan_ioctl_args {
                unsigned int skb_priority;
                unsigned int name_type;
                unsigned int bind_type;
-               unsigned int flag; /* Matches vlan_dev_info flags */
+               unsigned int flag; /* Matches vlan_dev_priv flags */
         } u;
 
        short vlan_qos;   
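The vlan_vid_*/vlan_vids_*_by_dev helpers above replace per-driver walks over the old vlan_group. A rough sketch of a master/slave style driver propagating its VLAN filter to a newly attached lower device; the enslave/release functions are hypothetical, only the two helpers come from this patch.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	/* Push every VLAN ID configured on the master down to the slave. */
	err = vlan_vids_add_by_dev(slave, master);
	if (err)
		return err;

	/* ... rest of the enslave path ... */
	return 0;
}

static void example_release(struct net_device *master, struct net_device *slave)
{
	/* Drop the VLAN IDs added at enslave time. */
	vlan_vids_del_by_dev(slave, master);
}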
index abf5028..78972a1 100644 (file)
@@ -6,6 +6,7 @@
 /* Just some random number */
 #define TCPDIAG_GETSOCK 18
 #define DCCPDIAG_GETSOCK 19
+#define SOCK_DIAG_BY_FAMILY 20
 
 #define INET_DIAG_GETSOCK_MAX 24
 
@@ -22,7 +23,7 @@ struct inet_diag_sockid {
 
 /* Request structure */
 
-struct inet_diag_req {
+struct inet_diag_req_compat {
        __u8    idiag_family;           /* Family of addresses. */
        __u8    idiag_src_len;
        __u8    idiag_dst_len;
@@ -34,6 +35,15 @@ struct inet_diag_req {
        __u32   idiag_dbs;              /* Tables to dump (NI) */
 };
 
+struct inet_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+       __u8    idiag_ext;
+       __u8    pad;
+       __u32   idiag_states;
+       struct inet_diag_sockid id;
+};
+
 enum {
        INET_DIAG_REQ_NONE,
        INET_DIAG_REQ_BYTECODE,
@@ -125,16 +135,42 @@ struct tcpvegas_info {
 #ifdef __KERNEL__
 struct sock;
 struct inet_hashinfo;
+struct nlattr;
+struct nlmsghdr;
+struct sk_buff;
+struct netlink_callback;
 
 struct inet_diag_handler {
-       struct inet_hashinfo    *idiag_hashinfo;
+       void                    (*dump)(struct sk_buff *skb,
+                                       struct netlink_callback *cb,
+                                       struct inet_diag_req *r,
+                                       struct nlattr *bc);
+
+       int                     (*dump_one)(struct sk_buff *in_skb,
+                                       const struct nlmsghdr *nlh,
+                                       struct inet_diag_req *req);
+
        void                    (*idiag_get_info)(struct sock *sk,
                                                  struct inet_diag_msg *r,
                                                  void *info);
-       __u16                   idiag_info_size;
        __u16                   idiag_type;
 };
 
+struct inet_connection_sock;
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh);
+void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *r,
+               struct nlattr *bc);
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+               struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req);
+
+int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+int inet_diag_check_cookie(struct sock *sk, struct inet_diag_req *req);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* __KERNEL__ */
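With the handler reduced to per-protocol dump/dump_one callbacks, a protocol built on inet_connection_sock can, as a sketch, simply forward to the generic icsk helpers declared above. The hash table, protocol number and empty get_info body below are placeholders, not part of this patch.

#include <linux/inet_diag.h>
#include <net/inet_hashtables.h>

#define MY_DIAG_PROTO 250			/* placeholder protocol number */

extern struct inet_hashinfo my_hashinfo;	/* provided elsewhere by the protocol */

static void my_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			 struct inet_diag_req *r, struct nlattr *bc)
{
	inet_diag_dump_icsk(&my_hashinfo, skb, cb, r, bc);
}

static int my_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
			    struct inet_diag_req *req)
{
	return inet_diag_dump_one_icsk(&my_hashinfo, in_skb, nlh, req);
}

static void my_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			     void *info)
{
	/* fill protocol-specific info, if any was requested */
}

static const struct inet_diag_handler my_diag_handler = {
	.dump		= my_diag_dump,
	.dump_one	= my_diag_dump_one,
	.idiag_get_info	= my_diag_get_info,
	.idiag_type	= MY_DIAG_PROTO,
};

/* module init would then call: inet_diag_register(&my_diag_handler); */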
index 0c99776..6318268 100644 (file)
@@ -404,7 +404,7 @@ struct tcp6_sock {
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
        return inet_sk(__sk)->pinet6;
@@ -515,7 +515,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 #define inet6_rcv_saddr(__sk)  NULL
 #define tcp_twsk_ipv6only(__sk)                0
 #define inet_v6_ipv6only(__sk)         0
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __hash, __saddr, __daddr, __ports, __dif)\
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   && \
index ff9abff..90b0656 100644 (file)
@@ -301,7 +301,7 @@ static inline int __nlm_privileged_request4(const struct sockaddr *sap)
        return ipv4_is_loopback(sin->sin_addr.s_addr);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
        const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
@@ -314,12 +314,12 @@ static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 
        return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
 }
-#else  /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else  /* IS_ENABLED(CONFIG_IPV6) */
 static inline int __nlm_privileged_request6(const struct sockaddr *sap)
 {
        return 0;
 }
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Ensure incoming requests are from local privileged callers.
index 0fe00cd..76f52bb 100644 (file)
@@ -32,6 +32,8 @@ struct mdiobb_ops {
 
 struct mdiobb_ctrl {
        const struct mdiobb_ops *ops;
+       /* reset callback */
+       int (*reset)(struct mii_bus *bus);
 };
 
 /* The returned bus is not yet registered with the phy layer. */
index e9d3fdf..7c9fe3c 100644 (file)
@@ -20,6 +20,8 @@ struct mdio_gpio_platform_data {
 
        unsigned int phy_mask;
        int irqs[PHY_MAX_ADDR];
+       /* reset callback */
+       int (*reset)(struct mii_bus *bus);
 };
 
 #endif /* __LINUX_MDIO_GPIO_H */
index b87068a..9b296ea 100644 (file)
@@ -85,6 +85,9 @@ extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
+extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+
 static inline
 int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 {
@@ -381,5 +384,25 @@ mem_cgroup_print_bad_page(struct page *page)
 }
 #endif
 
+enum {
+       UNDER_LIMIT,
+       SOFT_LIMIT,
+       OVER_LIMIT,
+};
+
+#ifdef CONFIG_INET
+struct sock;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+void sock_update_memcg(struct sock *sk);
+void sock_release_memcg(struct sock *sk);
+#else
+static inline void sock_update_memcg(struct sock *sk)
+{
+}
+static inline void sock_release_memcg(struct sock *sk)
+{
+}
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_INET */
 #endif /* _LINUX_MEMCONTROL_H */
 
index 2774823..2783eca 100644 (file)
@@ -9,6 +9,7 @@
 #define __LINUX_MII_H__
 
 #include <linux/types.h>
+#include <linux/ethtool.h>
 
 /* Generic MII registers. */
 #define MII_BMCR               0x00    /* Basic mode control register */
@@ -240,6 +241,205 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
 }
 
 /**
+ * ethtool_adv_to_mii_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
+{
+       u32 result = 0;
+
+       if (ethadv & ADVERTISED_10baseT_Half)
+               result |= ADVERTISE_10HALF;
+       if (ethadv & ADVERTISED_10baseT_Full)
+               result |= ADVERTISE_10FULL;
+       if (ethadv & ADVERTISED_100baseT_Half)
+               result |= ADVERTISE_100HALF;
+       if (ethadv & ADVERTISED_100baseT_Full)
+               result |= ADVERTISE_100FULL;
+       if (ethadv & ADVERTISED_Pause)
+               result |= ADVERTISE_PAUSE_CAP;
+       if (ethadv & ADVERTISED_Asym_Pause)
+               result |= ADVERTISE_PAUSE_ASYM;
+
+       return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_t
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to ethtool advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
+{
+       u32 result = 0;
+
+       if (adv & ADVERTISE_10HALF)
+               result |= ADVERTISED_10baseT_Half;
+       if (adv & ADVERTISE_10FULL)
+               result |= ADVERTISED_10baseT_Full;
+       if (adv & ADVERTISE_100HALF)
+               result |= ADVERTISED_100baseT_Half;
+       if (adv & ADVERTISE_100FULL)
+               result |= ADVERTISED_100baseT_Full;
+       if (adv & ADVERTISE_PAUSE_CAP)
+               result |= ADVERTISED_Pause;
+       if (adv & ADVERTISE_PAUSE_ASYM)
+               result |= ADVERTISED_Asym_Pause;
+
+       return result;
+}
+
+/**
+ * ethtool_adv_to_mii_ctrl1000_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
+{
+       u32 result = 0;
+
+       if (ethadv & ADVERTISED_1000baseT_Half)
+               result |= ADVERTISE_1000HALF;
+       if (ethadv & ADVERTISED_1000baseT_Full)
+               result |= ADVERTISE_1000FULL;
+
+       return result;
+}
+
+/**
+ * mii_ctrl1000_to_ethtool_adv_t
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
+{
+       u32 result = 0;
+
+       if (adv & ADVERTISE_1000HALF)
+               result |= ADVERTISED_1000baseT_Half;
+       if (adv & ADVERTISE_1000FULL)
+               result |= ADVERTISED_1000baseT_Full;
+
+       return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-T mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
+{
+       u32 result = 0;
+
+       if (lpa & LPA_LPACK)
+               result |= ADVERTISED_Autoneg;
+
+       return result | mii_adv_to_ethtool_adv_t(lpa);
+}
+
+/**
+ * mii_stat1000_to_ethtool_lpa_t
+ * @lpa: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
+{
+       u32 result = 0;
+
+       if (lpa & LPA_1000HALF)
+               result |= ADVERTISED_1000baseT_Half;
+       if (lpa & LPA_1000FULL)
+               result |= ADVERTISED_1000baseT_Full;
+
+       return result;
+}
+
+/**
+ * ethtool_adv_to_mii_adv_x
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000Base-X mode.
+ */
+static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
+{
+       u32 result = 0;
+
+       if (ethadv & ADVERTISED_1000baseT_Half)
+               result |= ADVERTISE_1000XHALF;
+       if (ethadv & ADVERTISED_1000baseT_Full)
+               result |= ADVERTISE_1000XFULL;
+       if (ethadv & ADVERTISED_Pause)
+               result |= ADVERTISE_1000XPAUSE;
+       if (ethadv & ADVERTISED_Asym_Pause)
+               result |= ADVERTISE_1000XPSE_ASYM;
+
+       return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_x
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-X mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
+{
+       u32 result = 0;
+
+       if (adv & ADVERTISE_1000XHALF)
+               result |= ADVERTISED_1000baseT_Half;
+       if (adv & ADVERTISE_1000XFULL)
+               result |= ADVERTISED_1000baseT_Full;
+       if (adv & ADVERTISE_1000XPAUSE)
+               result |= ADVERTISED_Pause;
+       if (adv & ADVERTISE_1000XPSE_ASYM)
+               result |= ADVERTISED_Asym_Pause;
+
+       return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_x
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-X mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+{
+       u32 result = 0;
+
+       if (lpa & LPA_LPACK)
+               result |= ADVERTISED_Autoneg;
+
+       return result | mii_adv_to_ethtool_adv_x(lpa);
+}
+
+/**
  * mii_advertise_flowctrl - get flow control advertisement flags
  * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
  */
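
For illustration only: a minimal sketch of how a MAC driver's ethtool path might use the new translation helpers, assuming the existing struct mii_if_info mdio_read() callback; the example_ name is hypothetical.

#include <linux/mii.h>
#include <linux/ethtool.h>

/* Hypothetical helper: build the ethtool lp_advertising mask from the
 * raw MII link-partner registers using the new conversion helpers. */
static u32 example_get_lp_advertising(struct mii_if_info *mii)
{
	u32 lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
	u32 stat1000 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);

	/* base-page bits (and the autoneg flag) plus 1000Base-T bits */
	return mii_lpa_to_ethtool_lpa_t(lpa) |
	       mii_stat1000_to_ethtool_lpa_t(stat1000);
}
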
index b56e458..9958ff2 100644 (file)
@@ -59,12 +59,15 @@ enum {
        MLX4_CMD_HW_HEALTH_CHECK = 0x50,
        MLX4_CMD_SET_PORT        = 0xc,
        MLX4_CMD_SET_NODE        = 0x5a,
+       MLX4_CMD_QUERY_FUNC      = 0x56,
        MLX4_CMD_ACCESS_DDR      = 0x2e,
        MLX4_CMD_MAP_ICM         = 0xffa,
        MLX4_CMD_UNMAP_ICM       = 0xff9,
        MLX4_CMD_MAP_ICM_AUX     = 0xffc,
        MLX4_CMD_UNMAP_ICM_AUX   = 0xffb,
        MLX4_CMD_SET_ICM_SIZE    = 0xffd,
+	/* master notifies FW when a slave's FLR has finished */
+       MLX4_CMD_INFORM_FLR_DONE = 0x5b,
 
        /* TPT commands */
        MLX4_CMD_SW2HW_MPT       = 0xd,
@@ -119,6 +122,26 @@ enum {
        /* miscellaneous commands */
        MLX4_CMD_DIAG_RPRT       = 0x30,
        MLX4_CMD_NOP             = 0x31,
+       MLX4_CMD_ACCESS_MEM      = 0x2e,
+       MLX4_CMD_SET_VEP         = 0x52,
+
+       /* Ethernet specific commands */
+       MLX4_CMD_SET_VLAN_FLTR   = 0x47,
+       MLX4_CMD_SET_MCAST_FLTR  = 0x48,
+       MLX4_CMD_DUMP_ETH_STATS  = 0x49,
+
+       /* Communication channel commands */
+       MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
+       MLX4_CMD_GEN_EQE         = 0x58,
+
+       /* virtual commands */
+       MLX4_CMD_ALLOC_RES       = 0xf00,
+       MLX4_CMD_FREE_RES        = 0xf01,
+       MLX4_CMD_MCAST_ATTACH    = 0xf05,
+       MLX4_CMD_UCAST_ATTACH    = 0xf06,
+       MLX4_CMD_PROMISC         = 0xf08,
+       MLX4_CMD_QUERY_FUNC_CAP  = 0xf0a,
+       MLX4_CMD_QP_ATTACH       = 0xf0b,
 
        /* debug commands */
        MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
@@ -126,6 +149,7 @@ enum {
 
        /* statistics commands */
        MLX4_CMD_QUERY_IF_STAT   = 0X54,
+       MLX4_CMD_SET_IF_STAT     = 0X55,
 };
 
 enum {
@@ -135,7 +159,8 @@ enum {
 };
 
 enum {
-       MLX4_MAILBOX_SIZE       =  4096
+       MLX4_MAILBOX_SIZE       = 4096,
+       MLX4_ACCESS_MEM_ALIGN   = 256,
 };
 
 enum {
@@ -148,6 +173,11 @@ enum {
        MLX4_SET_PORT_GID_TABLE = 0x5,
 };
 
+enum {
+       MLX4_CMD_WRAPPED,
+       MLX4_CMD_NATIVE
+};
+
 struct mlx4_dev;
 
 struct mlx4_cmd_mailbox {
@@ -157,23 +187,24 @@ struct mlx4_cmd_mailbox {
 
 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout);
+              u16 op, unsigned long timeout, int native);
 
 /* Invoke a command with no output parameter */
 static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
-                          u8 op_modifier, u16 op, unsigned long timeout)
+                          u8 op_modifier, u16 op, unsigned long timeout,
+                          int native)
 {
        return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 /* Invoke a command with an output mailbox */
 static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                               u32 in_modifier, u8 op_modifier, u16 op,
-                              unsigned long timeout)
+                              unsigned long timeout, int native)
 {
        return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 /*
@@ -183,13 +214,17 @@ static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param
  */
 static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                               u32 in_modifier, u8 op_modifier, u16 op,
-                              unsigned long timeout)
+                              unsigned long timeout, int native)
 {
        return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
-                         op_modifier, op, timeout);
+                         op_modifier, op, timeout, native);
 }
 
 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
 
+u32 mlx4_comm_get_version(void);
+
+#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+
 #endif /* MLX4_CMD_H */
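
For illustration only: with multi-function (SR-IOV) support every firmware command now states whether it may be proxied through the master (MLX4_CMD_WRAPPED) or must reach the device directly (MLX4_CMD_NATIVE). A sketch of a caller updated to the new prototypes; the example_ names are hypothetical, the command and timeout constants are pre-existing cmd.h values.

#include <linux/mlx4/cmd.h>

/* Sketch: issue QUERY_ADAPTER through the extended wrapper. */
static int example_query_adapter(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			    MLX4_CMD_QUERY_ADAPTER,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

/* The comm-channel interface revision comes out of the 32-bit version
 * word exported above. */
static u8 example_comm_if_rev(void)
{
	return MLX4_COMM_GET_IF_REV(mlx4_comm_get_version());
}
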
index 84b0b18..5f784ff 100644 (file)
@@ -47,6 +47,9 @@
 enum {
        MLX4_FLAG_MSI_X         = 1 << 0,
        MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
+       MLX4_FLAG_MASTER        = 1 << 2,
+       MLX4_FLAG_SLAVE         = 1 << 3,
+       MLX4_FLAG_SRIOV         = 1 << 4,
 };
 
 enum {
@@ -58,6 +61,15 @@ enum {
 };
 
 enum {
+       MLX4_MAX_NUM_PF         = 16,
+       MLX4_MAX_NUM_VF         = 64,
+       MLX4_MFUNC_MAX          = 80,
+       MLX4_MFUNC_EQ_NUM       = 4,
+       MLX4_MFUNC_MAX_EQES     = 8,
+       MLX4_MFUNC_EQE_MASK     = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
+enum {
        MLX4_DEV_CAP_FLAG_RC            = 1LL <<  0,
        MLX4_DEV_CAP_FLAG_UC            = 1LL <<  1,
        MLX4_DEV_CAP_FLAG_UD            = 1LL <<  2,
@@ -77,7 +89,8 @@ enum {
        MLX4_DEV_CAP_FLAG_IBOE          = 1LL << 30,
        MLX4_DEV_CAP_FLAG_UC_LOOPBACK   = 1LL << 32,
        MLX4_DEV_CAP_FLAG_FCS_KEEP      = 1LL << 34,
-       MLX4_DEV_CAP_FLAG_WOL           = 1LL << 38,
+       MLX4_DEV_CAP_FLAG_WOL_PORT1     = 1LL << 37,
+       MLX4_DEV_CAP_FLAG_WOL_PORT2     = 1LL << 38,
        MLX4_DEV_CAP_FLAG_UDP_RSS       = 1LL << 40,
        MLX4_DEV_CAP_FLAG_VEP_UC_STEER  = 1LL << 41,
        MLX4_DEV_CAP_FLAG_VEP_MC_STEER  = 1LL << 42,
@@ -116,7 +129,11 @@ enum mlx4_event {
        MLX4_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MLX4_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MLX4_EVENT_TYPE_ECC_DETECT         = 0x0e,
-       MLX4_EVENT_TYPE_CMD                = 0x0a
+       MLX4_EVENT_TYPE_CMD                = 0x0a,
+       MLX4_EVENT_TYPE_VEP_UPDATE         = 0x19,
+       MLX4_EVENT_TYPE_COMM_CHANNEL       = 0x18,
+       MLX4_EVENT_TYPE_FLR_EVENT          = 0x1c,
+       MLX4_EVENT_TYPE_NONE               = 0xff,
 };
 
 enum {
@@ -183,6 +200,7 @@ enum mlx4_qp_region {
 };
 
 enum mlx4_port_type {
+       MLX4_PORT_TYPE_NONE     = 0,
        MLX4_PORT_TYPE_IB       = 1,
        MLX4_PORT_TYPE_ETH      = 2,
        MLX4_PORT_TYPE_AUTO     = 3
@@ -215,6 +233,7 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 
 struct mlx4_caps {
        u64                     fw_ver;
+       u32                     function;
        int                     num_ports;
        int                     vl_cap[MLX4_MAX_PORTS + 1];
        int                     ib_mtu_cap[MLX4_MAX_PORTS + 1];
@@ -229,6 +248,7 @@ struct mlx4_caps {
        u64                     trans_code[MLX4_MAX_PORTS + 1];
        int                     local_ca_ack_delay;
        int                     num_uars;
+       u32                     uar_page_size;
        int                     bf_reg_size;
        int                     bf_regs_per_page;
        int                     max_sq_sg;
@@ -252,8 +272,7 @@ struct mlx4_caps {
        int                     num_comp_vectors;
        int                     comp_pool;
        int                     num_mpts;
-       int                     num_mtt_segs;
-       int                     mtts_per_seg;
+       int                     num_mtts;
        int                     fmr_reserved_mtts;
        int                     reserved_mtts;
        int                     reserved_mrws;
@@ -283,7 +302,7 @@ struct mlx4_caps {
        int                     log_num_prios;
        enum mlx4_port_type     port_type[MLX4_MAX_PORTS + 1];
        u8                      supported_type[MLX4_MAX_PORTS + 1];
-       u32                     port_mask;
+       u32                     port_mask[MLX4_MAX_PORTS + 1];
        enum mlx4_port_type     possible_type[MLX4_MAX_PORTS + 1];
        u32                     max_counters;
        u8                      ext_port_cap[MLX4_MAX_PORTS + 1];
@@ -303,7 +322,7 @@ struct mlx4_buf {
 };
 
 struct mlx4_mtt {
-       u32                     first_seg;
+       u32                     offset;
        int                     order;
        int                     page_shift;
 };
@@ -465,10 +484,12 @@ struct mlx4_counter {
 struct mlx4_dev {
        struct pci_dev         *pdev;
        unsigned long           flags;
+       unsigned long           num_slaves;
        struct mlx4_caps        caps;
        struct radix_tree_root  qp_table_tree;
        u8                      rev_id;
        char                    board_id[MLX4_BOARD_ID_LEN];
+       int                     num_vfs;
 };
 
 struct mlx4_init_port_param {
@@ -487,14 +508,32 @@ struct mlx4_init_port_param {
 
 #define mlx4_foreach_port(port, dev, type)                             \
        for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
-               if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
-                    ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+               if ((type) == (dev)->caps.port_mask[(port)])
 
-#define mlx4_foreach_ib_transport_port(port, dev)                      \
-       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)     \
-               if (((dev)->caps.port_mask & 1 << ((port) - 1)) ||      \
-                   ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+#define mlx4_foreach_ib_transport_port(port, dev)                         \
+       for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++)       \
+               if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
+                       ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
 
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+       return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
+{
+       return (qpn < dev->caps.sqp_start + 8);
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+       return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
+}
+
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+       return dev->flags & MLX4_FLAG_SLAVE;
+}
 
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf);
@@ -560,6 +599,10 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
 
+int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                       int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                       enum mlx4_protocol prot);
 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol protocol);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
@@ -570,9 +613,11 @@ int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
 
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
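
For illustration only: port_mask is now indexed per port and the new mlx4_is_master()/mlx4_is_slave() predicates tell a function what it owns. A sketch of iterating only the Ethernet ports on the PF; the example_ name is hypothetical.

#include <linux/mlx4/device.h>

static void example_scan_eth_ports(struct mlx4_dev *dev)
{
	int port;

	/* VFs (slave functions) leave resource setup to the master */
	if (mlx4_is_slave(dev))
		return;

	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH) {
		/* per-port Ethernet setup would go here */
	}
}
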
index 48cc4cb..bee8fa2 100644 (file)
@@ -97,6 +97,33 @@ enum {
        MLX4_QP_BIT_RIC                         = 1 <<  4,
 };
 
+enum {
+       MLX4_RSS_HASH_XOR                       = 0,
+       MLX4_RSS_HASH_TOP                       = 1,
+
+       MLX4_RSS_UDP_IPV6                       = 1 << 0,
+       MLX4_RSS_UDP_IPV4                       = 1 << 1,
+       MLX4_RSS_TCP_IPV6                       = 1 << 2,
+       MLX4_RSS_IPV6                           = 1 << 3,
+       MLX4_RSS_TCP_IPV4                       = 1 << 4,
+       MLX4_RSS_IPV4                           = 1 << 5,
+
+       /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
+       MLX4_RSS_OFFSET_IN_QPC_PRI_PATH         = 0x24,
+	/* offset of the RSS-indirection-QP flag within mlx4_qp_context.flags */
+       MLX4_RSS_QPC_FLAG_OFFSET                = 13,
+};
+
+struct mlx4_rss_context {
+       __be32                  base_qpn;
+       __be32                  default_qpn;
+       u16                     reserved;
+       u8                      hash_fn;
+       u8                      flags;
+       __be32                  rss_key[10];
+       __be32                  base_qpn_udp;
+};
+
 struct mlx4_qp_path {
        u8                      fl;
        u8                      reserved1[2];
@@ -183,6 +210,7 @@ struct mlx4_wqe_ctrl_seg {
         * [4]   IP checksum
         * [3:2] C (generate completion queue entry)
         * [1]   SE (solicited event)
+        * [0]   FL (force loopback)
         */
        __be32                  srcrb_flags;
        /*
index a7003b7..b188f68 100644 (file)
@@ -116,6 +116,7 @@ enum {
        NDTPA_PROXY_DELAY,              /* u64, msecs */
        NDTPA_PROXY_QLEN,               /* u32 */
        NDTPA_LOCKTIME,                 /* u64, msecs */
+       NDTPA_QUEUE_LENBYTES,           /* u32 */
        __NDTPA_MAX
 };
 #define NDTPA_MAX (__NDTPA_MAX - 1)
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
new file mode 100644 (file)
index 0000000..77f5202
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Network device features.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NETDEV_FEATURES_H
+#define _LINUX_NETDEV_FEATURES_H
+
+#include <linux/types.h>
+
+typedef u64 netdev_features_t;
+
+enum {
+       NETIF_F_SG_BIT,                 /* Scatter/gather IO. */
+       NETIF_F_IP_CSUM_BIT,            /* Can checksum TCP/UDP over IPv4. */
+       __UNUSED_NETIF_F_1,
+       NETIF_F_HW_CSUM_BIT,            /* Can checksum all the packets. */
+       NETIF_F_IPV6_CSUM_BIT,          /* Can checksum TCP/UDP over IPV6 */
+       NETIF_F_HIGHDMA_BIT,            /* Can DMA to high memory. */
+       NETIF_F_FRAGLIST_BIT,           /* Scatter/gather IO. */
+       NETIF_F_HW_VLAN_TX_BIT,         /* Transmit VLAN hw acceleration */
+       NETIF_F_HW_VLAN_RX_BIT,         /* Receive VLAN hw acceleration */
+       NETIF_F_HW_VLAN_FILTER_BIT,     /* Receive filtering on VLAN */
+       NETIF_F_VLAN_CHALLENGED_BIT,    /* Device cannot handle VLAN packets */
+       NETIF_F_GSO_BIT,                /* Enable software GSO. */
+       NETIF_F_LLTX_BIT,               /* LockLess TX - deprecated. Please */
+                                       /* do not use LLTX in new drivers */
+       NETIF_F_NETNS_LOCAL_BIT,        /* Does not change network namespaces */
+       NETIF_F_GRO_BIT,                /* Generic receive offload */
+       NETIF_F_LRO_BIT,                /* large receive offload */
+
+       /**/NETIF_F_GSO_SHIFT,          /* keep the order of SKB_GSO_* bits */
+       NETIF_F_TSO_BIT                 /* ... TCPv4 segmentation */
+               = NETIF_F_GSO_SHIFT,
+       NETIF_F_UFO_BIT,                /* ... UDPv4 fragmentation */
+       NETIF_F_GSO_ROBUST_BIT,         /* ... ->SKB_GSO_DODGY */
+       NETIF_F_TSO_ECN_BIT,            /* ... TCP ECN support */
+       NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
+       NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
+       NETIF_F_GSO_RESERVED1,          /* ... free (fill GSO_MASK to 8 bits) */
+       /**/NETIF_F_GSO_LAST,           /* [can't be last bit, see GSO_MASK] */
+       NETIF_F_GSO_RESERVED2           /* ... free (fill GSO_MASK to 8 bits) */
+               = NETIF_F_GSO_LAST,
+
+       NETIF_F_FCOE_CRC_BIT,           /* FCoE CRC32 */
+       NETIF_F_SCTP_CSUM_BIT,          /* SCTP checksum offload */
+       NETIF_F_FCOE_MTU_BIT,           /* Supports max FCoE MTU, 2158 bytes*/
+       NETIF_F_NTUPLE_BIT,             /* N-tuple filters supported */
+       NETIF_F_RXHASH_BIT,             /* Receive hashing offload */
+       NETIF_F_RXCSUM_BIT,             /* Receive checksumming offload */
+       NETIF_F_NOCACHE_COPY_BIT,       /* Use no-cache copyfromuser */
+       NETIF_F_LOOPBACK_BIT,           /* Enable loopback */
+
+       /*
+        * Add your fresh new feature above and remember to update
+        * netdev_features_strings[] in net/core/ethtool.c and maybe
+        * some feature mask #defines below. Please also describe it
+        * in Documentation/networking/netdev-features.txt.
+        */
+
+       /**/NETDEV_FEATURE_COUNT
+};
+
+/* copy'n'paste compression ;) */
+#define __NETIF_F_BIT(bit)     ((netdev_features_t)1 << (bit))
+#define __NETIF_F(name)                __NETIF_F_BIT(NETIF_F_##name##_BIT)
+
+#define NETIF_F_FCOE_CRC       __NETIF_F(FCOE_CRC)
+#define NETIF_F_FCOE_MTU       __NETIF_F(FCOE_MTU)
+#define NETIF_F_FRAGLIST       __NETIF_F(FRAGLIST)
+#define NETIF_F_FSO            __NETIF_F(FSO)
+#define NETIF_F_GRO            __NETIF_F(GRO)
+#define NETIF_F_GSO            __NETIF_F(GSO)
+#define NETIF_F_GSO_ROBUST     __NETIF_F(GSO_ROBUST)
+#define NETIF_F_HIGHDMA                __NETIF_F(HIGHDMA)
+#define NETIF_F_HW_CSUM                __NETIF_F(HW_CSUM)
+#define NETIF_F_HW_VLAN_FILTER __NETIF_F(HW_VLAN_FILTER)
+#define NETIF_F_HW_VLAN_RX     __NETIF_F(HW_VLAN_RX)
+#define NETIF_F_HW_VLAN_TX     __NETIF_F(HW_VLAN_TX)
+#define NETIF_F_IP_CSUM                __NETIF_F(IP_CSUM)
+#define NETIF_F_IPV6_CSUM      __NETIF_F(IPV6_CSUM)
+#define NETIF_F_LLTX           __NETIF_F(LLTX)
+#define NETIF_F_LOOPBACK       __NETIF_F(LOOPBACK)
+#define NETIF_F_LRO            __NETIF_F(LRO)
+#define NETIF_F_NETNS_LOCAL    __NETIF_F(NETNS_LOCAL)
+#define NETIF_F_NOCACHE_COPY   __NETIF_F(NOCACHE_COPY)
+#define NETIF_F_NTUPLE         __NETIF_F(NTUPLE)
+#define NETIF_F_RXCSUM         __NETIF_F(RXCSUM)
+#define NETIF_F_RXHASH         __NETIF_F(RXHASH)
+#define NETIF_F_SCTP_CSUM      __NETIF_F(SCTP_CSUM)
+#define NETIF_F_SG             __NETIF_F(SG)
+#define NETIF_F_TSO6           __NETIF_F(TSO6)
+#define NETIF_F_TSO_ECN                __NETIF_F(TSO_ECN)
+#define NETIF_F_TSO            __NETIF_F(TSO)
+#define NETIF_F_UFO            __NETIF_F(UFO)
+#define NETIF_F_VLAN_CHALLENGED        __NETIF_F(VLAN_CHALLENGED)
+
+/* Features valid for ethtool to change */
+/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE   (NETIF_F_VLAN_CHALLENGED | \
+                                NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+
+/* remember that ((t)1 << t_BITS) is undefined in C99 */
+#define NETIF_F_ETHTOOL_BITS   ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
+               (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \
+               ~NETIF_F_NEVER_CHANGE)
+
+/* Segmentation offload feature mask */
+#define NETIF_F_GSO_MASK       (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
+               __NETIF_F_BIT(NETIF_F_GSO_SHIFT))
+
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+                                NETIF_F_TSO6 | NETIF_F_UFO)
+
+#define NETIF_F_GEN_CSUM       NETIF_F_HW_CSUM
+#define NETIF_F_V4_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM       (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+
+#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+                                NETIF_F_FSO)
+
+/*
+ * If one device supports one of these features, then enable them
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ONE_FOR_ALL    (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+                                NETIF_F_SG | NETIF_F_HIGHDMA |         \
+                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
+/*
+ * If one device doesn't support one of these features, then disable it
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ALL_FOR_ALL    (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
+
+/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
+
+#endif /* _LINUX_NETDEV_FEATURES_H */
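
For illustration only: feature masks are now a 64-bit netdev_features_t built from the *_BIT enum above, so drivers keep the familiar NETIF_F_* names but should use the typedef when combining them. A hypothetical probe-time sketch:

#include <linux/netdev_features.h>
#include <linux/netdevice.h>

static void example_setup_features(struct net_device *dev)
{
	netdev_features_t hw = NETIF_F_SG | NETIF_F_IP_CSUM |
			       NETIF_F_TSO | NETIF_F_RXCSUM;

	dev->hw_features = hw;			/* user-toggleable */
	dev->features = hw | NETIF_F_HIGHDMA;	/* currently active */
	dev->vlan_features = hw;		/* inherited by VLAN devices */
}
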
index a82ad4d..6b9d4ed 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/rculist.h>
 #include <linux/dmaengine.h>
 #include <linux/workqueue.h>
+#include <linux/dynamic_queue_limits.h>
 
 #include <linux/ethtool.h>
 #include <net/net_namespace.h>
 #ifdef CONFIG_DCB
 #include <net/dcbnl.h>
 #endif
+#include <net/netprio_cgroup.h>
+
+#include <linux/netdev_features.h>
 
-struct vlan_group;
 struct netpoll_info;
 struct phy_device;
 /* 802.11 specific */
@@ -141,22 +144,20 @@ static inline bool dev_xmit_complete(int rc)
  *     used.
  */
 
-#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
 # if defined(CONFIG_MAC80211_MESH)
 #  define LL_MAX_HEADER 128
 # else
 #  define LL_MAX_HEADER 96
 # endif
-#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#elif IS_ENABLED(CONFIG_TR)
 # define LL_MAX_HEADER 48
 #else
 # define LL_MAX_HEADER 32
 #endif
 
-#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
-    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
-    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
-    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
+#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
+    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
 #define MAX_HEADER LL_MAX_HEADER
 #else
 #define MAX_HEADER (LL_MAX_HEADER + 48)
@@ -212,6 +213,11 @@ enum {
 #include <linux/cache.h>
 #include <linux/skbuff.h>
 
+#ifdef CONFIG_RPS
+#include <linux/jump_label.h>
+extern struct jump_label_key rps_needed;
+#endif
+
 struct neighbour;
 struct neigh_parms;
 struct sk_buff;
@@ -272,16 +278,11 @@ struct hh_cache {
  *
  * We could use other alignment values, but we must maintain the
  * relationship HH alignment <= LL alignment.
- *
- * LL_ALLOCATED_SPACE also takes into account the tailroom the device
- * may need.
  */
 #define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
-#define LL_ALLOCATED_SPACE(dev) \
-       ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 
 struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
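
For illustration only: with LL_ALLOCATED_SPACE() gone, callers reserve headroom via LL_RESERVED_SPACE() and add the device tailroom to the allocation length instead of folding it into the headroom. A hypothetical allocation sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_tx_skb(struct net_device *dev,
					    unsigned int payload_len)
{
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	struct sk_buff *skb;

	skb = alloc_skb(hlen + payload_len + tlen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);		/* headroom for the link-layer header */
	return skb;
}
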
@@ -516,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
 #endif
 
 enum netdev_queue_state_t {
-       __QUEUE_STATE_XOFF,
+       __QUEUE_STATE_DRV_XOFF,
+       __QUEUE_STATE_STACK_XOFF,
        __QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)          | \
-                                   (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)            | \
+                             (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF           | \
+                                       (1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
+ * netif_tx_* functions below are used to manipulate this flag.  The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently.  The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits are set in the state).  Drivers should not need to call
+ * netif_xmit*stopped functions, they should only be using netif_tx_*.
+ */
 
 struct netdev_queue {
 /*
@@ -528,9 +541,8 @@ struct netdev_queue {
  */
        struct net_device       *dev;
        struct Qdisc            *qdisc;
-       unsigned long           state;
        struct Qdisc            *qdisc_sleeping;
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        struct kobject          kobj;
 #endif
 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
@@ -545,6 +557,18 @@ struct netdev_queue {
         * please use this field instead of dev->trans_start
         */
        unsigned long           trans_start;
+
+       /*
+        * Number of TX timeouts for this queue
+        * (/sys/class/net/DEV/Q/trans_timeout)
+        */
+       unsigned long           trans_timeout;
+
+       unsigned long           state;
+
+#ifdef CONFIG_BQL
+       struct dql              dql;
+#endif
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -767,11 +791,11 @@ struct netdev_tc_txq {
  *     3. Update dev->stats asynchronously and atomically, and define
  *        neither operation.
  *
- * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
  *     If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *     this function is called when a VLAN id is registered.
  *
- * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
  *     If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
  *     this function is called when a VLAN id is unregistered.
  *
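
For illustration only: because the callbacks now return int, a driver can report a full VLAN filter table or a hardware failure back to the 8021q core. A hypothetical sketch in which example_hw_set_vlan_filter() stands in for the device-specific register programming:

#include <linux/netdevice.h>
#include <linux/errno.h>

/* hypothetical hardware hook, stubbed for the sketch */
static int example_hw_set_vlan_filter(struct net_device *dev,
				      unsigned short vid, bool enable)
{
	return 0;
}

static int example_vlan_rx_add_vid(struct net_device *dev,
				   unsigned short vid)
{
	if (example_hw_set_vlan_filter(dev, vid, true))
		return -EIO;	/* the error now reaches the caller */
	return 0;
}

static int example_vlan_rx_kill_vid(struct net_device *dev,
				    unsigned short vid)
{
	return example_hw_set_vlan_filter(dev, vid, false);
}
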
@@ -845,12 +869,13 @@ struct netdev_tc_txq {
  *     Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
- * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *             netdev_features_t features);
  *     Adjusts the requested feature flags according to device-specific
  *     constraints, and returns the resulting flags. Must not modify
  *     the device state.
  *
- * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *     Called to update device configuration to new features. Passed
  *     feature set might be less than what was returned by ndo_fix_features()).
  *     Must return >0 or -errno if it changed dev->features itself.
@@ -885,9 +910,9 @@ struct net_device_ops {
                                                     struct rtnl_link_stats64 *storage);
        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 
-       void                    (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+       int                     (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                                       unsigned short vid);
-       void                    (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+       int                     (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                        unsigned short vid);
 #ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
@@ -912,7 +937,7 @@ struct net_device_ops {
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
        int                     (*ndo_setup_tc)(struct net_device *dev, u8 tc);
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
        int                     (*ndo_fcoe_ddp_setup)(struct net_device *dev,
@@ -927,7 +952,7 @@ struct net_device_ops {
                                                       unsigned int sgc);
 #endif
 
-#if defined(CONFIG_LIBFCOE) || defined(CONFIG_LIBFCOE_MODULE)
+#if IS_ENABLED(CONFIG_LIBFCOE)
 #define NETDEV_FCOE_WWNN 0
 #define NETDEV_FCOE_WWPN 1
        int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
@@ -944,10 +969,11 @@ struct net_device_ops {
                                                 struct net_device *slave_dev);
        int                     (*ndo_del_slave)(struct net_device *dev,
                                                 struct net_device *slave_dev);
-       u32                     (*ndo_fix_features)(struct net_device *dev,
-                                                   u32 features);
+       netdev_features_t       (*ndo_fix_features)(struct net_device *dev,
+                                                   netdev_features_t features);
        int                     (*ndo_set_features)(struct net_device *dev,
-                                                   u32 features);
+                                                   netdev_features_t features);
+       int                     (*ndo_neigh_construct)(struct neighbour *n);
 };
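
For illustration only: the retyped ndo_fix_features()/ndo_set_features() hooks operate on netdev_features_t. A hypothetical sketch that enforces a common dependency (TSO needs scatter/gather) and reacts to an RX checksum toggle:

#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* TSO is useless without scatter/gather */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* program the hardware RX checksum enable bit here */
	}
	return 0;
}
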
 
 /*
@@ -997,91 +1023,13 @@ struct net_device {
        struct list_head        unreg_list;
 
        /* currently active device features */
-       u32                     features;
+       netdev_features_t       features;
        /* user-changeable features */
-       u32                     hw_features;
+       netdev_features_t       hw_features;
        /* user-requested features */
-       u32                     wanted_features;
+       netdev_features_t       wanted_features;
        /* mask of features inheritable by VLAN devices */
-       u32                     vlan_features;
-
-       /* Net device feature bits; if you change something,
-        * also update netdev_features_strings[] in ethtool.c */
-
-#define NETIF_F_SG             1       /* Scatter/gather IO. */
-#define NETIF_F_IP_CSUM                2       /* Can checksum TCP/UDP over IPv4. */
-#define NETIF_F_NO_CSUM                4       /* Does not require checksum. F.e. loopack. */
-#define NETIF_F_HW_CSUM                8       /* Can checksum all the packets. */
-#define NETIF_F_IPV6_CSUM      16      /* Can checksum TCP/UDP over IPV6 */
-#define NETIF_F_HIGHDMA                32      /* Can DMA to high memory. */
-#define NETIF_F_FRAGLIST       64      /* Scatter/gather IO. */
-#define NETIF_F_HW_VLAN_TX     128     /* Transmit VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_RX     256     /* Receive VLAN hw acceleration */
-#define NETIF_F_HW_VLAN_FILTER 512     /* Receive filtering on VLAN */
-#define NETIF_F_VLAN_CHALLENGED        1024    /* Device cannot handle VLAN packets */
-#define NETIF_F_GSO            2048    /* Enable software GSO. */
-#define NETIF_F_LLTX           4096    /* LockLess TX - deprecated. Please */
-                                       /* do not use LLTX in new drivers */
-#define NETIF_F_NETNS_LOCAL    8192    /* Does not change network namespaces */
-#define NETIF_F_GRO            16384   /* Generic receive offload */
-#define NETIF_F_LRO            32768   /* large receive offload */
-
-/* the GSO_MASK reserves bits 16 through 23 */
-#define NETIF_F_FCOE_CRC       (1 << 24) /* FCoE CRC32 */
-#define NETIF_F_SCTP_CSUM      (1 << 25) /* SCTP checksum offload */
-#define NETIF_F_FCOE_MTU       (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
-#define NETIF_F_NTUPLE         (1 << 27) /* N-tuple filters supported */
-#define NETIF_F_RXHASH         (1 << 28) /* Receive hashing offload */
-#define NETIF_F_RXCSUM         (1 << 29) /* Receive checksumming offload */
-#define NETIF_F_NOCACHE_COPY   (1 << 30) /* Use no-cache copyfromuser */
-#define NETIF_F_LOOPBACK       (1 << 31) /* Enable loopback */
-
-       /* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT      16
-#define NETIF_F_GSO_MASK       0x00ff0000
-#define NETIF_F_TSO            (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO            (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
-#define NETIF_F_GSO_ROBUST     (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN                (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO6           (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_FSO            (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
-
-       /* Features valid for ethtool to change */
-       /* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE   (NETIF_F_VLAN_CHALLENGED | \
-                                 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
-#define NETIF_F_ETHTOOL_BITS   (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
-
-       /* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | \
-                                NETIF_F_TSO6 | NETIF_F_UFO)
-
-
-#define NETIF_F_GEN_CSUM       (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_V4_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
-#define NETIF_F_V6_CSUM                (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
-#define NETIF_F_ALL_CSUM       (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
-
-#define NETIF_F_ALL_TSO        (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-#define NETIF_F_ALL_FCOE       (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
-                                NETIF_F_FSO)
-
-       /*
-        * If one device supports one of these features, then enable them
-        * for all in netdev_increment_features.
-        */
-#define NETIF_F_ONE_FOR_ALL    (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
-                                NETIF_F_SG | NETIF_F_HIGHDMA |         \
-                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
-       /*
-        * If one device doesn't support one of these features, then disable it
-        * for all in netdev_increment_features.
-        */
-#define NETIF_F_ALL_FOR_ALL    (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
-
-       /* changeable features with no special hardware requirements */
-#define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
+       netdev_features_t       vlan_features;
 
        /* Interface index. Unique device identifier    */
        int                     ifindex;
@@ -1132,6 +1080,7 @@ struct net_device {
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
        unsigned char           addr_assign_type; /* hw address assignment type */
        unsigned char           addr_len;       /* hardware address length      */
+       unsigned char           neigh_priv_len;
        unsigned short          dev_id;         /* for shared network cards */
 
        spinlock_t              addr_list_lock;
@@ -1144,11 +1093,11 @@ struct net_device {
 
        /* Protocol specific pointers */
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-       struct vlan_group __rcu *vlgrp;         /* VLAN group */
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+       struct vlan_info __rcu  *vlan_info;     /* VLAN info */
 #endif
-#ifdef CONFIG_NET_DSA
-       void                    *dsa_ptr;       /* dsa specific data */
+#if IS_ENABLED(CONFIG_NET_DSA)
+       struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
 #endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
@@ -1184,9 +1133,11 @@ struct net_device {
 
        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
 
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        struct kset             *queues_kset;
+#endif
 
+#ifdef CONFIG_RPS
        struct netdev_rx_queue  *_rx;
 
        /* Number of RX queues allocated at register_netdev() time */
@@ -1308,10 +1259,13 @@ struct net_device {
        struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
        u8 prio_tc_map[TC_BITMASK + 1];
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
 #endif
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+       struct netprio_map __rcu *priomap;
+#endif
        /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
 
@@ -1515,7 +1469,7 @@ struct packet_type {
                                         struct packet_type *,
                                         struct net_device *);
        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
-                                               u32 features);
+                                               netdev_features_t features);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
@@ -1783,7 +1737,7 @@ extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-       if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+       if (!(txq->state & QUEUE_STATE_ANY_XOFF))
                __netif_schedule(txq->qdisc);
 }
 
@@ -1797,7 +1751,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
 
 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-       clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1829,7 +1783,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
                return;
        }
 #endif
-       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
 }
 
@@ -1861,7 +1815,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
                pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
                return;
        }
-       set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1888,7 +1842,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 
 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-       return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+       return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1902,9 +1856,68 @@ static inline int netif_queue_stopped(const struct net_device *dev)
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-       return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+       return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+       return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+                                       unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+       dql_queued(&dev_queue->dql, bytes);
+       if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
+               set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+               if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+                       clear_bit(__QUEUE_STATE_STACK_XOFF,
+                           &dev_queue->state);
+       }
+#endif
+}
+
+static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
+{
+       netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+                                            unsigned pkts, unsigned bytes)
+{
+#ifdef CONFIG_BQL
+       if (likely(bytes)) {
+               dql_completed(&dev_queue->dql, bytes);
+               if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
+                   &dev_queue->state) &&
+                   dql_avail(&dev_queue->dql) >= 0)) {
+                       if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
+                            &dev_queue->state))
+                               netif_schedule_queue(dev_queue);
+               }
+       }
+#endif
+}
+
+static inline void netdev_completed_queue(struct net_device *dev,
+                                         unsigned pkts, unsigned bytes)
+{
+       netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
+}
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+#ifdef CONFIG_BQL
+       dql_reset(&q->dql);
+#endif
+}
+
+static inline void netdev_reset_queue(struct net_device *dev_queue)
+{
+       netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
 }
 
 /**
@@ -1991,7 +2004,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
        if (netpoll_trap())
                return;
 #endif
-       if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
 }
 
@@ -2520,7 +2533,8 @@ extern int                netdev_set_master(struct net_device *dev, struct net_device *master)
 extern int netdev_set_bond_master(struct net_device *dev,
                                  struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+       netdev_features_t features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2549,11 +2563,13 @@ extern const char *netdev_drivername(const struct net_device *dev);
 
 extern void linkwatch_run_queue(void);
 
-static inline u32 netdev_get_wanted_features(struct net_device *dev)
+static inline netdev_features_t netdev_get_wanted_features(
+       struct net_device *dev)
 {
        return (dev->features & ~dev->hw_features) | dev->wanted_features;
 }
-u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+netdev_features_t netdev_increment_features(netdev_features_t all,
+       netdev_features_t one, netdev_features_t mask);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
@@ -2561,21 +2577,31 @@ void netdev_change_features(struct net_device *dev);
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
                                        struct net_device *dev);
 
-u32 netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(u32 features, int gso_type)
+static inline int net_gso_ok(netdev_features_t features, int gso_type)
 {
-       int feature = gso_type << NETIF_F_GSO_SHIFT;
+       netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+
+       /* check flags correspondence */
+       BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
+       BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+
        return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
+static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
 {
        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct sk_buff *skb, int features)
+static inline int netif_needs_gso(struct sk_buff *skb,
+       netdev_features_t features)
 {
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
@@ -2594,22 +2620,6 @@ static inline int netif_is_bond_slave(struct net_device *dev)
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
-static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
-{
-       if (dev->features & NETIF_F_RXCSUM)
-               return 1;
-       if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
-               return 0;
-       return dev->ethtool_ops->get_rx_csum(dev);
-}
-
-static inline u32 dev_ethtool_get_flags(struct net_device *dev)
-{
-       if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
-               return 0;
-       return dev->ethtool_ops->get_flags(dev);
-}
-
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* netdev_printk helpers, similar to dev_printk */
index 857f502..b809265 100644 (file)
@@ -162,6 +162,24 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
+#if defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+       if (__builtin_constant_p(pf) &&
+           __builtin_constant_p(hook))
+               return static_branch(&nf_hooks_needed[pf][hook]);
+
+       return !list_empty(&nf_hooks[pf][hook]);
+}
+#else
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+       return !list_empty(&nf_hooks[pf][hook]);
+}
+#endif
+
 int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
                 struct net_device *indev, struct net_device *outdev,
                 int (*okfn)(struct sk_buff *), int thresh);
@@ -179,11 +197,9 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
                                 struct net_device *outdev,
                                 int (*okfn)(struct sk_buff *), int thresh)
 {
-#ifndef CONFIG_NETFILTER_DEBUG
-       if (list_empty(&nf_hooks[pf][hook]))
-               return 1;
-#endif
-       return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+       if (nf_hooks_active(pf, hook))
+               return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
+       return 1;
 }
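
For illustration only: nf_hook_thresh() now always performs the cheap registered-hook test, and with CONFIG_JUMP_LABEL that test compiles down to a patched branch keyed on nf_hooks_needed[]. A hypothetical call site; the protocol, hook number and INT_MIN threshold are illustrative:

#include <linux/kernel.h>
#include <linux/netfilter.h>

static int example_rx(struct sk_buff *skb, struct net_device *dev,
		      int (*okfn)(struct sk_buff *))
{
	int ret = nf_hook_thresh(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb,
				 dev, NULL, okfn, INT_MIN);

	/* 1 means no hook stole the packet: continue via okfn() */
	return ret == 1 ? okfn(skb) : ret;
}
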
 
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
index 8374d29..52e4895 100644 (file)
@@ -8,7 +8,7 @@
 #define NETLINK_UNUSED         1       /* Unused number                                */
 #define NETLINK_USERSOCK       2       /* Reserved for user mode socket protocols      */
 #define NETLINK_FIREWALL       3       /* Firewalling hook                             */
-#define NETLINK_INET_DIAG      4       /* INET socket monitoring                       */
+#define NETLINK_SOCK_DIAG      4       /* socket monitoring                            */
 #define NETLINK_NFLOG          5       /* netfilter/iptables ULOG */
 #define NETLINK_XFRM           6       /* ipsec */
 #define NETLINK_SELINUX                7       /* SELinux event notifications */
@@ -27,6 +27,8 @@
 #define NETLINK_RDMA           20
 #define NETLINK_CRYPTO         21      /* Crypto layer */
 
+#define NETLINK_INET_DIAG      NETLINK_SOCK_DIAG
+
 #define MAX_LINKS 32           
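
For illustration only: protocol 4 is generalized from INET-only to generic socket monitoring, and the old name is kept as an alias so existing inet_diag users still build. A userspace sketch opening the renamed protocol:

#include <sys/socket.h>
#include <linux/netlink.h>

int open_sock_diag(void)
{
	/* NETLINK_INET_DIAG and NETLINK_SOCK_DIAG are the same value (4) */
	return socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
}
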
 
 struct sockaddr_nl {
index 8049bf7..a187606 100644 (file)
  * @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
  * @NL80211_CMD_TDLS_MGMT: Send a TDLS management frame.
  *
+ * @NL80211_CMD_UNEXPECTED_FRAME: Used by an application controlling an AP
+ *     (or GO) interface (i.e. hostapd) to ask for unexpected frames to
+ *     implement sending deauth to stations that send unexpected class 3
+ *     frames. Also used as the event sent by the kernel when such a frame
+ *     is received.
+ *     For the event, the %NL80211_ATTR_MAC attribute carries the TA and
+ *     other attributes like the interface index are present.
+ *     If used as the command it must have an interface index and you can
+ *     only unsubscribe from the event by closing the socket. Subscription
+ *     is also for %NL80211_CMD_UNEXPECTED_4ADDR_FRAME events.
+ *
+ * @NL80211_CMD_UNEXPECTED_4ADDR_FRAME: Sent as an event indicating that the
+ *     associated station identified by %NL80211_ATTR_MAC sent a 4addr frame
+ *     and wasn't already in a 4-addr VLAN. The event will be sent similarly
+ *     to the %NL80211_CMD_UNEXPECTED_FRAME event, to the same listener.
+ *
+ * @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
+ *     by sending a null data frame to it and reporting when the frame is
+ *     acknowledged. This is used to allow timing out inactive clients. Uses
+ *     %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
+ *     direct reply with an %NL80211_ATTR_COOKIE that is later used to match
+ *     up the event with the request. The event includes the same data and
+ *     has %NL80211_ATTR_ACK set if the frame was ACKed.
+ *
+ * @NL80211_CMD_REGISTER_BEACONS: Register this socket to receive beacons from
+ *     other BSSes when any interfaces are in AP mode. This helps implement
+ *     OLBC handling in hostapd. Beacons are reported in %NL80211_CMD_FRAME
+ *     messages. Note that per PHY only one application may register.
+ *
+ * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap of the individual TIDs for which
+ *      the No Acknowledgement Policy should be applied.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -638,6 +670,16 @@ enum nl80211_commands {
        NL80211_CMD_TDLS_OPER,
        NL80211_CMD_TDLS_MGMT,
 
+       NL80211_CMD_UNEXPECTED_FRAME,
+
+       NL80211_CMD_PROBE_CLIENT,
+
+       NL80211_CMD_REGISTER_BEACONS,
+
+       NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+
+       NL80211_CMD_SET_NOACK_MAP,
+
        /* add new commands above here */
 
        /* used to define NL80211_CMD_MAX below */
@@ -658,6 +700,8 @@ enum nl80211_commands {
 #define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
 #define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT
 
+#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+
 /* source-level API compatibility */
 #define NL80211_CMD_GET_MESH_PARAMS NL80211_CMD_GET_MESH_CONFIG
 #define NL80211_CMD_SET_MESH_PARAMS NL80211_CMD_SET_MESH_CONFIG
@@ -1109,6 +1153,46 @@ enum nl80211_commands {
  *     %NL80211_CMD_TDLS_MGMT. Otherwise %NL80211_CMD_TDLS_OPER should be
  *     used for asking the driver to perform a TDLS operation.
  *
+ * @NL80211_ATTR_DEVICE_AP_SME: This u32 attribute may be listed for devices
+ *     that have AP support to indicate that they have the AP SME integrated
+ *     with support for the features listed in this attribute, see
+ *     &enum nl80211_ap_sme_features.
+ *
+ * @NL80211_ATTR_DONT_WAIT_FOR_ACK: Used with %NL80211_CMD_FRAME, this tells
+ *     the driver to not wait for an acknowledgement. Note that due to this,
+ *     it will also not give a status callback nor return a cookie. This is
+ *     mostly useful for probe responses to save airtime.
+ *
+ * @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
+ *     &enum nl80211_feature_flags and is advertised in wiphy information.
+ * @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
+ *     requests while operating in AP-mode.
+ *     This attribute holds a bitmap of the supported protocols for
+ *     offloading (see &enum nl80211_probe_resp_offload_support_attr).
+ *
+ * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
+ *     probe-response frame. The DA field in the 802.11 header is zero-ed out,
+ *     to be filled by the FW.
+ * @NL80211_ATTR_DISABLE_HT:  Force HT capable interfaces to disable
+ *      this feature.  Currently, only supported in mac80211 drivers.
+ * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
+ *      ATTR_HT_CAPABILITY to which attention should be paid.
+ *      Currently, only mac80211 NICs support this feature.
+ *      The values that may be configured are:
+ *       MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40
+ *       AMPDU density and AMPDU factor.
+ *      All values are treated as suggestions and may be ignored
+ *      by the driver as required.  The actual values may be seen in
+ *      the station debugfs ht_caps file.
+ *
+ * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country
+ *    abides by when initiating radiation on DFS channels. A country maps
+ *    to one DFS region.
+ *
+ * @NL80211_ATTR_NOACK_MAP: This u16 bitmap contains the No Ack Policy of
+ *      up to 16 TIDs.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1337,6 +1421,23 @@ enum nl80211_attrs {
        NL80211_ATTR_TDLS_SUPPORT,
        NL80211_ATTR_TDLS_EXTERNAL_SETUP,
 
+       NL80211_ATTR_DEVICE_AP_SME,
+
+       NL80211_ATTR_DONT_WAIT_FOR_ACK,
+
+       NL80211_ATTR_FEATURE_FLAGS,
+
+       NL80211_ATTR_PROBE_RESP_OFFLOAD,
+
+       NL80211_ATTR_PROBE_RESP,
+
+       NL80211_ATTR_DFS_REGION,
+
+       NL80211_ATTR_DISABLE_HT,
+       NL80211_ATTR_HT_CAPABILITY_MASK,
+
+       NL80211_ATTR_NOACK_MAP,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1371,6 +1472,7 @@ enum nl80211_attrs {
 #define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES
 #define NL80211_ATTR_KEY NL80211_ATTR_KEY
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
 #define NL80211_MAX_SUPP_RATES                 32
 #define NL80211_MAX_SUPP_REG_RULES             32
@@ -1845,6 +1947,21 @@ enum nl80211_reg_rule_flags {
 };
 
 /**
+ * enum nl80211_dfs_regions - regulatory DFS regions
+ *
+ * @NL80211_DFS_UNSET: Country has no DFS master region specified
+ * @NL80211_DFS_FCC: Country follows DFS master rules from FCC
+ * @NL80211_DFS_ETSI: Country follows DFS master rules from ETSI
+ * @NL80211_DFS_JP: Country follows DFS master rules from JP/MKK/Telec
+ */
+enum nl80211_dfs_regions {
+       NL80211_DFS_UNSET       = 0,
+       NL80211_DFS_FCC         = 1,
+       NL80211_DFS_ETSI        = 2,
+       NL80211_DFS_JP          = 3,
+};
+
+/**
  * enum nl80211_survey_info - survey information
  *
  * These attribute types are used with %NL80211_ATTR_SURVEY_INFO
@@ -1977,6 +2094,10 @@ enum nl80211_mntr_flags {
  * access to a broader network beyond the MBSS.  This is done via Root
  * Announcement frames.
  *
+ * @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
+ * TUs) during which a mesh STA can send only one Action frame containing a
+ * PERR element.
+ *
  * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
  *
  * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
@@ -2000,6 +2121,7 @@ enum nl80211_meshconf_params {
        NL80211_MESHCONF_ELEMENT_TTL,
        NL80211_MESHCONF_HWMP_RANN_INTERVAL,
        NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+       NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
 
        /* keep last */
        __NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2650,4 +2772,45 @@ enum nl80211_tdls_operation {
        NL80211_TDLS_DISABLE_LINK,
 };
 
+/*
+ * enum nl80211_ap_sme_features - device-integrated AP features
+ * Reserved for future use, no bits are defined in
+ * NL80211_ATTR_DEVICE_AP_SME yet.
+enum nl80211_ap_sme_features {
+};
+ */
+
+/**
+ * enum nl80211_feature_flags - device/driver features
+ * @NL80211_FEATURE_SK_TX_STATUS: This driver supports reflecting back
+ *     TX status to the socket error queue when requested with the
+ *     socket option.
+ * @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
+ */
+enum nl80211_feature_flags {
+       NL80211_FEATURE_SK_TX_STATUS    = 1 << 0,
+       NL80211_FEATURE_HT_IBSS         = 1 << 1,
+};
+
+/**
+ * enum nl80211_probe_resp_offload_support_attr - optional supported
+ *     protocols for probe-response offloading by the driver/FW.
+ *     To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
+ *     Each enum value represents a bit in the bitmap of supported
+ *     protocols. Typically a subset of probe-requests belonging to a
+ *     supported protocol will be excluded from offload and uploaded
+ *     to the host.
+ *
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P
+ * @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u
+ */
+enum nl80211_probe_resp_offload_support_attr {
+       NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS =        1<<0,
+       NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 =       1<<1,
+       NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P =        1<<2,
+       NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U =     1<<3,
+};
+
 #endif /* __LINUX_NL80211_H */
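(Editorial aside.) Because the new NL80211_ATTR_FEATURE_FLAGS attribute carries the
nl80211_feature_flags values as a plain bitmap, a consumer only needs bit tests
against the received u32. A self-contained sketch, with the enum values copied from
the hunk above and the bitmap hard-coded instead of being parsed from a real
netlink reply:

#include <stdio.h>
#include <stdint.h>

enum nl80211_feature_flags {                    /* values from the header above */
        NL80211_FEATURE_SK_TX_STATUS    = 1 << 0,
        NL80211_FEATURE_HT_IBSS         = 1 << 1,
};

int main(void)
{
        /* pretend this arrived in an NL80211_ATTR_FEATURE_FLAGS attribute */
        uint32_t features = NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_HT_IBSS;

        if (features & NL80211_FEATURE_SK_TX_STATUS)
                printf("driver reflects TX status to the socket error queue\n");
        if (features & NL80211_FEATURE_HT_IBSS)
                printf("driver supports HT data rates in IBSS\n");
        return 0;
}
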
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
new file mode 100644 (file)
index 0000000..eb1efa5
--- /dev/null
@@ -0,0 +1,452 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _LINUX_OPENVSWITCH_H
+#define _LINUX_OPENVSWITCH_H 1
+
+#include <linux/types.h>
+
+/**
+ * struct ovs_header - header for OVS Generic Netlink messages.
+ * @dp_ifindex: ifindex of local port for datapath (0 to make a request not
+ * specific to a datapath).
+ *
+ * Attributes following the header are specific to a particular OVS Generic
+ * Netlink family, but all of the OVS families use this header.
+ */
+
+struct ovs_header {
+       int dp_ifindex;
+};
+
+/* Datapaths. */
+
+#define OVS_DATAPATH_FAMILY  "ovs_datapath"
+#define OVS_DATAPATH_MCGROUP "ovs_datapath"
+#define OVS_DATAPATH_VERSION 0x1
+
+enum ovs_datapath_cmd {
+       OVS_DP_CMD_UNSPEC,
+       OVS_DP_CMD_NEW,
+       OVS_DP_CMD_DEL,
+       OVS_DP_CMD_GET,
+       OVS_DP_CMD_SET
+};
+
+/**
+ * enum ovs_datapath_attr - attributes for %OVS_DP_* commands.
+ * @OVS_DP_ATTR_NAME: Name of the network device that serves as the "local
+ * port".  This is the name of the network device whose dp_ifindex is given in
+ * the &struct ovs_header.  Always present in notifications.  Required in
+ * %OVS_DP_NEW requests.  May be used as an alternative to specifying
+ * dp_ifindex in other requests (with a dp_ifindex of 0).
+ * @OVS_DP_ATTR_UPCALL_PID: The Netlink socket in userspace that is initially
+ * set on the datapath port (for OVS_ACTION_ATTR_MISS).  Only valid on
+ * %OVS_DP_CMD_NEW requests. A value of zero indicates that upcalls should
+ * not be sent.
+ * @OVS_DP_ATTR_STATS: Statistics about packets that have passed through the
+ * datapath.  Always present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_DP_* commands.
+ */
+enum ovs_datapath_attr {
+       OVS_DP_ATTR_UNSPEC,
+       OVS_DP_ATTR_NAME,       /* name of dp_ifindex netdev */
+       OVS_DP_ATTR_UPCALL_PID, /* Netlink PID to receive upcalls */
+       OVS_DP_ATTR_STATS,      /* struct ovs_dp_stats */
+       __OVS_DP_ATTR_MAX
+};
+
+#define OVS_DP_ATTR_MAX (__OVS_DP_ATTR_MAX - 1)
+
+struct ovs_dp_stats {
+       __u64 n_hit;             /* Number of flow table matches. */
+       __u64 n_missed;          /* Number of flow table misses. */
+       __u64 n_lost;            /* Number of misses not sent to userspace. */
+       __u64 n_flows;           /* Number of flows present */
+};
+
+struct ovs_vport_stats {
+       __u64   rx_packets;             /* total packets received       */
+       __u64   tx_packets;             /* total packets transmitted    */
+       __u64   rx_bytes;               /* total bytes received         */
+       __u64   tx_bytes;               /* total bytes transmitted      */
+       __u64   rx_errors;              /* bad packets received         */
+       __u64   tx_errors;              /* packet transmit problems     */
+       __u64   rx_dropped;             /* no space in linux buffers    */
+       __u64   tx_dropped;             /* no space available in linux  */
+};
+
+/* Fixed logical ports. */
+#define OVSP_LOCAL      ((__u16)0)
+
+/* Packet transfer. */
+
+#define OVS_PACKET_FAMILY "ovs_packet"
+#define OVS_PACKET_VERSION 0x1
+
+enum ovs_packet_cmd {
+       OVS_PACKET_CMD_UNSPEC,
+
+       /* Kernel-to-user notifications. */
+       OVS_PACKET_CMD_MISS,    /* Flow table miss. */
+       OVS_PACKET_CMD_ACTION,  /* OVS_ACTION_ATTR_USERSPACE action. */
+
+       /* Userspace commands. */
+       OVS_PACKET_CMD_EXECUTE  /* Apply actions to a packet. */
+};
+
+/**
+ * enum ovs_packet_attr - attributes for %OVS_PACKET_* commands.
+ * @OVS_PACKET_ATTR_PACKET: Present for all notifications.  Contains the entire
+ * packet as received, from the start of the Ethernet header onward.  For
+ * %OVS_PACKET_CMD_ACTION, %OVS_PACKET_ATTR_PACKET reflects changes made by
+ * actions preceding %OVS_ACTION_ATTR_USERSPACE, but %OVS_PACKET_ATTR_KEY is
+ * the flow key extracted from the packet as originally received.
+ * @OVS_PACKET_ATTR_KEY: Present for all notifications.  Contains the flow key
+ * extracted from the packet as nested %OVS_KEY_ATTR_* attributes.  This allows
+ * userspace to adapt its flow setup strategy by comparing its notion of the
+ * flow key against the kernel's.
+ * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet.  Used
+ * for %OVS_PACKET_CMD_EXECUTE.  It has nested %OVS_ACTION_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
+ * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
+ * %OVS_USERSPACE_ATTR_USERDATA attribute.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_PACKET_* commands.
+ */
+enum ovs_packet_attr {
+       OVS_PACKET_ATTR_UNSPEC,
+       OVS_PACKET_ATTR_PACKET,      /* Packet data. */
+       OVS_PACKET_ATTR_KEY,         /* Nested OVS_KEY_ATTR_* attributes. */
+       OVS_PACKET_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
+       OVS_PACKET_ATTR_USERDATA,    /* u64 OVS_ACTION_ATTR_USERSPACE arg. */
+       __OVS_PACKET_ATTR_MAX
+};
+
+#define OVS_PACKET_ATTR_MAX (__OVS_PACKET_ATTR_MAX - 1)
+
+/* Virtual ports. */
+
+#define OVS_VPORT_FAMILY  "ovs_vport"
+#define OVS_VPORT_MCGROUP "ovs_vport"
+#define OVS_VPORT_VERSION 0x1
+
+enum ovs_vport_cmd {
+       OVS_VPORT_CMD_UNSPEC,
+       OVS_VPORT_CMD_NEW,
+       OVS_VPORT_CMD_DEL,
+       OVS_VPORT_CMD_GET,
+       OVS_VPORT_CMD_SET
+};
+
+enum ovs_vport_type {
+       OVS_VPORT_TYPE_UNSPEC,
+       OVS_VPORT_TYPE_NETDEV,   /* network device */
+       OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
+       __OVS_VPORT_TYPE_MAX
+};
+
+#define OVS_VPORT_TYPE_MAX (__OVS_VPORT_TYPE_MAX - 1)
+
+/**
+ * enum ovs_vport_attr - attributes for %OVS_VPORT_* commands.
+ * @OVS_VPORT_ATTR_PORT_NO: 32-bit port number within datapath.
+ * @OVS_VPORT_ATTR_TYPE: 32-bit %OVS_VPORT_TYPE_* constant describing the type
+ * of vport.
+ * @OVS_VPORT_ATTR_NAME: Name of vport.  For a vport based on a network device
+ * this is the name of the network device.  Maximum length %IFNAMSIZ-1 bytes
+ * plus a null terminator.
+ * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
+ * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
+ * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
+ * this port.  A value of zero indicates that upcalls should not be sent.
+ * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
+ * packets sent or received through the vport.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_VPORT_* commands.
+ *
+ * For %OVS_VPORT_CMD_NEW requests, the %OVS_VPORT_ATTR_TYPE and
+ * %OVS_VPORT_ATTR_NAME attributes are required.  %OVS_VPORT_ATTR_PORT_NO is
+ * optional; if not specified a free port number is automatically selected.
+ * Whether %OVS_VPORT_ATTR_OPTIONS is required or optional depends on the type
+ * of vport.
+ * Other attributes are ignored.
+ *
+ * For other requests, if %OVS_VPORT_ATTR_NAME is specified then it is used to
+ * look up the vport to operate on; otherwise dp_idx from the &struct
+ * ovs_header plus %OVS_VPORT_ATTR_PORT_NO determine the vport.
+ */
+enum ovs_vport_attr {
+       OVS_VPORT_ATTR_UNSPEC,
+       OVS_VPORT_ATTR_PORT_NO, /* u32 port number within datapath */
+       OVS_VPORT_ATTR_TYPE,    /* u32 OVS_VPORT_TYPE_* constant. */
+       OVS_VPORT_ATTR_NAME,    /* string name, up to IFNAMSIZ bytes long */
+       OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
+       OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
+       OVS_VPORT_ATTR_STATS,   /* struct ovs_vport_stats */
+       __OVS_VPORT_ATTR_MAX
+};
+
+#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+
+/* Flows. */
+
+#define OVS_FLOW_FAMILY  "ovs_flow"
+#define OVS_FLOW_MCGROUP "ovs_flow"
+#define OVS_FLOW_VERSION 0x1
+
+enum ovs_flow_cmd {
+       OVS_FLOW_CMD_UNSPEC,
+       OVS_FLOW_CMD_NEW,
+       OVS_FLOW_CMD_DEL,
+       OVS_FLOW_CMD_GET,
+       OVS_FLOW_CMD_SET
+};
+
+struct ovs_flow_stats {
+       __u64 n_packets;         /* Number of matched packets. */
+       __u64 n_bytes;           /* Number of matched bytes. */
+};
+
+enum ovs_key_attr {
+       OVS_KEY_ATTR_UNSPEC,
+       OVS_KEY_ATTR_ENCAP,     /* Nested set of encapsulated attributes. */
+       OVS_KEY_ATTR_PRIORITY,  /* u32 skb->priority */
+       OVS_KEY_ATTR_IN_PORT,   /* u32 OVS dp port number */
+       OVS_KEY_ATTR_ETHERNET,  /* struct ovs_key_ethernet */
+       OVS_KEY_ATTR_VLAN,      /* be16 VLAN TCI */
+       OVS_KEY_ATTR_ETHERTYPE, /* be16 Ethernet type */
+       OVS_KEY_ATTR_IPV4,      /* struct ovs_key_ipv4 */
+       OVS_KEY_ATTR_IPV6,      /* struct ovs_key_ipv6 */
+       OVS_KEY_ATTR_TCP,       /* struct ovs_key_tcp */
+       OVS_KEY_ATTR_UDP,       /* struct ovs_key_udp */
+       OVS_KEY_ATTR_ICMP,      /* struct ovs_key_icmp */
+       OVS_KEY_ATTR_ICMPV6,    /* struct ovs_key_icmpv6 */
+       OVS_KEY_ATTR_ARP,       /* struct ovs_key_arp */
+       OVS_KEY_ATTR_ND,        /* struct ovs_key_nd */
+       __OVS_KEY_ATTR_MAX
+};
+
+#define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1)
+
+/**
+ * enum ovs_frag_type - IPv4 and IPv6 fragment type
+ * @OVS_FRAG_TYPE_NONE: Packet is not a fragment.
+ * @OVS_FRAG_TYPE_FIRST: Packet is a fragment with offset 0.
+ * @OVS_FRAG_TYPE_LATER: Packet is a fragment with nonzero offset.
+ *
+ * Used as the @ipv4_frag in &struct ovs_key_ipv4 and as @ipv6_frag in
+ * &struct ovs_key_ipv6.
+ */
+enum ovs_frag_type {
+       OVS_FRAG_TYPE_NONE,
+       OVS_FRAG_TYPE_FIRST,
+       OVS_FRAG_TYPE_LATER,
+       __OVS_FRAG_TYPE_MAX
+};
+
+#define OVS_FRAG_TYPE_MAX (__OVS_FRAG_TYPE_MAX - 1)
+
+struct ovs_key_ethernet {
+       __u8     eth_src[6];
+       __u8     eth_dst[6];
+};
+
+struct ovs_key_ipv4 {
+       __be32 ipv4_src;
+       __be32 ipv4_dst;
+       __u8   ipv4_proto;
+       __u8   ipv4_tos;
+       __u8   ipv4_ttl;
+       __u8   ipv4_frag;       /* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_ipv6 {
+       __be32 ipv6_src[4];
+       __be32 ipv6_dst[4];
+       __be32 ipv6_label;      /* 20-bits in least-significant bits. */
+       __u8   ipv6_proto;
+       __u8   ipv6_tclass;
+       __u8   ipv6_hlimit;
+       __u8   ipv6_frag;       /* One of OVS_FRAG_TYPE_*. */
+};
+
+struct ovs_key_tcp {
+       __be16 tcp_src;
+       __be16 tcp_dst;
+};
+
+struct ovs_key_udp {
+       __be16 udp_src;
+       __be16 udp_dst;
+};
+
+struct ovs_key_icmp {
+       __u8 icmp_type;
+       __u8 icmp_code;
+};
+
+struct ovs_key_icmpv6 {
+       __u8 icmpv6_type;
+       __u8 icmpv6_code;
+};
+
+struct ovs_key_arp {
+       __be32 arp_sip;
+       __be32 arp_tip;
+       __be16 arp_op;
+       __u8   arp_sha[6];
+       __u8   arp_tha[6];
+};
+
+struct ovs_key_nd {
+       __u32 nd_target[4];
+       __u8  nd_sll[6];
+       __u8  nd_tll[6];
+};
+
+/**
+ * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
+ * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
+ * key.  Always present in notifications.  Required for all requests (except
+ * dumps).
+ * @OVS_FLOW_ATTR_ACTIONS: Nested %OVS_ACTION_ATTR_* attributes specifying
+ * the actions to take for packets that match the key.  Always present in
+ * notifications.  Required for %OVS_FLOW_CMD_NEW requests, optional for
+ * %OVS_FLOW_CMD_SET requests.
+ * @OVS_FLOW_ATTR_STATS: &struct ovs_flow_stats giving statistics for this
+ * flow.  Present in notifications if the stats would be nonzero.  Ignored in
+ * requests.
+ * @OVS_FLOW_ATTR_TCP_FLAGS: An 8-bit value giving the OR'd value of all of the
+ * TCP flags seen on packets in this flow.  Only present in notifications for
+ * TCP flows, and only if it would be nonzero.  Ignored in requests.
+ * @OVS_FLOW_ATTR_USED: A 64-bit integer giving the time, in milliseconds on
+ * the system monotonic clock, at which a packet was last processed for this
+ * flow.  Only present in notifications if a packet has been processed for this
+ * flow.  Ignored in requests.
+ * @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
+ * last-used time, accumulated TCP flags, and statistics for this flow.
+ * Otherwise ignored in requests.  Never present in notifications.
+ *
+ * These attributes follow the &struct ovs_header within the Generic Netlink
+ * payload for %OVS_FLOW_* commands.
+ */
+enum ovs_flow_attr {
+       OVS_FLOW_ATTR_UNSPEC,
+       OVS_FLOW_ATTR_KEY,       /* Sequence of OVS_KEY_ATTR_* attributes. */
+       OVS_FLOW_ATTR_ACTIONS,   /* Nested OVS_ACTION_ATTR_* attributes. */
+       OVS_FLOW_ATTR_STATS,     /* struct ovs_flow_stats. */
+       OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
+       OVS_FLOW_ATTR_USED,      /* u64 msecs last used in monotonic time. */
+       OVS_FLOW_ATTR_CLEAR,     /* Flag to clear stats, tcp_flags, used. */
+       __OVS_FLOW_ATTR_MAX
+};
+
+#define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1)
+
+/**
+ * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action.
+ * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with
+ * @OVS_ACTION_ATTR_SAMPLE.  A value of 0 samples no packets, a value of
+ * %UINT32_MAX samples all packets and intermediate values sample intermediate
+ * fractions of packets.
+ * @OVS_SAMPLE_ATTR_ACTIONS: Set of actions to execute in sampling event.
+ * Actions are passed as nested attributes.
+ *
+ * Executes the specified actions with the given probability on a per-packet
+ * basis.
+ */
+enum ovs_sample_attr {
+       OVS_SAMPLE_ATTR_UNSPEC,
+       OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
+       OVS_SAMPLE_ATTR_ACTIONS,     /* Nested OVS_ACTION_ATTR_* attributes. */
+       __OVS_SAMPLE_ATTR_MAX,
+};
+
+#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
+
+/**
+ * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
+ * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
+ * message should be sent.  Required.
+ * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the
+ * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
+ */
+enum ovs_userspace_attr {
+       OVS_USERSPACE_ATTR_UNSPEC,
+       OVS_USERSPACE_ATTR_PID,       /* u32 Netlink PID to receive upcalls. */
+       OVS_USERSPACE_ATTR_USERDATA,  /* u64 optional user-specified cookie. */
+       __OVS_USERSPACE_ATTR_MAX
+};
+
+#define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1)
+
+/**
+ * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
+ * @vlan_tpid: Tag protocol identifier (TPID) to push.
+ * @vlan_tci: Tag control identifier (TCI) to push.  The CFI bit must be set
+ * (but it will not be set in the 802.1Q header that is pushed).
+ *
+ * The @vlan_tpid value is typically %ETH_P_8021Q.  The only acceptable TPID
+ * values are those that the kernel module also parses as 802.1Q headers, to
+ * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
+ * from having surprising results.
+ */
+struct ovs_action_push_vlan {
+       __be16 vlan_tpid;       /* 802.1Q TPID. */
+       __be16 vlan_tci;        /* 802.1Q TCI (VLAN ID and priority). */
+};
+
+/**
+ * enum ovs_action_attr - Action types.
+ *
+ * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
+ * @OVS_ACTION_ATTR_USERSPACE: Send packet to userspace according to nested
+ * %OVS_USERSPACE_ATTR_* attributes.
+ * @OVS_ACTION_ATTR_SET: Replaces the contents of an existing header.  The
+ * single nested %OVS_KEY_ATTR_* attribute specifies a header to modify and its
+ * value.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
+ * packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
+ * the nested %OVS_SAMPLE_ATTR_* attributes.
+ *
+ * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
+ * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
+ * type may not be changed.
+ */
+
+enum ovs_action_attr {
+       OVS_ACTION_ATTR_UNSPEC,
+       OVS_ACTION_ATTR_OUTPUT,       /* u32 port number. */
+       OVS_ACTION_ATTR_USERSPACE,    /* Nested OVS_USERSPACE_ATTR_*. */
+       OVS_ACTION_ATTR_SET,          /* One nested OVS_KEY_ATTR_*. */
+       OVS_ACTION_ATTR_PUSH_VLAN,    /* struct ovs_action_push_vlan. */
+       OVS_ACTION_ATTR_POP_VLAN,     /* No argument. */
+       OVS_ACTION_ATTR_SAMPLE,       /* Nested OVS_SAMPLE_ATTR_*. */
+       __OVS_ACTION_ATTR_MAX
+};
+
+#define OVS_ACTION_ATTR_MAX (__OVS_ACTION_ATTR_MAX - 1)
+
+#endif /* _LINUX_OPENVSWITCH_H */
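(Editorial aside.) The %OVS_SAMPLE_ATTR_PROBABILITY documentation earlier in this
header defines the sampling rate as a linear fraction of UINT32_MAX. A small
self-contained sketch of that conversion; the helper name and the example rates are
made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Map a sampling fraction onto the u32 carried in OVS_SAMPLE_ATTR_PROBABILITY:
 * 0 samples nothing, UINT32_MAX samples every packet, values in between scale
 * linearly. */
static uint32_t ovs_sample_probability(double fraction)
{
        if (fraction <= 0.0)
                return 0;
        if (fraction >= 1.0)
                return UINT32_MAX;
        return (uint32_t)(fraction * (double)UINT32_MAX);
}

int main(void)
{
        printf("sample 1 in 100 packets -> %u\n", ovs_sample_probability(0.01));
        printf("sample every packet     -> %u\n", ovs_sample_probability(1.0));
        return 0;
}
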
index f53a416..f48bfc8 100644 (file)
@@ -38,6 +38,7 @@
 #define PNPIPE_ENCAP           1
 #define PNPIPE_IFINDEX         2
 #define PNPIPE_HANDLE          3
+#define PNPIPE_INITSTATE       4
 
 #define PNADDR_ANY             0
 #define PNADDR_BROADCAST       0xFC
@@ -49,6 +50,7 @@
 
 /* ioctls */
 #define SIOCPNGETOBJECT                (SIOCPROTOPRIVATE + 0)
+#define SIOCPNENABLEPIPE       (SIOCPROTOPRIVATE + 13)
 #define SIOCPNADDRESOURCE      (SIOCPROTOPRIVATE + 14)
 #define SIOCPNDELRESOURCE      (SIOCPROTOPRIVATE + 15)
 
index 7281d5a..8daced3 100644 (file)
@@ -181,6 +181,7 @@ enum {
        TCA_RED_UNSPEC,
        TCA_RED_PARMS,
        TCA_RED_STAB,
+       TCA_RED_MAX_P,
        __TCA_RED_MAX,
 };
 
@@ -194,8 +195,9 @@ struct tc_red_qopt {
        unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
        unsigned char   Scell_log;      /* cell size for idle damping */
        unsigned char   flags;
-#define TC_RED_ECN     1
-#define TC_RED_HARDDROP        2
+#define TC_RED_ECN             1
+#define TC_RED_HARDDROP                2
+#define TC_RED_ADAPTATIVE      4
 };
 
 struct tc_red_xstats {
@@ -214,6 +216,7 @@ enum {
        TCA_GRED_PARMS,
        TCA_GRED_STAB,
        TCA_GRED_DPS,
+       TCA_GRED_MAX_P,
           __TCA_GRED_MAX,
 };
 
@@ -253,6 +256,7 @@ enum {
        TCA_CHOKE_UNSPEC,
        TCA_CHOKE_PARMS,
        TCA_CHOKE_STAB,
+       TCA_CHOKE_MAX_P,
        __TCA_CHOKE_MAX,
 };
 
@@ -465,6 +469,7 @@ enum {
        TCA_NETEM_REORDER,
        TCA_NETEM_CORRUPT,
        TCA_NETEM_LOSS,
+       TCA_NETEM_RATE,
        __TCA_NETEM_MAX,
 };
 
@@ -495,6 +500,13 @@ struct tc_netem_corrupt {
        __u32   correlation;
 };
 
+struct tc_netem_rate {
+       __u32   rate;   /* byte/s */
+       __s32   packet_overhead;
+       __u32   cell_size;
+       __s32   cell_overhead;
+};
+
 enum {
        NETEM_LOSS_UNSPEC,
        NETEM_LOSS_GI,          /* General Intuitive - 4 state model */
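(Editorial aside.) The new struct tc_netem_rate above describes a rate-limited link
in terms of rate, per-packet overhead and link-layer cell size/overhead. A rough
standalone sketch (not the kernel's exact arithmetic) of how those four fields could
turn a packet length into a transmit delay: overhead is added, the result is
optionally rounded up to whole cells with their own overhead, and the total is
divided by the rate in bytes per second. The function name and example figures are
illustrative only:

#include <stdint.h>
#include <stdio.h>

static double netem_tx_time_us(uint32_t len_bytes, uint32_t rate_Bps,
                               int32_t packet_overhead,
                               uint32_t cell_size, int32_t cell_overhead)
{
        int64_t bytes = (int64_t)len_bytes + packet_overhead;

        if (cell_size) {
                /* round up to whole cells, each carrying extra overhead */
                int64_t cells = (bytes + cell_size - 1) / cell_size;
                bytes = cells * ((int64_t)cell_size + cell_overhead);
        }
        return (double)bytes * 1e6 / rate_Bps;
}

int main(void)
{
        /* 1500-byte packet at 1 Mbyte/s, carried in 48-byte cells that each
         * cost 5 extra bytes on the wire (ATM-like framing) -> 1696 us */
        printf("%.1f us\n", netem_tx_time_us(1500, 1000000, 0, 48, 5));
        return 0;
}
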
index fe86488..12e6fed 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/dmaengine.h>
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
 
 /* Don't change this without changing skb_csum_unnecessary! */
 #define CHECKSUM_NONE 0
@@ -87,7 +88,6 @@
  *     at device setup time.
  *     NETIF_F_HW_CSUM - it is clever device, it is able to checksum
  *                       everything.
- *     NETIF_F_NO_CSUM - loopback or reliable single hop media.
  *     NETIF_F_IP_CSUM - device is dumb. It is able to csum only
  *                       TCP/UDP over IPv4. Sigh. Vendors like this
 *                       way for an unknown reason. Though, see comment above
@@ -218,6 +218,9 @@ enum {
 
        /* device driver supports TX zero-copy buffers */
        SKBTX_DEV_ZEROCOPY = 1 << 4,
+
+       /* generate wifi status information (where possible) */
+       SKBTX_WIFI_STATUS = 1 << 5,
 };
 
 /*
@@ -352,6 +355,8 @@ typedef unsigned char *sk_buff_data_t;
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
  *             ports.
+ *     @wifi_acked_valid: wifi_acked was set
+ *     @wifi_acked: whether frame was acked on wifi or not
  *     @dma_cookie: a cookie to one of several possible DMA operations
  *             done by skb DMA functions
  *     @secmark: security marking
@@ -445,10 +450,11 @@ struct sk_buff {
 #endif
        __u8                    ooo_okay:1;
        __u8                    l4_rxhash:1;
+       __u8                    wifi_acked_valid:1;
+       __u8                    wifi_acked:1;
+       /* 10/12 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
-       /* 0/13 bit hole */
-
 #ifdef CONFIG_NET_DMA
        dma_cookie_t            dma_cookie;
 #endif
@@ -540,6 +546,7 @@ extern void consume_skb(struct sk_buff *skb);
 extern void           __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
                                   gfp_t priority, int fclone, int node);
+extern struct sk_buff *build_skb(void *data);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
@@ -561,8 +568,9 @@ extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
                                gfp_t priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-                                gfp_t gfp_mask);
+extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
+                                int headroom, gfp_t gfp_mask);
+
 extern int            pskb_expand_head(struct sk_buff *skb,
                                        int nhead, int ntail,
                                        gfp_t gfp_mask);
@@ -1662,38 +1670,6 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 }
 
 /**
- *     __netdev_alloc_page - allocate a page for ps-rx on a specific device
- *     @dev: network device to receive on
- *     @gfp_mask: alloc_pages_node mask
- *
- *     Allocate a new page. dev currently unused.
- *
- *     %NULL is returned if there is no free memory.
- */
-static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
-{
-       return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
-}
-
-/**
- *     netdev_alloc_page - allocate a page for ps-rx on a specific device
- *     @dev: network device to receive on
- *
- *     Allocate a new page. dev currently unused.
- *
- *     %NULL is returned if there is no free memory.
- */
-static inline struct page *netdev_alloc_page(struct net_device *dev)
-{
-       return __netdev_alloc_page(dev, GFP_ATOMIC);
-}
-
-static inline void netdev_free_page(struct net_device *dev, struct page *page)
-{
-       __free_page(page);
-}
-
-/**
  * skb_frag_page - retrieve the page referred to by a paged fragment
  * @frag: the paged fragment
  *
@@ -1824,6 +1800,12 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
                            frag->page_offset + offset, size, dir);
 }
 
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+                                       gfp_t gfp_mask)
+{
+       return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
 /**
  *     skb_clone_writable - is the header of a clone writable
  *     @skb: buffer to check
@@ -2105,7 +2087,8 @@ extern void              skb_split(struct sk_buff *skb,
 extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                                 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb,
+                                  netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
@@ -2263,6 +2246,15 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
        sw_tx_timestamp(skb);
 }
 
+/**
+ * skb_complete_wifi_ack - deliver skb with wifi status
+ *
+ * @skb: the original outgoing packet
+ * @acked: ack status
+ *
+ */
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
+
 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
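(Editorial aside.) The skbuff.h changes above introduce the SKBTX_WIFI_STATUS tx
flag, the wifi_acked_valid/wifi_acked bits and the skb_complete_wifi_ack() helper.
A hypothetical driver-side sketch of how they fit together; only those identifiers
come from this merge, while the function name, its caller and the frame_was_acked
parameter are invented for illustration:

#include <linux/skbuff.h>

/* Hypothetical TX-completion path in a wireless driver. */
static void example_wifi_tx_done(struct sk_buff *skb, bool frame_was_acked)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) {
                /* Record the ACK result in wifi_acked/wifi_acked_valid and
                 * hand the skb back via the owning socket's error queue so
                 * userspace can read the status. */
                skb_complete_wifi_ack(skb, frame_was_acked);
                return;
        }
        dev_kfree_skb_any(skb);
}
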
 
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644 (file)
index 0000000..ba4933b
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef __SOCK_DIAG_H__
+#define __SOCK_DIAG_H__
+struct sk_buff;
+struct nlmsghdr;
+
+struct sock_diag_req {
+       __u8    sdiag_family;
+       __u8    sdiag_protocol;
+};
+
+struct sock_diag_handler {
+       __u8 family;
+       int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+int sock_diag_register(struct sock_diag_handler *h);
+void sock_diag_unregister(struct sock_diag_handler *h);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+
+extern struct sock *sock_diag_nlsk;
+#endif
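(Editorial aside.) The new sock_diag.h above only declares a per-family handler
struct and its register/unregister calls. A hypothetical sketch of how a protocol
module might hook into it; the family value, the function names and the empty dump
body are placeholders, while sock_diag_handler, sock_diag_register() and
sock_diag_unregister() come from the header itself:

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>

static int example_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* parse the struct sock_diag_req that follows nlh and reply here */
        return 0;
}

static struct sock_diag_handler example_diag_handler = {
        .family = AF_INET,      /* handle requests with sdiag_family == AF_INET */
        .dump   = example_diag_dump,
};

static int __init example_diag_init(void)
{
        return sock_diag_register(&example_diag_handler);
}

static void __exit example_diag_exit(void)
{
        sock_diag_unregister(&example_diag_handler);
}

module_init(example_diag_init);
module_exit(example_diag_exit);
MODULE_LICENSE("GPL");
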
index 3d8f9c4..2c5993a 100644 (file)
@@ -215,7 +215,7 @@ static inline bool __rpc_copy_addr4(struct sockaddr *dst,
        return true;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
                                   const struct sockaddr *sap2)
 {
@@ -237,10 +237,10 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
        struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
 
        dsin6->sin6_family = ssin6->sin6_family;
-       ipv6_addr_copy(&dsin6->sin6_addr, &ssin6->sin6_addr);
+       dsin6->sin6_addr = ssin6->sin6_addr;
        return true;
 }
-#else  /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#else  /* !IS_ENABLED(CONFIG_IPV6) */
 static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
                                   const struct sockaddr *sap2)
 {
@@ -252,7 +252,7 @@ static inline bool __rpc_copy_addr6(struct sockaddr *dst,
 {
        return false;
 }
-#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
 
 /**
  * rpc_cmp_addr - compare the address portion of two sockaddrs.
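(Editorial aside.) The hunks above replace the old "CONFIG_IPV6 || CONFIG_IPV6_MODULE"
test with IS_ENABLED(CONFIG_IPV6), which is true whether the option is built in or
built as a module. A simplified standalone re-creation of the idea, for illustration
only (the kernel's real macro lives in its kconfig headers and differs in detail);
the CONFIG_IPV6_MODULE define at the end just simulates a modular IPv6 build:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)                 ___is_defined(x)
#define ___is_defined(val)              ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)    __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_IPV6_MODULE 1    /* pretend IPv6 was built as a module */

int main(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                printf("IPv6 code paths enabled (=y or =m)\n");
        else
                printf("IPv6 code paths compiled out\n");
        return 0;
}
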
index e9e72bd..5206d65 100644 (file)
  *     vdev: the virtio_device
  *     This gives the final feature bits for the device: it can change
  *     the dev->feature bits if it wants.
+ * @bus_name: return the bus name associated with the device
+ *     vdev: the virtio_device
+ *      This returns a pointer to the bus name a la pci_name from which
+ *      the caller can then copy.
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
@@ -119,6 +123,7 @@ struct virtio_config_ops {
        void (*del_vqs)(struct virtio_device *);
        u32 (*get_features)(struct virtio_device *vdev);
        void (*finalize_features)(struct virtio_device *vdev);
+       const char *(*bus_name)(struct virtio_device *vdev);
 };
 
 /* If driver didn't advertise the feature, it will never appear. */
@@ -184,5 +189,14 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                return ERR_PTR(err);
        return vq;
 }
+
+static inline
+const char *virtio_bus_name(struct virtio_device *vdev)
+{
+       if (!vdev->config->bus_name)
+               return "virtio";
+       return vdev->config->bus_name(vdev);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_VIRTIO_CONFIG_H */
index 4b69739..0d63731 100644 (file)
@@ -54,6 +54,9 @@ struct wl12xx_platform_data {
        int board_ref_clock;
        int board_tcxo_clock;
        unsigned long platform_quirks;
+       bool pwr_in_suspend;
+
+       struct wl1271_if_operations *ops;
 };
 
 /* Platform does not support level trigger interrupts */
@@ -73,6 +76,6 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 
 #endif
 
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void);
+struct wl12xx_platform_data *wl12xx_get_platform_data(void);
 
 #endif
index 497ef64..5865924 100644 (file)
@@ -15,7 +15,6 @@
 
 
 #define CLIP_VCC(vcc) ((struct clip_vcc *) ((vcc)->user_back))
-#define NEIGH2ENTRY(neigh) ((struct atmarp_entry *) (neigh)->primary_key)
 
 struct sk_buff;
 
@@ -36,24 +35,18 @@ struct clip_vcc {
 
 
 struct atmarp_entry {
-       __be32          ip;             /* IP address */
        struct clip_vcc *vccs;          /* active VCCs; NULL if resolution is
                                           pending */
        unsigned long   expires;        /* entry expiration time */
        struct neighbour *neigh;        /* neighbour back-pointer */
 };
 
-
 #define PRIV(dev) ((struct clip_priv *) netdev_priv(dev))
 
-
 struct clip_priv {
        int number;                     /* for convenience ... */
        spinlock_t xoff_lock;           /* ensures that pop is atomic (SMP) */
        struct net_device *next;        /* next CLIP interface */
 };
 
-
-extern struct neigh_table *clip_tbl_hook;
-
 #endif
index e86af08..980e59f 100644 (file)
 #define PF_BLUETOOTH   AF_BLUETOOTH
 #endif
 
+/* Bluetooth versions */
+#define BLUETOOTH_VER_1_1      1
+#define BLUETOOTH_VER_1_2      2
+#define BLUETOOTH_VER_2_0      3
+
 /* Reserved for core and drivers use */
 #define BT_SKB_RESERVE 8
 
@@ -77,6 +82,33 @@ struct bt_power {
 #define BT_POWER_FORCE_ACTIVE_OFF 0
 #define BT_POWER_FORCE_ACTIVE_ON  1
 
+#define BT_CHANNEL_POLICY      10
+
+/* BR/EDR only (default policy)
+ *   AMP controllers cannot be used.
+ *   Channel move requests from the remote device are denied.
+ *   If the L2CAP channel is currently using AMP, move the channel to BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_BREDR_ONLY           0
+
+/* BR/EDR Preferred
+ *   Allow use of AMP controllers.
+ *   If the L2CAP channel is currently on AMP, move it to BR/EDR.
+ *   Channel move requests from the remote device are allowed.
+ */
+#define BT_CHANNEL_POLICY_BREDR_PREFERRED      1
+
+/* AMP Preferred
+ *   Allow use of AMP controllers
+ *   If the L2CAP channel is currently on BR/EDR and AMP controller
+ *     resources are available, initiate a channel move to AMP.
+ *   Channel move requests from the remote device are allowed.
+ *   If the L2CAP socket has not been connected yet, try to create
+ *     and configure the channel directly on an AMP controller rather
+ *     than BR/EDR.
+ */
+#define BT_CHANNEL_POLICY_AMP_PREFERRED                2
+
 __printf(2, 3)
 int bt_printk(const char *level, const char *fmt, ...);
 
@@ -158,7 +190,7 @@ struct bt_skb_cb {
        __u8 pkt_type;
        __u8 incoming;
        __u16 expect;
-       __u8 tx_seq;
+       __u16 tx_seq;
        __u8 retries;
        __u8 sar;
        unsigned short channel;
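(Editorial aside.) The new BT_CHANNEL_POLICY socket option above lets an application
choose between the BR/EDR-only, BR/EDR-preferred and AMP-preferred policies. A
hypothetical userspace sketch of setting it on an L2CAP socket; the option number
and policy value are copied from the hunk above, while SOL_BLUETOOTH (274),
BTPROTO_L2CAP (0) and the assumption that the option takes a u32 follow the usual
Bluetooth socket conventions and are not part of this patch:

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_BLUETOOTH
#define SOL_BLUETOOTH 274                       /* Linux socket level for BT */
#endif
#define BT_CHANNEL_POLICY               10      /* from the header above */
#define BT_CHANNEL_POLICY_AMP_PREFERRED  2
#define BTPROTO_L2CAP                    0

int main(void)
{
        uint32_t policy = BT_CHANNEL_POLICY_AMP_PREFERRED;
        int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

        if (sk < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(sk, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
                       &policy, sizeof(policy)) < 0)
                perror("setsockopt(BT_CHANNEL_POLICY)");
        return 0;
}
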
index aaf79af..67ad984 100644 (file)
@@ -88,6 +88,14 @@ enum {
        HCI_RESET,
 };
 
+/*
+ * BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states from the controller.
+ */
+enum {
+       HCI_LE_SCAN,
+};
+
 /* HCI ioctl defines */
 #define HCIDEVUP       _IOW('H', 201, int)
 #define HCIDEVDOWN     _IOW('H', 202, int)
@@ -264,6 +272,13 @@ enum {
 #define HCI_LK_SMP_IRK                 0x82
 #define HCI_LK_SMP_CSRK                        0x83
 
+/* ---- HCI Error Codes ---- */
+#define HCI_ERROR_AUTH_FAILURE         0x05
+#define HCI_ERROR_REJ_BAD_ADDR         0x0f
+#define HCI_ERROR_REMOTE_USER_TERM     0x13
+#define HCI_ERROR_LOCAL_HOST_TERM      0x16
+#define HCI_ERROR_PAIRING_NOT_ALLOWED  0x18
+
 /* -----  HCI Commands ---- */
 #define HCI_OP_NOP                     0x0000
 
@@ -446,6 +461,14 @@ struct hci_rp_user_confirm_reply {
 
 #define HCI_OP_USER_CONFIRM_NEG_REPLY  0x042d
 
+#define HCI_OP_USER_PASSKEY_REPLY              0x042e
+struct hci_cp_user_passkey_reply {
+       bdaddr_t bdaddr;
+       __le32  passkey;
+} __packed;
+
+#define HCI_OP_USER_PASSKEY_NEG_REPLY  0x042f
+
 #define HCI_OP_REMOTE_OOB_DATA_REPLY   0x0430
 struct hci_cp_remote_oob_data_reply {
        bdaddr_t bdaddr;
@@ -662,6 +685,12 @@ struct hci_rp_read_local_oob_data {
 
 #define HCI_OP_READ_INQ_RSP_TX_POWER   0x0c58
 
+#define HCI_OP_READ_FLOW_CONTROL_MODE  0x0c66
+struct hci_rp_read_flow_control_mode {
+       __u8     status;
+       __u8     mode;
+} __packed;
+
 #define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d
 struct hci_cp_write_le_host_supported {
        __u8 le;
@@ -726,6 +755,21 @@ struct hci_cp_write_page_scan_activity {
        #define PAGE_SCAN_TYPE_STANDARD         0x00
        #define PAGE_SCAN_TYPE_INTERLACED       0x01
 
+#define HCI_OP_READ_LOCAL_AMP_INFO     0x1409
+struct hci_rp_read_local_amp_info {
+       __u8     status;
+       __u8     amp_status;
+       __le32   total_bw;
+       __le32   max_bw;
+       __le32   min_latency;
+       __le32   max_pdu;
+       __u8     amp_type;
+       __le16   pal_cap;
+       __le16   max_assoc_size;
+       __le32   max_flush_to;
+       __le32   be_flush_to;
+} __packed;
+
 #define HCI_OP_LE_SET_EVENT_MASK       0x2001
 struct hci_cp_le_set_event_mask {
        __u8     mask[8];
@@ -738,6 +782,15 @@ struct hci_rp_le_read_buffer_size {
        __u8     le_max_pkt;
 } __packed;
 
+#define HCI_OP_LE_SET_SCAN_PARAM       0x200b
+struct hci_cp_le_set_scan_param {
+       __u8    type;
+       __le16  interval;
+       __le16  window;
+       __u8    own_address_type;
+       __u8    filter_policy;
+} __packed;
+
 #define HCI_OP_LE_SET_SCAN_ENABLE      0x200c
 struct hci_cp_le_set_scan_enable {
        __u8     enable;
@@ -1054,6 +1107,11 @@ struct hci_ev_user_confirm_req {
        __le32          passkey;
 } __packed;
 
+#define HCI_EV_USER_PASSKEY_REQUEST    0x34
+struct hci_ev_user_passkey_req {
+       bdaddr_t        bdaddr;
+} __packed;
+
 #define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35
 struct hci_ev_remote_oob_data_request {
        bdaddr_t bdaddr;
@@ -1309,4 +1367,6 @@ struct hci_inquiry_req {
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
+extern int enable_hs;
+
 #endif /* __HCI_H */
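(Editorial aside.) The new struct hci_cp_le_set_scan_param above is a packed command
payload whose multi-byte fields are little-endian (__le16). A self-contained sketch
of serializing that 7-byte payload from userspace; the chosen interval/window
(10 ms in 0.625 ms units), scan type and filter policy are just example values, not
mandated by the header:

#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
}

int main(void)
{
        uint8_t cp[7];

        cp[0] = 0x01;                   /* type: active scan */
        put_le16(&cp[1], 0x0010);       /* interval: 16 * 0.625 ms = 10 ms */
        put_le16(&cp[3], 0x0010);       /* window:   16 * 0.625 ms = 10 ms */
        cp[5] = 0x00;                   /* own_address_type: public */
        cp[6] = 0x00;                   /* filter_policy: accept all */

        for (unsigned int i = 0; i < sizeof(cp); i++)
                printf("%02x ", cp[i]);
        printf("   <- payload for opcode 0x200b\n");
        return 0;
}
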
index 3779ea3..ea4395f 100644 (file)
@@ -32,6 +32,9 @@
 #define HCI_PROTO_L2CAP        0
 #define HCI_PROTO_SCO  1
 
+/* HCI priority */
+#define HCI_PRIO_MAX   7
+
 /* HCI Core structures */
 struct inquiry_data {
        bdaddr_t        bdaddr;
@@ -64,6 +67,12 @@ struct hci_conn_hash {
        unsigned int     le_num;
 };
 
+struct hci_chan_hash {
+       struct list_head list;
+       spinlock_t       lock;
+       unsigned int     num;
+};
+
 struct bdaddr_list {
        struct list_head list;
        bdaddr_t bdaddr;
@@ -150,6 +159,19 @@ struct hci_dev {
        __u16           sniff_min_interval;
        __u16           sniff_max_interval;
 
+       __u8            amp_status;
+       __u32           amp_total_bw;
+       __u32           amp_max_bw;
+       __u32           amp_min_latency;
+       __u32           amp_max_pdu;
+       __u8            amp_type;
+       __u16           amp_pal_cap;
+       __u16           amp_assoc_size;
+       __u32           amp_max_flush_to;
+       __u32           amp_be_flush_to;
+
+       __u8            flow_ctl_mode;
+
        unsigned int    auto_accept_delay;
 
        unsigned long   quirks;
@@ -173,8 +195,10 @@ struct hci_dev {
        struct workqueue_struct *workqueue;
 
        struct work_struct      power_on;
-       struct work_struct      power_off;
-       struct timer_list       off_timer;
+       struct delayed_work     power_off;
+
+       __u16                   discov_timeout;
+       struct delayed_work     discov_off;
 
        struct timer_list       cmd_timer;
        struct tasklet_struct   cmd_task;
@@ -195,6 +219,8 @@ struct hci_dev {
 
        __u16                   init_last_cmd;
 
+       struct list_head        mgmt_pending;
+
        struct inquiry_cache    inq_cache;
        struct hci_conn_hash    conn_hash;
        struct list_head        blacklist;
@@ -226,6 +252,8 @@ struct hci_dev {
 
        struct module           *owner;
 
+       unsigned long           dev_flags;
+
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
@@ -273,6 +301,7 @@ struct hci_conn {
        unsigned int    sent;
 
        struct sk_buff_head data_q;
+       struct hci_chan_hash chan_hash;
 
        struct timer_list disc_timer;
        struct timer_list idle_timer;
@@ -295,6 +324,14 @@ struct hci_conn {
        void (*disconn_cfm_cb)  (struct hci_conn *conn, u8 reason);
 };
 
+struct hci_chan {
+       struct list_head list;
+
+       struct hci_conn *conn;
+       struct sk_buff_head data_q;
+       unsigned int    sent;
+};
+
 extern struct hci_proto *hci_proto[];
 extern struct list_head hci_dev_list;
 extern struct list_head hci_cb_list;
@@ -455,6 +492,28 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
        return NULL;
 }
 
+static inline void hci_chan_hash_init(struct hci_conn *c)
+{
+       struct hci_chan_hash *h = &c->chan_hash;
+       INIT_LIST_HEAD(&h->list);
+       spin_lock_init(&h->lock);
+       h->num = 0;
+}
+
+static inline void hci_chan_hash_add(struct hci_conn *c, struct hci_chan *chan)
+{
+       struct hci_chan_hash *h = &c->chan_hash;
+       list_add(&chan->list, &h->list);
+       h->num++;
+}
+
+static inline void hci_chan_hash_del(struct hci_conn *c, struct hci_chan *chan)
+{
+       struct hci_chan_hash *h = &c->chan_hash;
+       list_del(&chan->list);
+       h->num--;
+}
+
 void hci_acl_connect(struct hci_conn *conn);
 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
 void hci_add_sco(struct hci_conn *conn, __u16 handle);
@@ -466,6 +525,10 @@ int hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
+struct hci_chan *hci_chan_create(struct hci_conn *conn);
+int hci_chan_del(struct hci_chan *chan);
+void hci_chan_hash_flush(struct hci_conn *conn);
+
 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                                                __u8 sec_level, __u8 auth_type);
 int hci_conn_check_link_mode(struct hci_conn *conn);
@@ -545,7 +608,7 @@ struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
 struct hci_dev *hci_alloc_dev(void);
 void hci_free_dev(struct hci_dev *hdev);
 int hci_register_dev(struct hci_dev *hdev);
-int hci_unregister_dev(struct hci_dev *hdev);
+void hci_unregister_dev(struct hci_dev *hdev);
 int hci_suspend_dev(struct hci_dev *hdev);
 int hci_resume_dev(struct hci_dev *hdev);
 int hci_dev_open(__u16 dev);
@@ -599,8 +662,9 @@ int hci_recv_frame(struct sk_buff *skb);
 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
 
-int hci_register_sysfs(struct hci_dev *hdev);
-void hci_unregister_sysfs(struct hci_dev *hdev);
+void hci_init_sysfs(struct hci_dev *hdev);
+int hci_add_sysfs(struct hci_dev *hdev);
+void hci_del_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
 void hci_conn_add_sysfs(struct hci_conn *conn);
 void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -676,7 +740,7 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
 {
        register struct hci_proto *hp;
-       int reason = 0x13;
+       int reason = HCI_ERROR_REMOTE_USER_TERM;
 
        hp = hci_proto[HCI_PROTO_L2CAP];
        if (hp && hp->disconn_ind)
@@ -836,7 +900,7 @@ int hci_register_notifier(struct notifier_block *nb);
 int hci_unregister_notifier(struct notifier_block *nb);
 
 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
 
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
@@ -849,34 +913,49 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
 
 /* Management interface */
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(u16 index);
-int mgmt_index_removed(u16 index);
-int mgmt_powered(u16 index, u8 powered);
-int mgmt_discoverable(u16 index, u8 discoverable);
-int mgmt_connectable(u16 index, u8 connectable);
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent);
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type);
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr);
-int mgmt_disconnect_failed(u16 index);
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure);
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
-                                                       u8 confirm_hint);
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
+int mgmt_index_added(struct hci_dev *hdev);
+int mgmt_index_removed(struct hci_dev *hdev);
+int mgmt_powered(struct hci_dev *hdev, u8 powered);
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+                                                               u8 persistent);
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type);
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type);
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                               u8 addr_type, u8 status);
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                u8 status);
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                u8 status);
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
-                                                               u8 *eir);
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name);
-int mgmt_discovering(u16 index, u8 discovering);
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr);
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr);
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                               __le32 value, u8 confirm_hint);
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status);
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+                                               bdaddr_t *bdaddr, u8 status);
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status);
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+                                               bdaddr_t *bdaddr, u8 status);
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status);
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+                                               u8 *randomizer, u8 status);
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                               u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir);
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name);
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -915,4 +994,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
 void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]);
 void hci_le_ltk_neg_reply(struct hci_conn *conn);
 
+int hci_do_inquiry(struct hci_dev *hdev, u8 length);
+int hci_cancel_inquiry(struct hci_dev *hdev);
+
 #endif /* __HCI_CORE_H */
index 6cc18f3..30719eb 100644 (file)
 #ifndef __L2CAP_H
 #define __L2CAP_H
 
+#include <asm/unaligned.h>
+
 /* L2CAP defaults */
 #define L2CAP_DEFAULT_MTU              672
 #define L2CAP_DEFAULT_MIN_MTU          48
 #define L2CAP_DEFAULT_FLUSH_TO         0xffff
 #define L2CAP_DEFAULT_TX_WINDOW                63
+#define L2CAP_DEFAULT_EXT_WINDOW       0x3FFF
 #define L2CAP_DEFAULT_MAX_TX           3
 #define L2CAP_DEFAULT_RETRANS_TO       2000    /* 2 seconds */
 #define L2CAP_DEFAULT_MONITOR_TO       12000   /* 12 seconds */
 #define L2CAP_DEFAULT_MAX_PDU_SIZE     1009    /* Sized for 3-DH5 packet */
 #define L2CAP_DEFAULT_ACK_TO           200
 #define L2CAP_LE_DEFAULT_MTU           23
+#define L2CAP_DEFAULT_MAX_SDU_SIZE     0xFFFF
+#define L2CAP_DEFAULT_SDU_ITIME                0xFFFFFFFF
+#define L2CAP_DEFAULT_ACC_LAT          0xFFFFFFFF
 
 #define L2CAP_DISC_TIMEOUT             (100)
 #define L2CAP_DISC_REJ_TIMEOUT         (5000)  /*  5 seconds */
@@ -91,52 +97,82 @@ struct l2cap_conninfo {
 #define L2CAP_ECHO_RSP         0x09
 #define L2CAP_INFO_REQ         0x0a
 #define L2CAP_INFO_RSP         0x0b
+#define L2CAP_CREATE_CHAN_REQ  0x0c
+#define L2CAP_CREATE_CHAN_RSP  0x0d
+#define L2CAP_MOVE_CHAN_REQ    0x0e
+#define L2CAP_MOVE_CHAN_RSP    0x0f
+#define L2CAP_MOVE_CHAN_CFM    0x10
+#define L2CAP_MOVE_CHAN_CFM_RSP        0x11
 #define L2CAP_CONN_PARAM_UPDATE_REQ    0x12
 #define L2CAP_CONN_PARAM_UPDATE_RSP    0x13
 
-/* L2CAP feature mask */
+/* L2CAP extended feature mask */
 #define L2CAP_FEAT_FLOWCTL     0x00000001
 #define L2CAP_FEAT_RETRANS     0x00000002
+#define L2CAP_FEAT_BIDIR_QOS   0x00000004
 #define L2CAP_FEAT_ERTM                0x00000008
 #define L2CAP_FEAT_STREAMING   0x00000010
 #define L2CAP_FEAT_FCS         0x00000020
+#define L2CAP_FEAT_EXT_FLOW    0x00000040
 #define L2CAP_FEAT_FIXED_CHAN  0x00000080
+#define L2CAP_FEAT_EXT_WINDOW  0x00000100
+#define L2CAP_FEAT_UCD         0x00000200
 
 /* L2CAP checksum option */
 #define L2CAP_FCS_NONE         0x00
 #define L2CAP_FCS_CRC16                0x01
 
+/* L2CAP fixed channels */
+#define L2CAP_FC_L2CAP         0x02
+#define L2CAP_FC_A2MP          0x08
+
 /* L2CAP Control Field bit masks */
-#define L2CAP_CTRL_SAR               0xC000
-#define L2CAP_CTRL_REQSEQ            0x3F00
-#define L2CAP_CTRL_TXSEQ             0x007E
-#define L2CAP_CTRL_RETRANS           0x0080
-#define L2CAP_CTRL_FINAL             0x0080
-#define L2CAP_CTRL_POLL              0x0010
-#define L2CAP_CTRL_SUPERVISE         0x000C
-#define L2CAP_CTRL_FRAME_TYPE        0x0001 /* I- or S-Frame */
-
-#define L2CAP_CTRL_TXSEQ_SHIFT      1
-#define L2CAP_CTRL_REQSEQ_SHIFT     8
-#define L2CAP_CTRL_SAR_SHIFT       14
+#define L2CAP_CTRL_SAR                 0xC000
+#define L2CAP_CTRL_REQSEQ              0x3F00
+#define L2CAP_CTRL_TXSEQ               0x007E
+#define L2CAP_CTRL_SUPERVISE           0x000C
+
+#define L2CAP_CTRL_RETRANS             0x0080
+#define L2CAP_CTRL_FINAL               0x0080
+#define L2CAP_CTRL_POLL                        0x0010
+#define L2CAP_CTRL_FRAME_TYPE          0x0001 /* I- or S-Frame */
+
+#define L2CAP_CTRL_TXSEQ_SHIFT         1
+#define L2CAP_CTRL_SUPER_SHIFT         2
+#define L2CAP_CTRL_REQSEQ_SHIFT                8
+#define L2CAP_CTRL_SAR_SHIFT           14
+
+/* L2CAP Extended Control Field bit mask */
+#define L2CAP_EXT_CTRL_TXSEQ           0xFFFC0000
+#define L2CAP_EXT_CTRL_SAR             0x00030000
+#define L2CAP_EXT_CTRL_SUPERVISE       0x00030000
+#define L2CAP_EXT_CTRL_REQSEQ          0x0000FFFC
+
+#define L2CAP_EXT_CTRL_POLL            0x00040000
+#define L2CAP_EXT_CTRL_FINAL           0x00000002
+#define L2CAP_EXT_CTRL_FRAME_TYPE      0x00000001 /* I- or S-Frame */
+
+#define L2CAP_EXT_CTRL_REQSEQ_SHIFT    2
+#define L2CAP_EXT_CTRL_SAR_SHIFT       16
+#define L2CAP_EXT_CTRL_SUPER_SHIFT     16
+#define L2CAP_EXT_CTRL_TXSEQ_SHIFT     18
 
 /* L2CAP Supervisory Function */
-#define L2CAP_SUPER_RCV_READY           0x0000
-#define L2CAP_SUPER_REJECT              0x0004
-#define L2CAP_SUPER_RCV_NOT_READY       0x0008
-#define L2CAP_SUPER_SELECT_REJECT       0x000C
+#define L2CAP_SUPER_RR         0x00
+#define L2CAP_SUPER_REJ                0x01
+#define L2CAP_SUPER_RNR                0x02
+#define L2CAP_SUPER_SREJ       0x03
 
 /* L2CAP Segmentation and Reassembly */
-#define L2CAP_SDU_UNSEGMENTED       0x0000
-#define L2CAP_SDU_START             0x4000
-#define L2CAP_SDU_END               0x8000
-#define L2CAP_SDU_CONTINUE          0xC000
+#define L2CAP_SAR_UNSEGMENTED  0x00
+#define L2CAP_SAR_START                0x01
+#define L2CAP_SAR_END          0x02
+#define L2CAP_SAR_CONTINUE     0x03
 
 /* L2CAP Command rej. reasons */
-#define L2CAP_REJ_NOT_UNDERSTOOD      0x0000
-#define L2CAP_REJ_MTU_EXCEEDED        0x0001
-#define L2CAP_REJ_INVALID_CID         0x0002
-
+#define L2CAP_REJ_NOT_UNDERSTOOD       0x0000
+#define L2CAP_REJ_MTU_EXCEEDED         0x0001
+#define L2CAP_REJ_INVALID_CID          0x0002
 
 /* L2CAP structures */
 struct l2cap_hdr {
@@ -144,6 +180,12 @@ struct l2cap_hdr {
        __le16     cid;
 } __packed;
 #define L2CAP_HDR_SIZE         4
+#define L2CAP_ENH_HDR_SIZE     6
+#define L2CAP_EXT_HDR_SIZE     8
+
+#define L2CAP_FCS_SIZE         2
+#define L2CAP_SDULEN_SIZE      2
+#define L2CAP_PSMLEN_SIZE      2
 
 struct l2cap_cmd_hdr {
        __u8       code;
@@ -188,14 +230,15 @@ struct l2cap_conn_rsp {
 #define L2CAP_CID_DYN_START    0x0040
 #define L2CAP_CID_DYN_END      0xffff
 
-/* connect result */
+/* connect/create channel results */
 #define L2CAP_CR_SUCCESS       0x0000
 #define L2CAP_CR_PEND          0x0001
 #define L2CAP_CR_BAD_PSM       0x0002
 #define L2CAP_CR_SEC_BLOCK     0x0003
 #define L2CAP_CR_NO_MEM                0x0004
+#define L2CAP_CR_BAD_AMP       0x0005
 
-/* connect status */
+/* connect/create channel status */
 #define L2CAP_CS_NO_INFO       0x0000
 #define L2CAP_CS_AUTHEN_PEND   0x0001
 #define L2CAP_CS_AUTHOR_PEND   0x0002
@@ -217,6 +260,8 @@ struct l2cap_conf_rsp {
 #define L2CAP_CONF_UNACCEPT    0x0001
 #define L2CAP_CONF_REJECT      0x0002
 #define L2CAP_CONF_UNKNOWN     0x0003
+#define L2CAP_CONF_PENDING     0x0004
+#define L2CAP_CONF_EFS_REJECT  0x0005
 
 struct l2cap_conf_opt {
        __u8       type;
@@ -233,6 +278,8 @@ struct l2cap_conf_opt {
 #define L2CAP_CONF_QOS         0x03
 #define L2CAP_CONF_RFC         0x04
 #define L2CAP_CONF_FCS         0x05
+#define L2CAP_CONF_EFS         0x06
+#define L2CAP_CONF_EWS         0x07
 
 #define L2CAP_CONF_MAX_SIZE    22
 
@@ -251,6 +298,21 @@ struct l2cap_conf_rfc {
 #define L2CAP_MODE_ERTM                0x03
 #define L2CAP_MODE_STREAMING   0x04
 
+struct l2cap_conf_efs {
+       __u8    id;
+       __u8    stype;
+       __le16  msdu;
+       __le32  sdu_itime;
+       __le32  acc_lat;
+       __le32  flush_to;
+} __packed;
+
+#define L2CAP_SERV_NOTRAFIC    0x00
+#define L2CAP_SERV_BESTEFFORT  0x01
+#define L2CAP_SERV_GUARANTEED  0x02
+
+#define L2CAP_BESTEFFORT_ID    0x01
+
 struct l2cap_disconn_req {
        __le16     dcid;
        __le16     scid;
@@ -271,14 +333,57 @@ struct l2cap_info_rsp {
        __u8        data[0];
 } __packed;
 
+struct l2cap_create_chan_req {
+       __le16      psm;
+       __le16      scid;
+       __u8        amp_id;
+} __packed;
+
+struct l2cap_create_chan_rsp {
+       __le16      dcid;
+       __le16      scid;
+       __le16      result;
+       __le16      status;
+} __packed;
+
+struct l2cap_move_chan_req {
+       __le16      icid;
+       __u8        dest_amp_id;
+} __packed;
+
+struct l2cap_move_chan_rsp {
+       __le16      icid;
+       __le16      result;
+} __packed;
+
+#define L2CAP_MR_SUCCESS       0x0000
+#define L2CAP_MR_PEND          0x0001
+#define L2CAP_MR_BAD_ID                0x0002
+#define L2CAP_MR_SAME_ID       0x0003
+#define L2CAP_MR_NOT_SUPP      0x0004
+#define L2CAP_MR_COLLISION     0x0005
+#define L2CAP_MR_NOT_ALLOWED   0x0006
+
+struct l2cap_move_chan_cfm {
+       __le16      icid;
+       __le16      result;
+} __packed;
+
+#define L2CAP_MC_CONFIRMED     0x0000
+#define L2CAP_MC_UNCONFIRMED   0x0001
+
+struct l2cap_move_chan_cfm_rsp {
+       __le16      icid;
+} __packed;
+
 /* info type */
-#define L2CAP_IT_CL_MTU     0x0001
-#define L2CAP_IT_FEAT_MASK  0x0002
-#define L2CAP_IT_FIXED_CHAN 0x0003
+#define L2CAP_IT_CL_MTU                0x0001
+#define L2CAP_IT_FEAT_MASK     0x0002
+#define L2CAP_IT_FIXED_CHAN    0x0003
 
 /* info result */
-#define L2CAP_IR_SUCCESS    0x0000
-#define L2CAP_IR_NOTSUPP    0x0001
+#define L2CAP_IR_SUCCESS       0x0000
+#define L2CAP_IR_NOTSUPP       0x0001
 
 struct l2cap_conn_param_update_req {
        __le16      min;
@@ -297,7 +402,7 @@ struct l2cap_conn_param_update_rsp {
 
 /* ----- L2CAP channels and connections ----- */
 struct srej_list {
-       __u8    tx_seq;
+       __u16   tx_seq;
        struct list_head list;
 };
 
@@ -319,14 +424,11 @@ struct l2cap_chan {
        __u16           flush_to;
        __u8            mode;
        __u8            chan_type;
+       __u8            chan_policy;
 
        __le16          sport;
 
        __u8            sec_level;
-       __u8            role_switch;
-       __u8            force_reliable;
-       __u8            flushable;
-       __u8            force_active;
 
        __u8            ident;
 
@@ -337,7 +439,8 @@ struct l2cap_chan {
 
        __u8            fcs;
 
-       __u8            tx_win;
+       __u16           tx_win;
+       __u16           tx_win_max;
        __u8            max_tx;
        __u16           retrans_timeout;
        __u16           monitor_timeout;
@@ -345,25 +448,40 @@ struct l2cap_chan {
 
        unsigned long   conf_state;
        unsigned long   conn_state;
-
-       __u8            next_tx_seq;
-       __u8            expected_ack_seq;
-       __u8            expected_tx_seq;
-       __u8            buffer_seq;
-       __u8            buffer_seq_srej;
-       __u8            srej_save_reqseq;
-       __u8            frames_sent;
-       __u8            unacked_frames;
+       unsigned long   flags;
+
+       __u16           next_tx_seq;
+       __u16           expected_ack_seq;
+       __u16           expected_tx_seq;
+       __u16           buffer_seq;
+       __u16           buffer_seq_srej;
+       __u16           srej_save_reqseq;
+       __u16           frames_sent;
+       __u16           unacked_frames;
        __u8            retry_count;
        __u8            num_acked;
        __u16           sdu_len;
        struct sk_buff  *sdu;
        struct sk_buff  *sdu_last_frag;
 
-       __u8            remote_tx_win;
+       __u16           remote_tx_win;
        __u8            remote_max_tx;
        __u16           remote_mps;
 
+       __u8            local_id;
+       __u8            local_stype;
+       __u16           local_msdu;
+       __u32           local_sdu_itime;
+       __u32           local_acc_lat;
+       __u32           local_flush_to;
+
+       __u8            remote_id;
+       __u8            remote_stype;
+       __u16           remote_msdu;
+       __u32           remote_sdu_itime;
+       __u32           remote_acc_lat;
+       __u32           remote_flush_to;
+
        struct timer_list       chan_timer;
        struct timer_list       retrans_timer;
        struct timer_list       monitor_timer;
@@ -391,6 +509,7 @@ struct l2cap_ops {
 
 struct l2cap_conn {
        struct hci_conn *hcon;
+       struct hci_chan *hchan;
 
        bdaddr_t        *dst;
        bdaddr_t        *src;
@@ -445,6 +564,9 @@ enum {
        CONF_CONNECT_PEND,
        CONF_NO_FCS_RECV,
        CONF_STATE2_DEVICE,
+       CONF_EWS_RECV,
+       CONF_LOC_CONF_PEND,
+       CONF_REM_CONF_PEND,
 };
 
 #define L2CAP_CONF_MAX_CONF_REQ 2
@@ -462,6 +584,16 @@ enum {
        CONN_RNR_SENT,
 };
 
+/* Definitions for flags in l2cap_chan */
+enum {
+       FLAG_ROLE_SWITCH,
+       FLAG_FORCE_ACTIVE,
+       FLAG_FORCE_RELIABLE,
+       FLAG_FLUSHABLE,
+       FLAG_EXT_CTRL,
+       FLAG_EFS_ENABLE,
+};
+
 #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
 #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
 #define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
@@ -474,6 +606,22 @@ enum {
                L2CAP_DEFAULT_ACK_TO);
 #define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer)
 
+static inline int __seq_offset(struct l2cap_chan *chan, __u16 seq1, __u16 seq2)
+{
+       int offset;
+
+       offset = (seq1 - seq2) % (chan->tx_win_max + 1);
+       if (offset < 0)
+               offset += (chan->tx_win_max + 1);
+
+       return offset;
+}
+
+static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
+{
+       return (seq + 1) % (chan->tx_win_max + 1);
+}
+
 static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
 {
        int sub;
@@ -486,11 +634,162 @@ static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
        return sub == ch->remote_tx_win;
 }
 
-#define __get_txseq(ctrl)      (((ctrl) & L2CAP_CTRL_TXSEQ) >> 1)
-#define __get_reqseq(ctrl)     (((ctrl) & L2CAP_CTRL_REQSEQ) >> 8)
-#define __is_iframe(ctrl)      (!((ctrl) & L2CAP_CTRL_FRAME_TYPE))
-#define __is_sframe(ctrl)      ((ctrl) & L2CAP_CTRL_FRAME_TYPE)
-#define __is_sar_start(ctrl)   (((ctrl) & L2CAP_CTRL_SAR) == L2CAP_SDU_START)
+static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
+                                               L2CAP_EXT_CTRL_REQSEQ_SHIFT;
+       else
+               return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
+}
+
+static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
+                                                       L2CAP_EXT_CTRL_REQSEQ;
+       else
+               return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
+}
+
+static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
+                                               L2CAP_EXT_CTRL_TXSEQ_SHIFT;
+       else
+               return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
+}
+
+static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
+                                                       L2CAP_EXT_CTRL_TXSEQ;
+       else
+               return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
+}
+
+static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
+       else
+               return ctrl & L2CAP_CTRL_FRAME_TYPE;
+}
+
+static inline __u32 __set_sframe(struct l2cap_chan *chan)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return L2CAP_EXT_CTRL_FRAME_TYPE;
+       else
+               return L2CAP_CTRL_FRAME_TYPE;
+}
+
+static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
+       else
+               return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+}
+
+static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
+       else
+               return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
+}
+
+static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
+{
+       return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
+}
+
+static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return L2CAP_EXT_CTRL_SAR;
+       else
+               return L2CAP_CTRL_SAR;
+}
+
+static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
+                                               L2CAP_EXT_CTRL_SUPER_SHIFT;
+       else
+               return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
+}
+
+static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
+                                               L2CAP_EXT_CTRL_SUPERVISE;
+       else
+               return (super << L2CAP_CTRL_SUPER_SHIFT) &
+                                                       L2CAP_CTRL_SUPERVISE;
+}
+
+static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return L2CAP_EXT_CTRL_FINAL;
+       else
+               return L2CAP_CTRL_FINAL;
+}
+
+static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return ctrl & L2CAP_EXT_CTRL_FINAL;
+       else
+               return ctrl & L2CAP_CTRL_FINAL;
+}
+
+static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return L2CAP_EXT_CTRL_POLL;
+       else
+               return L2CAP_CTRL_POLL;
+}
+
+static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return ctrl & L2CAP_EXT_CTRL_POLL;
+       else
+               return ctrl & L2CAP_CTRL_POLL;
+}
+
+static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return get_unaligned_le32(p);
+       else
+               return get_unaligned_le16(p);
+}
+
+static inline void __put_control(struct l2cap_chan *chan, __u32 control,
+                                                               void *p)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return put_unaligned_le32(control, p);
+       else
+               return put_unaligned_le16(control, p);
+}
+
+static inline __u8 __ctrl_size(struct l2cap_chan *chan)
+{
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
+       else
+               return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
+}
 
 extern int disable_ertm;
 
@@ -507,7 +806,9 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
 void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan);
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+                                                               u32 priority);
 void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+int l2cap_chan_check_security(struct l2cap_chan *chan);
 
 #endif /* __L2CAP_H */
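
Note: the new sequence helpers above (__seq_offset(), __next_seq() and the FLAG_EXT_CTRL-aware accessors) let the ERTM code work with either the 6-bit enhanced or the 14-bit extended sequence space by doing all arithmetic modulo tx_win_max + 1. A small standalone sketch of that arithmetic (window sizes and values are illustrative; this is not kernel code):

#include <stdio.h>

/* Restates the logic of __seq_offset(); tx_win_max would be 63 for the
 * enhanced control field and 0x3fff for the extended one. */
static int seq_offset(int tx_win_max, int seq1, int seq2)
{
        int offset = (seq1 - seq2) % (tx_win_max + 1);

        if (offset < 0)
                offset += tx_win_max + 1;
        return offset;
}

int main(void)
{
        /* Sequence 2 is 6 frames ahead of sequence 60 once the 6-bit
         * counter has wrapped at 64 ... */
        printf("%d\n", seq_offset(63, 2, 60));     /* prints 6 */
        /* ... but 16326 frames ahead in the 14-bit extended space. */
        printf("%d\n", seq_offset(0x3fff, 2, 60)); /* prints 16326 */
        return 0;
}
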
index d66da0f..3b68806 100644 (file)
 
 #define MGMT_INDEX_NONE                        0xFFFF
 
+#define MGMT_STATUS_SUCCESS            0x00
+#define MGMT_STATUS_UNKNOWN_COMMAND    0x01
+#define MGMT_STATUS_NOT_CONNECTED      0x02
+#define MGMT_STATUS_FAILED             0x03
+#define MGMT_STATUS_CONNECT_FAILED     0x04
+#define MGMT_STATUS_AUTH_FAILED                0x05
+#define MGMT_STATUS_NOT_PAIRED         0x06
+#define MGMT_STATUS_NO_RESOURCES       0x07
+#define MGMT_STATUS_TIMEOUT            0x08
+#define MGMT_STATUS_ALREADY_CONNECTED  0x09
+#define MGMT_STATUS_BUSY               0x0a
+#define MGMT_STATUS_REJECTED           0x0b
+#define MGMT_STATUS_NOT_SUPPORTED      0x0c
+#define MGMT_STATUS_INVALID_PARAMS     0x0d
+#define MGMT_STATUS_DISCONNECTED       0x0e
+#define MGMT_STATUS_NOT_POWERED                0x0f
+
 struct mgmt_hdr {
        __le16 opcode;
        __le16 index;
@@ -69,6 +86,10 @@ struct mgmt_mode {
 #define MGMT_OP_SET_POWERED            0x0005
 
 #define MGMT_OP_SET_DISCOVERABLE       0x0006
+struct mgmt_cp_set_discoverable {
+       __u8 val;
+       __u16 timeout;
+} __packed;
 
 #define MGMT_OP_SET_CONNECTABLE                0x0007
 
@@ -96,27 +117,29 @@ struct mgmt_cp_set_service_cache {
        __u8 enable;
 } __packed;
 
-struct mgmt_key_info {
+struct mgmt_link_key_info {
        bdaddr_t bdaddr;
        u8 type;
        u8 val[16];
        u8 pin_len;
-       u8 dlen;
-       u8 data[0];
 } __packed;
 
-#define MGMT_OP_LOAD_KEYS              0x000D
-struct mgmt_cp_load_keys {
+#define MGMT_OP_LOAD_LINK_KEYS         0x000D
+struct mgmt_cp_load_link_keys {
        __u8 debug_keys;
        __le16 key_count;
-       struct mgmt_key_info keys[0];
+       struct mgmt_link_key_info keys[0];
 } __packed;
 
-#define MGMT_OP_REMOVE_KEY             0x000E
-struct mgmt_cp_remove_key {
+#define MGMT_OP_REMOVE_KEYS            0x000E
+struct mgmt_cp_remove_keys {
        bdaddr_t bdaddr;
        __u8 disconnect;
 } __packed;
+struct mgmt_rp_remove_keys {
+       bdaddr_t bdaddr;
+       __u8 status;
+};
 
 #define MGMT_OP_DISCONNECT             0x000F
 struct mgmt_cp_disconnect {
@@ -124,12 +147,23 @@ struct mgmt_cp_disconnect {
 } __packed;
 struct mgmt_rp_disconnect {
        bdaddr_t bdaddr;
+       __u8 status;
+} __packed;
+
+#define MGMT_ADDR_BREDR                        0x00
+#define MGMT_ADDR_LE_PUBLIC            0x01
+#define MGMT_ADDR_LE_RANDOM            0x02
+#define MGMT_ADDR_INVALID              0xff
+
+struct mgmt_addr_info {
+       bdaddr_t bdaddr;
+       __u8 type;
 } __packed;
 
 #define MGMT_OP_GET_CONNECTIONS                0x0010
 struct mgmt_rp_get_connections {
        __le16 conn_count;
-       bdaddr_t conn[0];
+       struct mgmt_addr_info addr[0];
 } __packed;
 
 #define MGMT_OP_PIN_CODE_REPLY         0x0011
@@ -155,11 +189,11 @@ struct mgmt_cp_set_io_capability {
 
 #define MGMT_OP_PAIR_DEVICE            0x0014
 struct mgmt_cp_pair_device {
-       bdaddr_t bdaddr;
+       struct mgmt_addr_info addr;
        __u8 io_cap;
 } __packed;
 struct mgmt_rp_pair_device {
-       bdaddr_t bdaddr;
+       struct mgmt_addr_info addr;
        __u8 status;
 } __packed;
 
@@ -198,6 +232,9 @@ struct mgmt_cp_remove_remote_oob_data {
 } __packed;
 
 #define MGMT_OP_START_DISCOVERY                0x001B
+struct mgmt_cp_start_discovery {
+       __u8 type;
+} __packed;
 
 #define MGMT_OP_STOP_DISCOVERY         0x001C
 
@@ -216,6 +253,17 @@ struct mgmt_cp_set_fast_connectable {
        __u8 enable;
 } __packed;
 
+#define MGMT_OP_USER_PASSKEY_REPLY     0x0020
+struct mgmt_cp_user_passkey_reply {
+       bdaddr_t bdaddr;
+       __le32 passkey;
+} __packed;
+
+#define MGMT_OP_USER_PASSKEY_NEG_REPLY 0x0021
+struct mgmt_cp_user_passkey_neg_reply {
+       bdaddr_t bdaddr;
+} __packed;
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16 opcode;
@@ -245,26 +293,19 @@ struct mgmt_ev_controller_error {
 
 #define MGMT_EV_PAIRABLE               0x0009
 
-#define MGMT_EV_NEW_KEY                        0x000A
-struct mgmt_ev_new_key {
+#define MGMT_EV_NEW_LINK_KEY           0x000A
+struct mgmt_ev_new_link_key {
        __u8 store_hint;
-       struct mgmt_key_info key;
+       struct mgmt_link_key_info key;
 } __packed;
 
 #define MGMT_EV_CONNECTED              0x000B
-struct mgmt_ev_connected {
-       bdaddr_t bdaddr;
-       __u8 link_type;
-} __packed;
 
 #define MGMT_EV_DISCONNECTED           0x000C
-struct mgmt_ev_disconnected {
-       bdaddr_t bdaddr;
-} __packed;
 
 #define MGMT_EV_CONNECT_FAILED         0x000D
 struct mgmt_ev_connect_failed {
-       bdaddr_t bdaddr;
+       struct mgmt_addr_info addr;
        __u8 status;
 } __packed;
 
@@ -294,7 +335,7 @@ struct mgmt_ev_local_name_changed {
 
 #define MGMT_EV_DEVICE_FOUND           0x0012
 struct mgmt_ev_device_found {
-       bdaddr_t bdaddr;
+       struct mgmt_addr_info addr;
        __u8 dev_class[3];
        __s8 rssi;
        __u8 eir[HCI_MAX_EIR_LENGTH];
@@ -317,3 +358,8 @@ struct mgmt_ev_device_blocked {
 struct mgmt_ev_device_unblocked {
        bdaddr_t bdaddr;
 } __packed;
+
+#define MGMT_EV_USER_PASSKEY_REQUEST   0x0017
+struct mgmt_ev_user_passkey_request {
+       bdaddr_t bdaddr;
+} __packed;
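
Note: several management commands and events above now carry a struct mgmt_addr_info instead of a bare bdaddr_t, so callers must state whether an address is BR/EDR or LE. A minimal sketch of filling the new pair-device parameters (bacpy() is the usual Bluetooth address copy helper; peer, addr_type and io_cap come from the caller):

/* Sketch only: populate mgmt_cp_pair_device with an explicitly typed
 * address, as required by the struct mgmt_addr_info based layout above. */
static void fill_pair_device(struct mgmt_cp_pair_device *cp,
                             bdaddr_t *peer, u8 addr_type, u8 io_cap)
{
        bacpy(&cp->addr.bdaddr, peer);
        cp->addr.type = addr_type;      /* MGMT_ADDR_BREDR or MGMT_ADDR_LE_* */
        cp->io_cap = io_cap;
}
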
index c011281..ef2dd94 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/caif_device.h>
 #include <linux/caif/caif_socket.h>
 #include <linux/if.h>
 #include <linux/net.h>
@@ -104,4 +105,24 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer,
  */
 void caif_free_client(struct cflayer *adap_layer);
 
+/**
+ * struct caif_enroll_dev - Enroll a net-device as a CAIF Link layer
+ * @dev:               Network device to enroll.
+ * @caifdev:           Configuration information from CAIF Link Layer
+ * @link_support:      Link layer support layer
+ * @head_room:         Head room needed by link support layer
+ * @layer:             Lowest layer in CAIF stack
+ * @rcv_func:          Receive function for CAIF stack.
+ *
+ * This function enrolls a CAIF link layer into the CAIF stack and
+ * expects the interface to be able to handle CAIF payload.
+ * The link_support layer is used to add any link-layer specific
+ * framing.
+ */
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                       struct cflayer *link_support, int head_room,
+                       struct cflayer **layer, int (**rcv_func)(
+                               struct sk_buff *, struct net_device *,
+                               struct packet_type *, struct net_device *));
+
 #endif /* CAIF_DEV_H_ */
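
Note: the kerneldoc above describes how a link-layer driver hands its net device over to the CAIF stack. A hedged sketch of such a call, using the cfserl_create() prototype updated further below; the instance number, headroom value and driver objects are illustrative assumptions, not taken from this patch set:

/* Sketch only: enroll a serial CAIF link layer for a driver's netdev. */
static int my_caif_register(struct net_device *ndev,
                            struct caif_dev_common *common)
{
        struct cflayer *link_support;
        struct cflayer *lowest;
        int (*rcv_func)(struct sk_buff *, struct net_device *,
                        struct packet_type *, struct net_device *);

        /* Link-specific framing layer; true enables start-of-frame ext. */
        link_support = cfserl_create(0, true);
        if (!link_support)
                return -ENOMEM;

        /* A headroom of 4 bytes is an illustrative value for this sketch. */
        caif_enroll_dev(ndev, common, link_support, 4, &lowest, &rcv_func);
        return 0;
}
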
index 35bc788..0f3a391 100644 (file)
@@ -121,9 +121,7 @@ enum caif_direction {
  * @transmit:  Packet transmit function.
  * @ctrlcmd:   Used for control signalling upwards in the stack.
  * @modemcmd:  Used for control signaling downwards in the stack.
- * @prio:      Priority of this layer.
  * @id:                The identity of this layer
- * @type:      The type of this layer
  * @name:      Name of the layer.
  *
  *  This structure defines the layered structure in CAIF.
@@ -230,9 +228,7 @@ struct cflayer {
         */
        int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
 
-       unsigned short prio;
        unsigned int id;
-       unsigned int type;
        char name[CAIF_LAYER_NAME_SZ];
 };
 
index 87c3d11..aa6a485 100644 (file)
@@ -55,8 +55,8 @@
 struct cfspi_xfer {
        u16 tx_dma_len;
        u16 rx_dma_len;
-       void *va_tx;
-       dma_addr_t pa_tx;
+       void *va_tx[2];
+       dma_addr_t pa_tx[2];
        void *va_rx;
        dma_addr_t pa_rx;
 };
index 3e93a4a..90b4ff8 100644 (file)
 struct cfcnfg;
 
 /**
- * enum cfcnfg_phy_type -  Types of physical layers defined in CAIF Stack
- *
- * @CFPHYTYPE_FRAG:    Fragmented frames physical interface.
- * @CFPHYTYPE_CAIF:    Generic CAIF physical interface
- */
-enum cfcnfg_phy_type {
-       CFPHYTYPE_FRAG = 1,
-       CFPHYTYPE_CAIF,
-       CFPHYTYPE_MAX
-};
-
-/**
  * enum cfcnfg_phy_preference - Physical preference HW Abstraction
  *
  * @CFPHYPREF_UNSPECIFIED:     Default physical interface
@@ -66,21 +54,20 @@ void cfcnfg_remove(struct cfcnfg *cfg);
  * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
  * @cnfg:      Pointer to a CAIF configuration object, created by
  *             cfcnfg_create().
- * @phy_type:  Specifies the type of physical interface, e.g.
- *                     CFPHYTYPE_FRAG.
  * @dev:       Pointer to link layer device
  * @phy_layer: Specify the physical layer. The transmit function
  *             MUST be set in the structure.
  * @pref:      The phy (link layer) preference.
+ * @link_support: Protocol implementation for link layer specific protocol.
  * @fcs:       Specify if checksum is used in CAIF Framing Layer.
- * @stx:       Specify if Start Of Frame eXtention is used.
+ * @head_room: Head space needed by link specific protocol.
  */
-
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
-                    bool fcs, bool stx);
+                    struct cflayer *link_support,
+                    bool fcs, int head_room);
 
 /**
  * cfcnfg_del_phy_layer - Deletes an phy layer from the CAIF stack.
index b837432..f121299 100644 (file)
@@ -8,5 +8,5 @@
 #define CFSERL_H_
 #include <net/caif/caif_layer.h>
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx);
-#endif                         /* CFSERL_H_ */
+struct cflayer *cfserl_create(int instance, bool use_stx);
+#endif
index 95852e3..3de1c39 100644 (file)
@@ -391,6 +391,8 @@ struct cfg80211_crypto_settings {
  * @assocresp_ies: extra information element(s) to add into (Re)Association
  *     Response frames or %NULL
  * @assocresp_ies_len: length of assocresp_ies in octets
+ * @probe_resp_len: length of probe response template (@probe_resp)
+ * @probe_resp: probe response template (AP mode only)
  */
 struct beacon_parameters {
        u8 *head, *tail;
@@ -408,6 +410,8 @@ struct beacon_parameters {
        size_t proberesp_ies_len;
        const u8 *assocresp_ies;
        size_t assocresp_ies_len;
+       int probe_resp_len;
+       u8 *probe_resp;
 };
 
 /**
@@ -778,6 +782,7 @@ struct mesh_config {
        u16 min_discovery_timeout;
        u32 dot11MeshHWMPactivePathTimeout;
        u16 dot11MeshHWMPpreqMinInterval;
+       u16 dot11MeshHWMPperrMinInterval;
        u16 dot11MeshHWMPnetDiameterTraversalTime;
        u8  dot11MeshHWMPRootMode;
        u16 dot11MeshHWMPRannInterval;
@@ -798,6 +803,7 @@ struct mesh_config {
  * @ie_len: length of vendor information elements
  * @is_authenticated: this mesh requires authentication
  * @is_secure: this mesh uses security
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
  *
  * These parameters are fixed when the mesh is created.
  */
@@ -810,6 +816,7 @@ struct mesh_setup {
        u8 ie_len;
        bool is_authenticated;
        bool is_secure;
+       int mcast_rate[IEEE80211_NUM_BANDS];
 };
 
 /**
@@ -1040,6 +1047,15 @@ struct cfg80211_auth_request {
 };
 
 /**
+ * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
+ *
+ * @ASSOC_REQ_DISABLE_HT:  Disable HT (802.11n)
+ */
+enum cfg80211_assoc_req_flags {
+       ASSOC_REQ_DISABLE_HT            = BIT(0),
+};
+
+/**
  * struct cfg80211_assoc_request - (Re)Association request data
  *
  * This structure provides information needed to complete IEEE 802.11
@@ -1050,6 +1066,10 @@ struct cfg80211_auth_request {
  * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
  * @crypto: crypto settings
  * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
+ * @flags:  See &enum cfg80211_assoc_req_flags
+ * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
+ *   will be used in ht_capa.  Un-supported values will be ignored.
+ * @ht_capa_mask:  The bits of ht_capa which are to be used.
  */
 struct cfg80211_assoc_request {
        struct cfg80211_bss *bss;
@@ -1057,6 +1077,9 @@ struct cfg80211_assoc_request {
        size_t ie_len;
        struct cfg80211_crypto_settings crypto;
        bool use_mfp;
+       u32 flags;
+       struct ieee80211_ht_cap ht_capa;
+       struct ieee80211_ht_cap ht_capa_mask;
 };
 
 /**
@@ -1126,6 +1149,7 @@ struct cfg80211_ibss_params {
        u8 *ssid;
        u8 *bssid;
        struct ieee80211_channel *channel;
+       enum nl80211_channel_type channel_type;
        u8 *ie;
        u8 ssid_len, ie_len;
        u16 beacon_interval;
@@ -1155,6 +1179,10 @@ struct cfg80211_ibss_params {
  * @key_len: length of WEP key for shared key authentication
  * @key_idx: index of WEP key for shared key authentication
  * @key: WEP key for shared key authentication
+ * @flags:  See &enum cfg80211_assoc_req_flags
+ * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
+ *   will be used in ht_capa.  Un-supported values will be ignored.
+ * @ht_capa_mask:  The bits of ht_capa which are to be used.
  */
 struct cfg80211_connect_params {
        struct ieee80211_channel *channel;
@@ -1168,6 +1196,9 @@ struct cfg80211_connect_params {
        struct cfg80211_crypto_settings crypto;
        const u8 *key;
        u8 key_len, key_idx;
+       u32 flags;
+       struct ieee80211_ht_cap ht_capa;
+       struct ieee80211_ht_cap ht_capa_mask;
 };
 
 /**
@@ -1342,6 +1373,9 @@ struct cfg80211_gtk_rekey_data {
  *     doesn't verify much. Note, however, that the passed netdev may be
  *     %NULL as well if the user requested changing the channel for the
  *     device itself, or for a monitor interface.
+ * @get_channel: Get the current operating channel, should return %NULL if
+ *     there's no single defined operating channel if for example the
+ *     device implements channel hopping for multi-channel virtual interfaces.
  *
  * @scan: Request to do a scan. If returning zero, the scan request is given
  *     the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1369,7 +1403,8 @@ struct cfg80211_gtk_rekey_data {
  *     have changed. The actual parameter values are available in
  *     struct wiphy. If returning an error, no value should be changed.
  *
- * @set_tx_power: set the transmit power according to the parameters
+ * @set_tx_power: set the transmit power according to the parameters,
+ *     the power passed is in mBm, to get dBm use MBM_TO_DBM().
  * @get_tx_power: store the current TX power into the dbm variable;
  *     return 0 if successful
  *
@@ -1432,6 +1467,11 @@ struct cfg80211_gtk_rekey_data {
  *
  * @tdls_mgmt: Transmit a TDLS management frame.
  * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
+ *
+ * @probe_client: probe an associated client, must return a cookie that it
+ *     later passes to cfg80211_probe_status().
+ *
+ * @set_noack_map: Set the NoAck Map for the TIDs.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -1585,7 +1625,7 @@ struct cfg80211_ops {
                          enum nl80211_channel_type channel_type,
                          bool channel_type_valid, unsigned int wait,
                          const u8 *buf, size_t len, bool no_cck,
-                         u64 *cookie);
+                         bool dont_wait_for_ack, u64 *cookie);
        int     (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
                                       struct net_device *dev,
                                       u64 cookie);
@@ -1621,6 +1661,15 @@ struct cfg80211_ops {
                             u16 status_code, const u8 *buf, size_t len);
        int     (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
                             u8 *peer, enum nl80211_tdls_operation oper);
+
+       int     (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
+                               const u8 *peer, u64 *cookie);
+
+       int     (*set_noack_map)(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 u16 noack_map);
+
+       struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
 };
 
 /*
@@ -1679,6 +1728,14 @@ struct cfg80211_ops {
  *     teardown packets should be sent through the @NL80211_CMD_TDLS_MGMT
  *     command. When this flag is not set, @NL80211_CMD_TDLS_OPER should be
  *     used for asking the driver/firmware to perform a TDLS operation.
+ * @WIPHY_FLAG_HAVE_AP_SME: device integrates AP SME
+ * @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
+ *     when there are virtual interfaces in AP mode by calling
+ *     cfg80211_report_obss_beacon().
+ * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
+ *     responds to probe-requests in hardware.
+ * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
+ * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
  */
 enum wiphy_flags {
        WIPHY_FLAG_CUSTOM_REGULATORY            = BIT(0),
@@ -1697,6 +1754,11 @@ enum wiphy_flags {
        WIPHY_FLAG_AP_UAPSD                     = BIT(14),
        WIPHY_FLAG_SUPPORTS_TDLS                = BIT(15),
        WIPHY_FLAG_TDLS_EXTERNAL_SETUP          = BIT(16),
+       WIPHY_FLAG_HAVE_AP_SME                  = BIT(17),
+       WIPHY_FLAG_REPORTS_OBSS                 = BIT(18),
+       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD        = BIT(19),
+       WIPHY_FLAG_OFFCHAN_TX                   = BIT(20),
+       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL        = BIT(21),
 };
 
 /**
@@ -1869,6 +1931,7 @@ struct wiphy_wowlan_support {
  * @software_iftypes: bitmask of software interface types, these are not
  *     subject to any restrictions since they are purely managed in SW.
  * @flags: wiphy flags, see &enum wiphy_flags
+ * @features: features advertised to nl80211, see &enum nl80211_feature_flags.
  * @bss_priv_size: each BSS struct has private data allocated with it,
  *     this variable determines its size
  * @max_scan_ssids: maximum number of SSIDs the device can scan for in
@@ -1907,6 +1970,10 @@ struct wiphy_wowlan_support {
  *     may request, if implemented.
  *
  * @wowlan: WoWLAN support information
+ *
+ * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
+ * @ht_capa_mod_mask:  Specify what ht_cap values can be over-ridden.
+ *     If null, then none can be over-ridden.
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -1928,7 +1995,9 @@ struct wiphy {
        /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
        u16 interface_modes;
 
-       u32 flags;
+       u32 flags, features;
+
+       u32 ap_sme_capa;
 
        enum cfg80211_signal_type signal_type;
 
@@ -1960,6 +2029,13 @@ struct wiphy {
        u32 available_antennas_tx;
        u32 available_antennas_rx;
 
+       /*
+        * Bitmap of supported protocols for probe response offloading
+        * see &enum nl80211_probe_resp_offload_support_attr. Only valid
+        * when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+        */
+       u32 probe_resp_offload;
+
        /* If multiple wiphys are registered and you're handed e.g.
         * a regular netdev with assigned ieee80211_ptr, you won't
         * know whether it points to a wiphy your driver has registered
@@ -1987,6 +2063,8 @@ struct wiphy {
        /* dir in debugfs: ieee80211/<wiphyname> */
        struct dentry *debugfsdir;
 
+       const struct ieee80211_ht_cap *ht_capa_mod_mask;
+
 #ifdef CONFIG_NET_NS
        /* the network namespace this phy lives in currently */
        struct net *_net;
@@ -2183,6 +2261,8 @@ struct wireless_dev {
 
        int beacon_interval;
 
+       u32 ap_unexpected_nlpid;
+
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
        struct {
@@ -2349,69 +2429,6 @@ extern int ieee80211_radiotap_iterator_next(
 extern const unsigned char rfc1042_header[6];
 extern const unsigned char bridge_tunnel_header[6];
 
-/* Parsed Information Elements */
-struct ieee802_11_elems {
-       u8 *ie_start;
-       size_t total_len;
-
-       /* pointers to IEs */
-       u8 *ssid;
-       u8 *supp_rates;
-       u8 *fh_params;
-       u8 *ds_params;
-       u8 *cf_params;
-       struct ieee80211_tim_ie *tim;
-       u8 *ibss_params;
-       u8 *challenge;
-       u8 *wpa;
-       u8 *rsn;
-       u8 *erp_info;
-       u8 *ext_supp_rates;
-       u8 *wmm_info;
-       u8 *wmm_param;
-       struct ieee80211_ht_cap *ht_cap_elem;
-       struct ieee80211_ht_info *ht_info_elem;
-       struct ieee80211_meshconf_ie *mesh_config;
-       u8 *mesh_id;
-       u8 *peering;
-       u8 *preq;
-       u8 *prep;
-       u8 *perr;
-       struct ieee80211_rann_ie *rann;
-       u8 *ch_switch_elem;
-       u8 *country_elem;
-       u8 *pwr_constr_elem;
-       u8 *quiet_elem; /* first quite element */
-       u8 *timeout_int;
-
-       /* length of them, respectively */
-       u8 ssid_len;
-       u8 supp_rates_len;
-       u8 fh_params_len;
-       u8 ds_params_len;
-       u8 cf_params_len;
-       u8 tim_len;
-       u8 ibss_params_len;
-       u8 challenge_len;
-       u8 wpa_len;
-       u8 rsn_len;
-       u8 erp_info_len;
-       u8 ext_supp_rates_len;
-       u8 wmm_info_len;
-       u8 wmm_param_len;
-       u8 mesh_id_len;
-       u8 peering_len;
-       u8 preq_len;
-       u8 prep_len;
-       u8 perr_len;
-       u8 ch_switch_elem_len;
-       u8 country_elem_len;
-       u8 pwr_constr_elem_len;
-       u8 quiet_elem_len;
-       u8 num_of_quiet_elem;   /* can be more the one */
-       u8 timeout_int_len;
-};
-
 /**
  * ieee80211_get_hdrlen_from_skb - get header length from data
  *
@@ -2636,8 +2653,10 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
  *
  * This informs cfg80211 that BSS information was found and
  * the BSS should be updated/added.
+ *
+ * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
  */
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
                          struct ieee80211_channel *channel,
                          struct ieee80211_mgmt *mgmt, size_t len,
@@ -2659,8 +2678,10 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
  *
  * This informs cfg80211 that BSS information was found and
  * the BSS should be updated/added.
+ *
+ * NOTE: Returns a referenced struct, must be released with cfg80211_put_bss()!
  */
-struct cfg80211_bss*
+struct cfg80211_bss * __must_check
 cfg80211_inform_bss(struct wiphy *wiphy,
                    struct ieee80211_channel *channel,
                    const u8 *bssid,
@@ -3189,6 +3210,74 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
 void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
                                     const u8 *bssid, bool preauth, gfp_t gfp);
 
+/**
+ * cfg80211_rx_spurious_frame - inform userspace about a spurious frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * a spurious class 3 frame was received, to be able to deauth the
+ * sender.
+ * Returns %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+                               const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
+ * @dev: The device the frame matched to
+ * @addr: the transmitter address
+ * @gfp: context flags
+ *
+ * This function is used in AP mode (only!) to inform userspace that
+ * an associated station sent a 4addr frame but that wasn't expected.
+ * It is allowed and desirable to send this event only once for each
+ * station to avoid event flooding.
+ * Returns %true if the frame was passed to userspace (or this failed
+ * for a reason other than not having a subscription.)
+ */
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+                                       const u8 *addr, gfp_t gfp);
+
+/**
+ * cfg80211_probe_status - notify userspace about probe status
+ * @dev: the device the probe was sent on
+ * @addr: the address of the peer
+ * @cookie: the cookie filled in @probe_client previously
+ * @acked: indicates whether probe was acked or not
+ * @gfp: allocation flags
+ */
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+                          u64 cookie, bool acked, gfp_t gfp);
+
+/**
+ * cfg80211_report_obss_beacon - report beacon from other APs
+ * @wiphy: The wiphy that received the beacon
+ * @frame: the frame
+ * @len: length of the frame
+ * @freq: frequency the frame was received on
+ * @gfp: allocation flags
+ *
+ * Use this function to report to userspace when a beacon was
+ * received. It is not useful to call this when there is no
+ * netdev that is in AP/GO mode.
+ */
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+                                const u8 *frame, size_t len,
+                                int freq, gfp_t gfp);
+
+/*
+ * cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
+ * @wiphy: the wiphy
+ * @chan: main channel
+ * @channel_type: HT mode
+ */
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+                                struct ieee80211_channel *chan,
+                                enum nl80211_channel_type channel_type);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
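
Note: the @ht_capa/@ht_capa_mask fields added to the association and connect parameters, together with the wiphy's @ht_capa_mod_mask, describe a masked override: a capability bit may only be replaced when both masks allow it. A small sketch of that masking rule (illustrative only, not the in-tree implementation):

#include <stddef.h>
#include <stdint.h>

/* Apply byte-wise HT capability over-rides: a bit is replaced only when
 * the request's mask and the driver's mod-mask both permit it. */
static void apply_ht_overrides(uint8_t *cap, const uint8_t *override,
                               const uint8_t *req_mask,
                               const uint8_t *mod_mask, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                uint8_t m = req_mask[i] & mod_mask[i];

                cap[i] = (cap[i] & ~m) | (override[i] & m);
        }
}
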
index 839f768..7828ebf 100644 (file)
 #ifndef __LINUX_NET_DSA_H
 #define __LINUX_NET_DSA_H
 
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
 #define DSA_MAX_SWITCHES       4
 #define DSA_MAX_PORTS          12
 
@@ -54,8 +59,143 @@ struct dsa_platform_data {
        struct dsa_chip_data    *chip;
 };
 
-extern bool dsa_uses_dsa_tags(void *dsa_ptr);
-extern bool dsa_uses_trailer_tags(void *dsa_ptr);
+struct dsa_switch_tree {
+       /*
+        * Configuration data for the platform device that owns
+        * this dsa switch tree instance.
+        */
+       struct dsa_platform_data        *pd;
+
+       /*
+        * Reference to network device to use, and which tagging
+        * protocol to use.
+        */
+       struct net_device       *master_netdev;
+       __be16                  tag_protocol;
+
+       /*
+        * The switch and port to which the CPU is attached.
+        */
+       s8                      cpu_switch;
+       s8                      cpu_port;
+
+       /*
+        * Link state polling.
+        */
+       int                     link_poll_needed;
+       struct work_struct      link_poll_work;
+       struct timer_list       link_poll_timer;
+
+       /*
+        * Data for the individual switch chips.
+        */
+       struct dsa_switch       *ds[DSA_MAX_SWITCHES];
+};
+
+struct dsa_switch {
+       /*
+        * Parent switch tree, and switch index.
+        */
+       struct dsa_switch_tree  *dst;
+       int                     index;
+
+       /*
+        * Configuration data for this switch.
+        */
+       struct dsa_chip_data    *pd;
+
+       /*
+        * The used switch driver.
+        */
+       struct dsa_switch_driver        *drv;
+
+       /*
+        * Reference to mii bus to use.
+        */
+       struct mii_bus          *master_mii_bus;
+
+       /*
+        * Slave mii_bus and devices for the individual ports.
+        */
+       u32                     dsa_port_mask;
+       u32                     phys_port_mask;
+       struct mii_bus          *slave_mii_bus;
+       struct net_device       *ports[DSA_MAX_PORTS];
+};
+
+static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
+{
+       return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+}
+
+static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+{
+       struct dsa_switch_tree *dst = ds->dst;
+
+       /*
+        * If this is the root switch (i.e. the switch that connects
+        * to the CPU), return the cpu port number on this switch.
+        * Else return the (DSA) port number that connects to the
+        * switch that is one hop closer to the cpu.
+        */
+       if (dst->cpu_switch == ds->index)
+               return dst->cpu_port;
+       else
+               return ds->pd->rtable[dst->cpu_switch];
+}
+
+struct dsa_switch_driver {
+       struct list_head        list;
+
+       __be16                  tag_protocol;
+       int                     priv_size;
+
+       /*
+        * Probing and setup.
+        */
+       char    *(*probe)(struct mii_bus *bus, int sw_addr);
+       int     (*setup)(struct dsa_switch *ds);
+       int     (*set_addr)(struct dsa_switch *ds, u8 *addr);
+
+       /*
+        * Access to the switch's PHY registers.
+        */
+       int     (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+       int     (*phy_write)(struct dsa_switch *ds, int port,
+                            int regnum, u16 val);
+
+       /*
+        * Link state polling and IRQ handling.
+        */
+       void    (*poll_link)(struct dsa_switch *ds);
+
+       /*
+        * ethtool hardware statistics.
+        */
+       void    (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
+       void    (*get_ethtool_stats)(struct dsa_switch *ds,
+                                    int port, uint64_t *data);
+       int     (*get_sset_count)(struct dsa_switch *ds);
+};
+
+void register_switch_driver(struct dsa_switch_driver *type);
+void unregister_switch_driver(struct dsa_switch_driver *type);
+
+/*
+ * The original DSA tag format and some other tag formats have no
+ * ethertype, which means that we need to add a little hack to the
+ * networking receive path to make sure that received frames get
+ * the right ->protocol assigned to them when one of those tag
+ * formats is in use.
+ */
+static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst)
+{
+       return !!(dst->tag_protocol == htons(ETH_P_DSA));
+}
 
+static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst)
+{
+       return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
+}
 
 #endif
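
Note: as the comment above explains, the original DSA and trailer tag formats carry no ethertype, so the receive path has to fix up the protocol itself for frames arriving on the master netdev. A hedged sketch of that check (dev_dsa_ptr() is a hypothetical accessor for the dsa_switch_tree attached to the master device):

/* Sketch only: choose the protocol for a frame received on the DSA
 * master port when the switch tags frames without an ethertype. */
static __be16 dsa_rx_protocol(struct net_device *master, __be16 eth_proto)
{
        struct dsa_switch_tree *dst = dev_dsa_ptr(master);

        if (dst && dsa_uses_dsa_tags(dst))
                return htons(ETH_P_DSA);        /* tag replaces the ethertype */
        if (dst && dsa_uses_trailer_tags(dst))
                return htons(ETH_P_TRAILER);    /* tag appended after payload */
        return eth_proto;                       /* untagged: trust the header */
}
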
index 6faec1a..01343b0 100644 (file)
@@ -86,12 +86,12 @@ struct dst_entry {
        };
 };
 
-static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
 {
        return rcu_dereference(dst->_neighbour);
 }
 
-static inline struct neighbour *dst_get_neighbour_raw(struct dst_entry *dst)
+static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
 {
        return rcu_dereference_raw(dst->_neighbour);
 }
@@ -392,7 +392,7 @@ static inline void dst_confirm(struct dst_entry *dst)
                struct neighbour *n;
 
                rcu_read_lock();
-               n = dst_get_neighbour(dst);
+               n = dst_get_neighbour_noref(dst);
                neigh_confirm(n);
                rcu_read_unlock();
        }
index a094477..9192d69 100644 (file)
@@ -59,8 +59,11 @@ struct flowi4 {
 #define flowi4_proto           __fl_common.flowic_proto
 #define flowi4_flags           __fl_common.flowic_flags
 #define flowi4_secid           __fl_common.flowic_secid
-       __be32                  daddr;
+
+       /* (saddr,daddr) must be grouped, same order as in IP header */
        __be32                  saddr;
+       __be32                  daddr;
+
        union flowi_uli         uli;
 #define fl4_sport              uli.ports.sport
 #define fl4_dport              uli.ports.dport
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
new file mode 100644 (file)
index 0000000..80461c1
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _NET_FLOW_KEYS_H
+#define _NET_FLOW_KEYS_H
+
+struct flow_keys {
+       /* (src,dst) must be grouped, in the same way as in the IP header */
+       __be32 src;
+       __be32 dst;
+       union {
+               __be32 ports;
+               __be16 port16[2];
+       };
+       u8 ip_proto;
+};
+
+extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+#endif
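
Note: skb_flow_dissect() fills struct flow_keys with the addresses, the combined port words and the IP protocol, which is enough to build a flow hash. A hedged sketch of such a use (hashrnd is an assumed per-boot seed; jhash_3words() comes from <linux/jhash.h>):

/* Sketch only: derive a simple receive hash from the dissected keys. */
static u32 flow_keys_hash(const struct sk_buff *skb, u32 hashrnd)
{
        struct flow_keys keys;

        if (!skb_flow_dissect(skb, &keys))
                return 0;       /* could not parse past the network header */

        /* The fields are big-endian on the wire, but for hashing they are
         * simply mixed as 32-bit words. */
        return jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src,
                            (__force u32)keys.ports, hashrnd);
}
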
index 82d8d09..7db3299 100644 (file)
@@ -128,6 +128,8 @@ extern int genl_register_mc_group(struct genl_family *family,
                                  struct genl_multicast_group *grp);
 extern void genl_unregister_mc_group(struct genl_family *family,
                                     struct genl_multicast_group *grp);
+extern void genl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+                       u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
 /**
  * genlmsg_put - Add generic netlink header to netlink message
index f0698b9..75d6156 100644 (file)
@@ -31,8 +31,8 @@ struct icmp_err {
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field)     SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMP_INC_STATS_BH(net, field)  SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
-#define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_BH((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGOUT_INC_STATS(net, field)       SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
+#define ICMPMSGIN_INC_STATS_BH(net, field)     SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 
 struct dst_entry;
 struct net_proto_family;
index 7e2c4d4..7139254 100644 (file)
@@ -271,14 +271,6 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
 
 
-/* Ugly macro to convert literal channel numbers into their mhz equivalents
- * There are certianly some conditions that will break this (like feeding it '30')
- * but they shouldn't arise since nothing talks on channel 30. */
-#define ieee80211chan2mhz(x) \
-       (((x) <= 14) ? \
-       (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
-       ((x) + 1000) * 5)
-
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
 {
index d52685d..ee59f8b 100644 (file)
  * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
  * Maxim Osipov <maxim.osipov@siemens.com>
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  */
 
 #ifndef NET_IEEE802154_H
 #define NET_IEEE802154_H
 
+#define IEEE802154_MTU                 127
+
 #define IEEE802154_FC_TYPE_BEACON      0x0     /* Frame is beacon */
 #define        IEEE802154_FC_TYPE_DATA         0x1     /* Frame is data */
 #define IEEE802154_FC_TYPE_ACK         0x2     /* Frame is acknowledgment */
@@ -56,6 +59,9 @@
        (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
 
 
+/* MAC footer size */
+#define IEEE802154_MFR_SIZE    2 /* 2 octets */
+
 /* MAC's Command Frames Identifiers */
 #define IEEE802154_CMD_ASSOCIATION_REQ         0x01
 #define IEEE802154_CMD_ASSOCIATION_RESP                0x02
index e46674d..00cbb43 100644 (file)
@@ -15,7 +15,7 @@
 #define _INET6_HASHTABLES_H
 
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #include <linux/ipv6.h>
 #include <linux/types.h>
@@ -110,5 +110,5 @@ extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo
                                 const struct in6_addr *saddr, const __be16 sport,
                                 const struct in6_addr *daddr, const __be16 dport,
                                 const int dif);
-#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
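
Note: this and the following headers replace the defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) test with IS_ENABLED(CONFIG_IPV6), which evaluates to 1 whenever the option is built in or built as a module. Because it expands to a plain 0 or 1, it also works outside the preprocessor, for example (illustrative sketch):

/* IS_ENABLED() can be used in ordinary C conditionals as well as #if. */
static bool ipv6_available(void)
{
        return IS_ENABLED(CONFIG_IPV6);
}
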
index e6db62e..dbf9aab 100644 (file)
@@ -143,9 +143,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
        return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone(struct sock *sk,
-                                  const struct request_sock *req,
-                                  const gfp_t priority);
+extern struct sock *inet_csk_clone_lock(const struct sock *sk,
+                                       const struct request_sock *req,
+                                       const gfp_t priority);
 
 enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED  = 1,
index f941964..e3e4051 100644 (file)
@@ -71,7 +71,7 @@ struct ip_options_data {
 
 struct inet_request_sock {
        struct request_sock     req;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        u16                     inet6_rsk_offset;
 #endif
        __be16                  loc_port;
@@ -139,7 +139,7 @@ struct rtable;
 struct inet_sock {
        /* sk and pinet6 has to be the first two members of inet_sock */
        struct sock             sk;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct ipv6_pinfo       *pinet6;
 #endif
        /* Socket demultiplex comparisons on incoming packets. */
@@ -188,7 +188,7 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
        memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
               sk_from->sk_prot->obj_size - ancestor_size);
 }
-#if !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE))
+#if !(IS_ENABLED(CONFIG_IPV6))
 static inline void inet_sk_copy_descendant(struct sock *sk_to,
                                           const struct sock *sk_from)
 {
index e8c25b9..ba52c83 100644 (file)
@@ -218,20 +218,12 @@ extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
 {
-#ifdef CONFIG_NET_NS
-       return rcu_dereference_raw(twsk->tw_net); /* protected by locking, */
-                                                 /* reference counting, */
-                                                 /* initialization, or RCU. */
-#else
-       return &init_net;
-#endif
+       return read_pnet(&twsk->tw_net);
 }
 
 static inline
 void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
 {
-#ifdef CONFIG_NET_NS
-       rcu_assign_pointer(twsk->tw_net, net);
-#endif
+       write_pnet(&twsk->tw_net, net);
 }
 #endif /* _INET_TIMEWAIT_SOCK_ */
index e9ff3fc..06b795d 100644 (file)
@@ -87,7 +87,7 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
 {
        struct inetpeer_addr daddr;
 
-       ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
+       *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
        daddr.family = AF_INET6;
        return inet_getpeer(&daddr, create);
 }
index eca0ef7..775009f 100644 (file)
@@ -353,14 +353,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast,
                memcpy(buf, &naddr, sizeof(naddr));
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
 static __inline__ void inet_reset_saddr(struct sock *sk)
 {
        inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == PF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -379,7 +379,7 @@ static inline int sk_mc_loop(struct sock *sk)
        switch (sk->sk_family) {
        case AF_INET:
                return inet_sk(sk)->mc_loop;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                return inet6_sk(sk)->mc_loop;
 #endif
@@ -450,7 +450,7 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
  *     Functions provided by ip_sockglue.c
  */
 
-extern int     ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+extern void    ipv4_pktinfo_prepare(struct sk_buff *skb);
 extern void    ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
 extern int     ip_cmsg_send(struct net *net,
                             struct msghdr *msg, struct ipcm_cookie *ipc);
index 5e91b72..789d5f4 100644 (file)
@@ -95,14 +95,14 @@ extern struct rt6_info              *rt6_lookup(struct net *net,
 
 extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                         struct neighbour *neigh,
-                                        const struct in6_addr *addr);
+                                        struct flowi6 *fl6);
 extern int icmp6_dst_gc(void);
 
 extern void fib6_force_start_gc(struct net *net);
 
 extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                           const struct in6_addr *addr,
-                                          int anycast);
+                                          bool anycast);
 
 extern int                     ip6_dst_hoplimit(struct dst_entry *dst);
 
index 873d5be..48fd12e 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/netfilter.h>           /* for union nf_inet_addr */
 #include <linux/ip.h>
 #include <linux/ipv6.h>                        /* for struct ipv6hdr */
-#include <net/ipv6.h>                  /* for ipv6_addr_copy */
+#include <net/ipv6.h>
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -119,8 +119,8 @@ ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
                const struct ipv6hdr *iph = nh;
                iphdr->len = sizeof(struct ipv6hdr);
                iphdr->protocol = iph->nexthdr;
-               ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
-               ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
+               iphdr->saddr.in6 = iph->saddr;
+               iphdr->daddr.in6 = iph->daddr;
        } else
 #endif
        {
@@ -137,7 +137,7 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
 {
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
-               ipv6_addr_copy(&dst->in6, &src->in6);
+               dst->in6 = src->in6;
        else
 #endif
        dst->ip = src->ip;
index a366a8a..e4170a2 100644 (file)
@@ -132,6 +132,15 @@ extern struct ctl_path net_ipv6_ctl_path[];
        SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
 })
 
+/* per device and per net counters are atomic_long_t */
+#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)              \
+({                                                                     \
+       struct inet6_dev *_idev = (idev);                               \
+       if (likely(_idev != NULL))                                      \
+               SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
+       SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
+})
+
 #define _DEVADD(net, statname, modifier, idev, field, val)             \
 ({                                                                     \
        struct inet6_dev *_idev = (idev);                               \
@@ -168,11 +177,11 @@ extern struct ctl_path net_ipv6_ctl_path[];
                _DEVINCATOMIC(net, icmpv6, _BH, idev, field)
 
 #define ICMP6MSGOUT_INC_STATS(net, idev, field)                \
-       _DEVINCATOMIC(net, icmpv6msg, , idev, field +256)
+       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
 #define ICMP6MSGOUT_INC_STATS_BH(net, idev, field)     \
-       _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field +256)
+       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
 #define ICMP6MSGIN_INC_STATS_BH(net, idev, field)      \
-       _DEVINCATOMIC(net, icmpv6msg, _BH, idev, field)
+       _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 
 struct ip6_ra_chain {
        struct ip6_ra_chain     *next;
@@ -300,11 +309,6 @@ ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
                  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
 }
 
-static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
-{
-       memcpy(a1, a2, sizeof(struct in6_addr));
-}
-
 static inline void ipv6_addr_prefix(struct in6_addr *pfx, 
                                    const struct in6_addr *addr,
                                    int plen)
@@ -554,7 +558,7 @@ extern void                 ipv6_push_frag_opts(struct sk_buff *skb,
                                                    u8 *proto);
 
 extern int                     ipv6_skip_exthdr(const struct sk_buff *, int start,
-                                                u8 *nexthdrp);
+                                                u8 *nexthdrp, __be16 *frag_offp);
 
 extern int                     ipv6_ext_hdr(u8 nexthdr);
 
index 72eddd1..5b5c8a7 100644 (file)
@@ -166,6 +166,7 @@ struct ieee80211_low_level_stats {
  *     that it is only ever disabled for station mode.
  * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
  * @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
+ * @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -184,6 +185,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_QOS                 = 1<<13,
        BSS_CHANGED_IDLE                = 1<<14,
        BSS_CHANGED_SSID                = 1<<15,
+       BSS_CHANGED_AP_PROBE_RESP       = 1<<16,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -518,7 +520,7 @@ struct ieee80211_tx_rate {
  * @flags: transmit info flags, defined above
  * @band: the band to transmit on (use for checking for races)
  * @antenna_sel_tx: antenna to use, 0 for automatic diversity
- * @pad: padding, ignore
+ * @ack_frame_id: internal frame ID for TX status, used internally
  * @control: union for control data
  * @status: union for status data
  * @driver_data: array of driver_data pointers
@@ -535,8 +537,7 @@ struct ieee80211_tx_info {
 
        u8 antenna_sel_tx;
 
-       /* 2 byte hole */
-       u8 pad[2];
+       u16 ack_frame_id;
 
        union {
                struct {
@@ -901,6 +902,10 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
  * @IEEE80211_KEY_FLAG_SW_MGMT: This flag should be set by the driver for a
  *     CCMP key if it requires CCMP encryption of management frames (MFP) to
  *     be done in software.
+ * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
+ *     for a CCMP key if space should be prepared for the IV, but the IV
+ *     itself should not be generated. Do not set together with
+ *     @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
  */
 enum ieee80211_key_flags {
        IEEE80211_KEY_FLAG_WMM_STA      = 1<<0,
@@ -908,6 +913,7 @@ enum ieee80211_key_flags {
        IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
        IEEE80211_KEY_FLAG_PAIRWISE     = 1<<3,
        IEEE80211_KEY_FLAG_SW_MGMT      = 1<<4,
+       IEEE80211_KEY_FLAG_PUT_IV_SPACE = 1<<5,
 };
 
 /**
@@ -1304,6 +1310,16 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
 }
 
 /**
+ * ieee80211_free_txskb - free TX skb
+ * @hw: the hardware
+ * @skb: the skb
+ *
+ * Free a transmit skb. Use this function when some failure
+ * to transmit happened and thus status cannot be reported.
+ */
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
+
+/**
  * DOC: Hardware crypto acceleration
  *
  * mac80211 is capable of taking advantage of many hardware
@@ -1744,11 +1760,21 @@ enum ieee80211_frame_release_type {
  *     skb contains the buffer starting from the IEEE 802.11 header.
  *     The low-level driver should send the frame out based on
  *     configuration in the TX control data. This handler should,
- *     preferably, never fail and stop queues appropriately, more
- *     importantly, however, it must never fail for A-MPDU-queues.
- *     This function should return NETDEV_TX_OK except in very
- *     limited cases.
- *     Must be implemented and atomic.
+ *     preferably, never fail and stop queues appropriately.
+ *     This must be implemented if @tx_frags is not.
+ *     Must be atomic.
+ *
+ * @tx_frags: Called to transmit multiple fragments of a single MSDU.
+ *     This handler must consume all fragments, sending out some of
+ *     them only is useless and it can't ask for some of them to be
+ *     queued again. If the frame is not fragmented the queue has a
+ *     single SKB only. To avoid issues with the networking stack
+ *     when TX status is reported the frames should be removed from
+ *     the skb queue.
+ *     If this is used, the tx_info @vif and @sta pointers will be
+ *     invalid -- you must not use them in that case.
+ *     This must be implemented if @tx isn't.
+ *     Must be atomic.
  *
  * @start: Called before the first netdevice attached to the hardware
  *     is enabled. This should turn on the hardware and must turn on
@@ -2085,6 +2111,8 @@ enum ieee80211_frame_release_type {
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
+       void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        struct ieee80211_sta *sta, struct sk_buff_head *skbs);
        int (*start)(struct ieee80211_hw *hw);
        void (*stop)(struct ieee80211_hw *hw);
 #ifdef CONFIG_PM
@@ -2661,6 +2689,19 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
 }
 
 /**
+ * ieee80211_proberesp_get - retrieve a Probe Response template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Probe Response template which can, for example, be uploaded to
+ * hardware. The destination address should be set by the caller.
+ *
+ * Can only be called in AP mode.
+ */
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif);
+
+/**
  * ieee80211_pspoll_get - retrieve a PS Poll template
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
index 62beeb9..c977c37 100644 (file)
@@ -145,13 +145,4 @@ int ndisc_ifinfo_sysctl_strategy(ctl_table *ctl,
 extern void                    inet6_ifinfo_notify(int event,
                                                    struct inet6_dev *idev);
 
-static inline struct neighbour * ndisc_get_neigh(struct net_device *dev, const struct in6_addr *addr)
-{
-
-       if (dev)
-               return __neigh_lookup_errno(&nd_tbl, addr, dev);
-
-       return ERR_PTR(-ENODEV);
-}
-
 #endif
index 2720884..6814c4d 100644 (file)
@@ -43,7 +43,6 @@ struct neigh_parms {
 #endif
        struct net_device *dev;
        struct neigh_parms *next;
-       int     (*neigh_setup)(struct neighbour *);
        void    (*neigh_cleanup)(struct neighbour *);
        struct neigh_table *tbl;
 
@@ -59,7 +58,7 @@ struct neigh_parms {
        int     reachable_time;
        int     delay_probe_time;
 
-       int     queue_len;
+       int     queue_len_bytes;
        int     ucast_probes;
        int     app_probes;
        int     mcast_probes;
@@ -99,6 +98,7 @@ struct neighbour {
        rwlock_t                lock;
        atomic_t                refcnt;
        struct sk_buff_head     arp_queue;
+       unsigned int            arp_queue_len_bytes;
        struct timer_list       timer;
        unsigned long           used;
        atomic_t                probes;
@@ -172,12 +172,18 @@ struct neigh_table {
        atomic_t                entries;
        rwlock_t                lock;
        unsigned long           last_rand;
-       struct kmem_cache       *kmem_cachep;
        struct neigh_statistics __percpu *stats;
        struct neigh_hash_table __rcu *nht;
        struct pneigh_entry     **phash_buckets;
 };
 
+#define NEIGH_PRIV_ALIGN       sizeof(long long)
+
+static inline void *neighbour_priv(const struct neighbour *n)
+{
+       return (char *)n + ALIGN(sizeof(*n) + n->tbl->key_len, NEIGH_PRIV_ALIGN);
+}
+
 /* flags for neigh_update() */
 #define NEIGH_UPDATE_F_OVERRIDE                        0x00000001
 #define NEIGH_UPDATE_F_WEAK_OVERRIDE           0x00000002
index 3bb6fa0..ee547c1 100644 (file)
@@ -77,7 +77,7 @@ struct net {
        struct netns_packet     packet;
        struct netns_unix       unx;
        struct netns_ipv4       ipv4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
index e505358..75ca929 100644 (file)
@@ -131,7 +131,7 @@ nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
        return sk;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline struct sock *
 nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
index d786b4f..bbd023a 100644 (file)
@@ -55,6 +55,7 @@ struct netns_ipv4 {
        int current_rt_cache_rebuild_count;
 
        unsigned int sysctl_ping_group_range[2];
+       long sysctl_tcp_mem[3];
 
        atomic_t rt_genid;
        atomic_t dev_addr_genid;
index 0b44112..d542a4b 100644 (file)
@@ -10,15 +10,15 @@ struct netns_mib {
        DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
        DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics);
        DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics);
-       DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics);
+       DEFINE_SNMP_STAT_ATOMIC(struct icmpmsg_mib, icmpmsg_statistics);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct proc_dir_entry *proc_net_devsnmp6;
        DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6);
        DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6);
        DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics);
        DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics);
-       DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics);
+       DEFINE_SNMP_STAT_ATOMIC(struct icmpv6msg_mib, icmpv6msg_statistics);
 #endif
 #ifdef CONFIG_XFRM_STATISTICS
        DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
index 748f91f..5299e69 100644 (file)
@@ -56,7 +56,7 @@ struct netns_xfrm {
 #endif
 
        struct dst_ops          xfrm4_dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
 };
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
new file mode 100644 (file)
index 0000000..e503b87
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * netprio_cgroup.h                    Control Group Priority set
+ *
+ *
+ * Authors:    Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _NETPRIO_CGROUP_H
+#define _NETPRIO_CGROUP_H
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/hardirq.h>
+#include <linux/rcupdate.h>
+
+
+struct netprio_map {
+       struct rcu_head rcu;
+       u32 priomap_len;
+       u32 priomap[];
+};
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup_netprio_state {
+       struct cgroup_subsys_state css;
+       u32 prioidx;
+};
+
+#ifndef CONFIG_NETPRIO_CGROUP
+extern int net_prio_subsys_id;
+#endif
+
+extern void sock_update_netprioidx(struct sock *sk);
+
+static inline struct cgroup_netprio_state
+               *task_netprio_state(struct task_struct *p)
+{
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+       return container_of(task_subsys_state(p, net_prio_subsys_id),
+                           struct cgroup_netprio_state, css);
+#else
+       return NULL;
+#endif
+}
+
+#else
+
+#define sock_update_netprioidx(sk)
+#endif
+
+#endif  /* _NET_CLS_CGROUP_H */
index 39b85bc..b61eb6c 100644 (file)
 #define NCI_MAX_NUM_CONN                                       10
 
 /* NCI Status Codes */
-#define        NCI_STATUS_OK                                           0x00
-#define        NCI_STATUS_REJECTED                                     0x01
-#define        NCI_STATUS_MESSAGE_CORRUPTED                            0x02
-#define        NCI_STATUS_BUFFER_FULL                                  0x03
-#define        NCI_STATUS_FAILED                                       0x04
-#define        NCI_STATUS_NOT_INITIALIZED                              0x05
-#define        NCI_STATUS_SYNTAX_ERROR                                 0x06
-#define        NCI_STATUS_SEMANTIC_ERROR                               0x07
-#define        NCI_STATUS_UNKNOWN_GID                                  0x08
-#define        NCI_STATUS_UNKNOWN_OID                                  0x09
-#define        NCI_STATUS_INVALID_PARAM                                0x0a
-#define        NCI_STATUS_MESSAGE_SIZE_EXCEEDED                        0x0b
+#define NCI_STATUS_OK                                          0x00
+#define NCI_STATUS_REJECTED                                    0x01
+#define NCI_STATUS_RF_FRAME_CORRUPTED                          0x02
+#define NCI_STATUS_FAILED                                      0x03
+#define NCI_STATUS_NOT_INITIALIZED                             0x04
+#define NCI_STATUS_SYNTAX_ERROR                                        0x05
+#define NCI_STATUS_SEMANTIC_ERROR                              0x06
+#define NCI_STATUS_UNKNOWN_GID                                 0x07
+#define NCI_STATUS_UNKNOWN_OID                                 0x08
+#define NCI_STATUS_INVALID_PARAM                               0x09
+#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED                       0x0a
 /* Discovery Specific Status Codes */
-#define        NCI_STATUS_DISCOVERY_ALREADY_STARTED                    0xa0
-#define        NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED           0xa1
+#define NCI_STATUS_DISCOVERY_ALREADY_STARTED                   0xa0
+#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED          0xa1
+#define NCI_STATUS_DISCOVERY_TEAR_DOWN                         0xa2
 /* RF Interface Specific Status Codes */
-#define        NCI_STATUS_RF_TRANSMISSION_ERROR                        0xb0
-#define        NCI_STATUS_RF_PROTOCOL_ERROR                            0xb1
-#define        NCI_STATUS_RF_TIMEOUT_ERROR                             0xb2
-#define        NCI_STATUS_RF_LINK_LOSS_ERROR                           0xb3
+#define NCI_STATUS_RF_TRANSMISSION_ERROR                       0xb0
+#define NCI_STATUS_RF_PROTOCOL_ERROR                           0xb1
+#define NCI_STATUS_RF_TIMEOUT_ERROR                            0xb2
 /* NFCEE Interface Specific Status Codes */
-#define        NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED          0xc0
-#define        NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED            0xc1
-#define        NCI_STATUS_NFCEE_TRANSMISSION_ERROR                     0xc2
-#define        NCI_STATUS_NFCEE_PROTOCOL_ERROR                         0xc3
+#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED         0xc0
+#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED           0xc1
+#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR                    0xc2
+#define NCI_STATUS_NFCEE_PROTOCOL_ERROR                                0xc3
 #define NCI_STATUS_NFCEE_TIMEOUT_ERROR                         0xc4
 
 /* NCI RF Technology and Mode */
 #define NCI_NFC_A_ACTIVE_LISTEN_MODE                           0x83
 #define NCI_NFC_F_ACTIVE_LISTEN_MODE                           0x85
 
+/* NCI RF Technologies */
+#define NCI_NFC_RF_TECHNOLOGY_A                                        0x00
+#define NCI_NFC_RF_TECHNOLOGY_B                                        0x01
+#define NCI_NFC_RF_TECHNOLOGY_F                                        0x02
+#define NCI_NFC_RF_TECHNOLOGY_15693                            0x03
+
+/* NCI Bit Rates */
+#define NCI_NFC_BIT_RATE_106                                   0x00
+#define NCI_NFC_BIT_RATE_212                                   0x01
+#define NCI_NFC_BIT_RATE_424                                   0x02
+#define NCI_NFC_BIT_RATE_848                                   0x03
+#define NCI_NFC_BIT_RATE_1696                                  0x04
+#define NCI_NFC_BIT_RATE_3392                                  0x05
+#define NCI_NFC_BIT_RATE_6784                                  0x06
+
 /* NCI RF Protocols */
 #define NCI_RF_PROTOCOL_UNKNOWN                                        0x00
 #define NCI_RF_PROTOCOL_T1T                                    0x01
 #define NCI_RF_PROTOCOL_NFC_DEP                                        0x05
 
 /* NCI RF Interfaces */
-#define NCI_RF_INTERFACE_RFU                                   0x00
-#define        NCI_RF_INTERFACE_FRAME                                  0x01
-#define        NCI_RF_INTERFACE_ISO_DEP                                0x02
-#define        NCI_RF_INTERFACE_NFC_DEP                                0x03
+#define NCI_RF_INTERFACE_NFCEE_DIRECT                          0x00
+#define NCI_RF_INTERFACE_FRAME                                 0x01
+#define NCI_RF_INTERFACE_ISO_DEP                               0x02
+#define NCI_RF_INTERFACE_NFC_DEP                               0x03
+
+/* NCI Reset types */
+#define NCI_RESET_TYPE_KEEP_CONFIG                             0x00
+#define NCI_RESET_TYPE_RESET_CONFIG                            0x01
+
+/* NCI Static RF connection ID */
+#define NCI_STATIC_RF_CONN_ID                                  0x00
+
+/* NCI Data Flow Control */
+#define NCI_DATA_FLOW_CONTROL_NOT_USED                         0xff
 
 /* NCI RF_DISCOVER_MAP_CMD modes */
 #define NCI_DISC_MAP_MODE_POLL                                 0x01
 
 /* NCI Discovery Types */
 #define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE                      0x00
-#define        NCI_DISCOVERY_TYPE_POLL_B_PASSIVE                       0x01
-#define        NCI_DISCOVERY_TYPE_POLL_F_PASSIVE                       0x02
-#define        NCI_DISCOVERY_TYPE_POLL_A_ACTIVE                        0x03
-#define        NCI_DISCOVERY_TYPE_POLL_F_ACTIVE                        0x05
-#define        NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE                     0x06
-#define        NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE                     0x07
-#define        NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE                      0x09
-#define        NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE                     0x80
-#define        NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE                     0x81
-#define        NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE                     0x82
-#define        NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE                      0x83
-#define        NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE                      0x85
+#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE                      0x01
+#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE                      0x02
+#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE                       0x03
+#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE                       0x05
+#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE                     0x09
+#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE                    0x80
+#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE                    0x81
+#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE                    0x82
+#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE                     0x83
+#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE                     0x85
 
 /* NCI Deactivation Type */
-#define        NCI_DEACTIVATE_TYPE_IDLE_MODE                           0x00
-#define        NCI_DEACTIVATE_TYPE_SLEEP_MODE                          0x01
-#define        NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE                       0x02
-#define        NCI_DEACTIVATE_TYPE_RF_LINK_LOSS                        0x03
-#define        NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR                     0x04
+#define NCI_DEACTIVATE_TYPE_IDLE_MODE                          0x00
+#define NCI_DEACTIVATE_TYPE_SLEEP_MODE                         0x01
+#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE                      0x02
+#define NCI_DEACTIVATE_TYPE_DISCOVERY                          0x03
 
 /* Message Type (MT) */
 #define NCI_MT_DATA_PKT                                                0x00
 #define nci_conn_id(hdr)               (__u8)(((hdr)[0])&0x0f)
 
 /* GID values */
-#define        NCI_GID_CORE                                            0x0
-#define        NCI_GID_RF_MGMT                                         0x1
-#define        NCI_GID_NFCEE_MGMT                                      0x2
-#define        NCI_GID_PROPRIETARY                                     0xf
+#define NCI_GID_CORE                                           0x0
+#define NCI_GID_RF_MGMT                                                0x1
+#define NCI_GID_NFCEE_MGMT                                     0x2
+#define NCI_GID_PROPRIETARY                                    0xf
 
 /* ---- NCI Packet structures ---- */
 #define NCI_CTRL_HDR_SIZE                                      3
@@ -169,18 +190,11 @@ struct nci_data_hdr {
 /* -----  NCI Commands ---- */
 /* ------------------------ */
 #define NCI_OP_CORE_RESET_CMD          nci_opcode_pack(NCI_GID_CORE, 0x00)
-
-#define NCI_OP_CORE_INIT_CMD           nci_opcode_pack(NCI_GID_CORE, 0x01)
-
-#define NCI_OP_CORE_SET_CONFIG_CMD     nci_opcode_pack(NCI_GID_CORE, 0x02)
-
-#define NCI_OP_CORE_CONN_CREATE_CMD    nci_opcode_pack(NCI_GID_CORE, 0x04)
-struct nci_core_conn_create_cmd {
-       __u8    target_handle;
-       __u8    num_target_specific_params;
+struct nci_core_reset_cmd {
+       __u8    reset_type;
 } __packed;
 
-#define NCI_OP_CORE_CONN_CLOSE_CMD     nci_opcode_pack(NCI_GID_CORE, 0x06)
+#define NCI_OP_CORE_INIT_CMD           nci_opcode_pack(NCI_GID_CORE, 0x01)
 
 #define NCI_OP_RF_DISCOVER_MAP_CMD     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 struct disc_map_config {
@@ -218,6 +232,7 @@ struct nci_rf_deactivate_cmd {
 struct nci_core_reset_rsp {
        __u8    status;
        __u8    nci_ver;
+       __u8    config_status;
 } __packed;
 
 #define NCI_OP_CORE_INIT_RSP           nci_opcode_pack(NCI_GID_CORE, 0x01)
@@ -232,24 +247,14 @@ struct nci_core_init_rsp_1 {
 struct nci_core_init_rsp_2 {
        __u8    max_logical_connections;
        __le16  max_routing_table_size;
-       __u8    max_control_packet_payload_length;
-       __le16  rf_sending_buffer_size;
-       __le16  rf_receiving_buffer_size;
-       __le16  manufacturer_id;
-} __packed;
-
-#define NCI_OP_CORE_SET_CONFIG_RSP     nci_opcode_pack(NCI_GID_CORE, 0x02)
-
-#define NCI_OP_CORE_CONN_CREATE_RSP    nci_opcode_pack(NCI_GID_CORE, 0x04)
-struct nci_core_conn_create_rsp {
-       __u8    status;
-       __u8    max_pkt_payload_size;
+       __u8    max_ctrl_pkt_payload_len;
+       __le16  max_size_for_large_params;
+       __u8    max_data_pkt_payload_size;
        __u8    initial_num_credits;
-       __u8    conn_id;
+       __u8    manufact_id;
+       __le32  manufact_specific_info;
 } __packed;
 
-#define NCI_OP_CORE_CONN_CLOSE_RSP     nci_opcode_pack(NCI_GID_CORE, 0x06)
-
 #define NCI_OP_RF_DISCOVER_MAP_RSP     nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
 
 #define NCI_OP_RF_DISCOVER_RSP         nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@@ -270,12 +275,7 @@ struct nci_core_conn_credit_ntf {
        struct conn_credit_entry        conn_entries[NCI_MAX_NUM_CONN];
 } __packed;
 
-#define NCI_OP_RF_FIELD_INFO_NTF       nci_opcode_pack(NCI_GID_CORE, 0x08)
-struct nci_rf_field_info_ntf {
-       __u8    rf_field_status;
-} __packed;
-
-#define NCI_OP_RF_ACTIVATE_NTF         nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
+#define NCI_OP_RF_INTF_ACTIVATED_NTF   nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
 struct rf_tech_specific_params_nfca_poll {
        __u16   sens_res;
        __u8    nfcid1_len;     /* 0, 4, 7, or 10 Bytes */
@@ -289,17 +289,20 @@ struct activation_params_nfca_poll_iso_dep {
        __u8    rats_res[20];
 };
 
-struct nci_rf_activate_ntf {
-       __u8    target_handle;
+struct nci_rf_intf_activated_ntf {
+       __u8    rf_discovery_id;
+       __u8    rf_interface_type;
        __u8    rf_protocol;
-       __u8    rf_tech_and_mode;
+       __u8    activation_rf_tech_and_mode;
        __u8    rf_tech_specific_params_len;
 
        union {
                struct rf_tech_specific_params_nfca_poll nfca_poll;
        } rf_tech_specific_params;
 
-       __u8    rf_interface_type;
+       __u8    data_exch_rf_tech_and_mode;
+       __u8    data_exch_tx_bit_rate;
+       __u8    data_exch_rx_bit_rate;
        __u8    activation_params_len;
 
        union {
@@ -309,5 +312,9 @@ struct nci_rf_activate_ntf {
 } __packed;
 
 #define NCI_OP_RF_DEACTIVATE_NTF       nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
+struct nci_rf_deactivate_ntf {
+       __u8    type;
+       __u8    reason;
+} __packed;
 
 #endif /* __NCI_H */
index b8b4bbd..c92b69d 100644 (file)
@@ -109,15 +109,12 @@ struct nci_dev {
                                [NCI_MAX_SUPPORTED_RF_INTERFACES];
        __u8                    max_logical_connections;
        __u16                   max_routing_table_size;
-       __u8                    max_control_packet_payload_length;
-       __u16                   rf_sending_buffer_size;
-       __u16                   rf_receiving_buffer_size;
-       __u16                   manufacturer_id;
-
-       /* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */
-       __u8                    max_pkt_payload_size;
+       __u8                    max_ctrl_pkt_payload_len;
+       __u16                   max_size_for_large_params;
+       __u8                    max_data_pkt_payload_size;
        __u8                    initial_num_credits;
-       __u8                    conn_id;
+       __u8                    manufact_id;
+       __u32                   manufact_specific_info;
 
        /* stored during nci_data_exchange */
        data_exchange_cb_t      data_exchange_cb;
index 6f7eb80..875f489 100644 (file)
@@ -25,7 +25,7 @@
 #define _PROTOCOL_H
 
 #include <linux/in6.h>
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/ipv6.h>
 #endif
 
@@ -38,7 +38,7 @@ struct net_protocol {
        void                    (*err_handler)(struct sk_buff *skb, u32 info);
        int                     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff         *(*gso_segment)(struct sk_buff *skb,
-                                              u32 features);
+                                              netdev_features_t features);
        struct sk_buff        **(*gro_receive)(struct sk_buff **head,
                                               struct sk_buff *skb);
        int                     (*gro_complete)(struct sk_buff *skb);
@@ -46,7 +46,7 @@ struct net_protocol {
                                netns_ok:1;
 };
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct inet6_protocol {
        int     (*handler)(struct sk_buff *skb);
 
@@ -57,7 +57,7 @@ struct inet6_protocol {
 
        int     (*gso_send_check)(struct sk_buff *skb);
        struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-                                      u32 features);
+                                      netdev_features_t features);
        struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                        struct sk_buff *skb);
        int     (*gro_complete)(struct sk_buff *skb);
@@ -91,7 +91,7 @@ struct inet_protosw {
 
 extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
@@ -100,7 +100,7 @@ extern int  inet_del_protocol(const struct net_protocol *prot, unsigned char num)
 extern void    inet_register_protosw(struct inet_protosw *p);
 extern void    inet_unregister_protosw(struct inet_protosw *p);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 extern int     inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int     inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
 extern int     inet6_register_protosw(struct inet_protosw *p);
index b72a3b8..ef715a1 100644 (file)
@@ -5,6 +5,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/dsfield.h>
+#include <linux/reciprocal_div.h>
 
 /*     Random Early Detection (RED) algorithm.
        =======================================
        etc.
  */
 
+/*
+ * Adaptive RED: An Algorithm for Increasing the Robustness of RED's AQM
+ * (Sally Floyd, Ramakrishna Gummadi, and Scott Shenker), August 2001
+ *
+ * Every 500 ms:
+ *  if (avg > target and max_p <= 0.5)
+ *   increase max_p : max_p += alpha;
+ *  else if (avg < target and max_p >= 0.01)
+ *   decrease max_p : max_p *= beta;
+ *
+ * target : [qth_min + 0.4*(qth_max - qth_min),
+ *          qth_min + 0.6*(qth_max - qth_min)].
+ * alpha : min(0.01, max_p / 4)
+ * beta : 0.9
+ * max_P is a Q0.32 fixed point number (with a 32-bit mantissa)
+ * max_P is between 0.01 and 0.5 (1% - 50%) [ it is no longer a negative power of two ]
+ */
+#define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100))
+
+#define MAX_P_MIN (1 * RED_ONE_PERCENT)
+#define MAX_P_MAX (50 * RED_ONE_PERCENT)
+#define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4)
+
 #define RED_STAB_SIZE  256
 #define RED_STAB_MASK  (RED_STAB_SIZE - 1)
 
@@ -101,10 +125,14 @@ struct red_stats {
 
 struct red_parms {
        /* Parameters */
-       u32             qth_min;        /* Min avg length threshold: A scaled */
-       u32             qth_max;        /* Max avg length threshold: A scaled */
+       u32             qth_min;        /* Min avg length threshold: Wlog scaled */
+       u32             qth_max;        /* Max avg length threshold: Wlog scaled */
        u32             Scell_max;
-       u32             Rmask;          /* Cached random mask, see red_rmask */
+       u32             max_P;          /* probability, [0 .. 1.0] 32 scaled */
+       u32             max_P_reciprocal; /* reciprocal_value(max_P / qth_delta) */
+       u32             qth_delta;      /* max_th - min_th */
+       u32             target_min;     /* min_th + 0.4*(max_th - min_th) */
+       u32             target_max;     /* min_th + 0.6*(max_th - min_th) */
        u8              Scell_log;
        u8              Wlog;           /* log(W)               */
        u8              Plog;           /* random number bits   */
@@ -115,19 +143,23 @@ struct red_parms {
                                           number generation */
        u32             qR;             /* Cached random number */
 
-       unsigned long   qavg;           /* Average queue length: A scaled */
+       unsigned long   qavg;           /* Average queue length: Wlog scaled */
        ktime_t         qidlestart;     /* Start of current idle period */
 };
 
-static inline u32 red_rmask(u8 Plog)
+static inline u32 red_maxp(u8 Plog)
 {
-       return Plog < 32 ? ((1 << Plog) - 1) : ~0UL;
+       return Plog < 32 ? (~0U >> Plog) : ~0U;
 }
 
+
 static inline void red_set_parms(struct red_parms *p,
                                 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
-                                u8 Scell_log, u8 *stab)
+                                u8 Scell_log, u8 *stab, u32 max_P)
 {
+       int delta = qth_max - qth_min;
+       u32 max_p_delta;
+
        /* Reset average queue length, the value is strictly bound
         * to the parameters below, resetting hurts a bit but leaving
         * it might result in an unreasonable qavg for a while. --TGR
@@ -139,14 +171,33 @@ static inline void red_set_parms(struct red_parms *p,
        p->qth_max      = qth_max << Wlog;
        p->Wlog         = Wlog;
        p->Plog         = Plog;
-       p->Rmask        = red_rmask(Plog);
+       if (delta < 0)
+               delta = 1;
+       p->qth_delta    = delta;
+       if (!max_P) {
+               max_P = red_maxp(Plog);
+               max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */
+       }
+       p->max_P = max_P;
+       max_p_delta = max_P / delta;
+       max_p_delta = max(max_p_delta, 1U);
+       p->max_P_reciprocal  = reciprocal_value(max_p_delta);
+
+       /* RED Adaptive target :
+        * [min_th + 0.4*(max_th - min_th),
+        *  min_th + 0.6*(max_th - min_th)].
+        */
+       delta /= 5;
+       p->target_min = qth_min + 2*delta;
+       p->target_max = qth_min + 3*delta;
+
        p->Scell_log    = Scell_log;
        p->Scell_max    = (255 << Scell_log);
 
        memcpy(p->Stab, stab, sizeof(p->Stab));
 }
 
-static inline int red_is_idling(struct red_parms *p)
+static inline int red_is_idling(const struct red_parms *p)
 {
        return p->qidlestart.tv64 != 0;
 }
@@ -168,7 +219,7 @@ static inline void red_restart(struct red_parms *p)
        p->qcount = -1;
 }
 
-static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
+static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p)
 {
        s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
        long us_idle = min_t(s64, delta, p->Scell_max);
@@ -215,7 +266,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
        }
 }
 
-static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
+static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p,
                                                       unsigned int backlog)
 {
        /*
@@ -230,7 +281,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(struct red_parms *p,
        return p->qavg + (backlog - (p->qavg >> p->Wlog));
 }
 
-static inline unsigned long red_calc_qavg(struct red_parms *p,
+static inline unsigned long red_calc_qavg(const struct red_parms *p,
                                          unsigned int backlog)
 {
        if (!red_is_idling(p))
@@ -239,23 +290,24 @@ static inline unsigned long red_calc_qavg(struct red_parms *p,
                return red_calc_qavg_from_idle_time(p);
 }
 
-static inline u32 red_random(struct red_parms *p)
+
+static inline u32 red_random(const struct red_parms *p)
 {
-       return net_random() & p->Rmask;
+       return reciprocal_divide(net_random(), p->max_P_reciprocal);
 }
 
-static inline int red_mark_probability(struct red_parms *p, unsigned long qavg)
+static inline int red_mark_probability(const struct red_parms *p, unsigned long qavg)
 {
        /* The formula used below causes questions.
 
-          OK. qR is random number in the interval 0..Rmask
+          OK. qR is a random number in the interval
+               (0..1/max_P)*(qth_max-qth_min)
           i.e. 0..(2^Plog). If we used floating point
           arithmetic, it would be: (2^Plog)*rnd_num,
           where rnd_num is less than 1.
 
           Taking into account that qavg has a fixed
-          point at Wlog, and Plog is related to max_P by
-          max_P = (qth_max-qth_min)/2^Plog; two lines
+          point at Wlog, two lines
           below have the following floating point equivalent:
 
           max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount
@@ -315,4 +367,25 @@ static inline int red_action(struct red_parms *p, unsigned long qavg)
        return RED_DONT_MARK;
 }
 
+static inline void red_adaptative_algo(struct red_parms *p)
+{
+       unsigned long qavg;
+       u32 max_p_delta;
+
+       qavg = p->qavg;
+       if (red_is_idling(p))
+               qavg = red_calc_qavg_from_idle_time(p);
+
+       /* p->qavg is fixed point number with point at Wlog */
+       qavg >>= p->Wlog;
+
+       if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
+               p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */
+       else if (qavg < p->target_min && p->max_P >= MAX_P_MIN)
+               p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */
+
+       max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta);
+       max_p_delta = max(max_p_delta, 1U);
+       p->max_P_reciprocal = reciprocal_value(max_p_delta);
+}
 #endif
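
A rough, illustrative sketch of how a queueing discipline might drive the new
adaptive update above. Only red_adaptative_algo() comes from this header; the
private data, lock and 500 ms timer below are hypothetical:

	struct my_red_sched_data {		/* hypothetical qdisc private data */
		struct red_parms parms;
		struct timer_list adapt_timer;
		spinlock_t *lock;
	};

	static void my_red_adapt_timer(unsigned long arg)
	{
		struct my_red_sched_data *q = (struct my_red_sched_data *)arg;

		spin_lock(q->lock);
		red_adaptative_algo(&q->parms);		/* nudge max_P toward the target band */
		mod_timer(&q->adapt_timer, jiffies + HZ / 2);	/* re-arm: every 500 ms */
		spin_unlock(q->lock);
	}
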
index eb7d3c2..a5f7993 100644 (file)
@@ -48,6 +48,10 @@ enum environment_cap {
  *     99 - built by driver but a specific alpha2 cannot be determined
  *     98 - result of an intersection between two regulatory domains
  *     97 - regulatory domain has not yet been configured
+ * @dfs_region: If CRDA responded with a regulatory domain that requires
+ *     DFS master operation on a known DFS region (NL80211_DFS_*),
+ *     dfs_region represents that region. Drivers can use this and the
+ *     @alpha2 to adjust their device's DFS parameters as required.
  * @intersect: indicates whether the wireless core should intersect
  *     the requested regulatory domain with the presently set regulatory
  *     domain.
@@ -67,6 +71,7 @@ struct regulatory_request {
        int wiphy_idx;
        enum nl80211_reg_initiator initiator;
        char alpha2[2];
+       u8 dfs_region;
        bool intersect;
        bool processed;
        enum environment_cap country_ie_env;
@@ -93,6 +98,7 @@ struct ieee80211_reg_rule {
 struct ieee80211_regdomain {
        u32 n_reg_rules;
        char alpha2[2];
+       u8 dfs_region;
        struct ieee80211_reg_rule reg_rules[];
 };
 
index 6a72a58..d368561 100644 (file)
@@ -71,7 +71,7 @@
 #include <linux/jiffies.h>
 #include <linux/idr.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #endif
@@ -383,7 +383,7 @@ static inline void sctp_sysctl_unregister(void) { return; }
 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 void sctp_v6_pf_init(void);
 void sctp_v6_pf_exit(void);
index e90e7a9..ad0e31b 100644 (file)
@@ -365,7 +365,7 @@ static inline struct sock *sctp_opt2sk(const struct sctp_sock *sp)
        return (struct sock *)sp;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct sctp6_sock {
        struct sctp_sock  sctp;
        struct ipv6_pinfo inet6;
@@ -1085,6 +1085,7 @@ void sctp_transport_burst_reset(struct sctp_transport *);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
 void sctp_transport_reset(struct sctp_transport *);
 void sctp_transport_update_pmtu(struct sctp_transport *, u32);
+void sctp_transport_immediate_rtx(struct sctp_transport *);
 
 
 /* This is the structure we use to queue packets as they come into
index 8f0f9ac..2f65e16 100644 (file)
@@ -67,7 +67,7 @@ struct icmp_mib {
 
 #define ICMPMSG_MIB_MAX        __ICMPMSG_MIB_MAX
 struct icmpmsg_mib {
-       unsigned long   mibs[ICMPMSG_MIB_MAX];
+       atomic_long_t   mibs[ICMPMSG_MIB_MAX];
 };
 
 /* ICMP6 (IPv6-ICMP) */
@@ -84,7 +84,7 @@ struct icmpv6_mib_device {
 #define ICMP6MSG_MIB_MAX  __ICMP6MSG_MIB_MAX
 /* per network ns counters */
 struct icmpv6msg_mib {
-       unsigned long   mibs[ICMP6MSG_MIB_MAX];
+       atomic_long_t   mibs[ICMP6MSG_MIB_MAX];
 };
 /* per device counters, (shared on all cpus) */
 struct icmpv6msg_mib_device {
index abb6e0f..6fe0dae 100644 (file)
@@ -53,6 +53,8 @@
 #include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 
+struct cgroup;
+struct cgroup_subsys;
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
 /*
  * This structure really needs to be cleaned up.
  * Most of it is for TCP, and not used by any of
@@ -167,6 +173,7 @@ struct sock_common {
        /* public: */
 };
 
+struct cg_proto;
 /**
   *    struct sock - network layer representation of sockets
   *    @__sk_common: shared layout with inet_timewait_sock
@@ -227,6 +234,7 @@ struct sock_common {
   *    @sk_security: used by security modules
   *    @sk_mark: generic packet mark
   *    @sk_classid: this socket's cgroup classid
+  *    @sk_cgrp: this socket's cgroup-specific proto data
   *    @sk_write_pending: a write to stream socket waits to start
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
@@ -306,8 +314,8 @@ struct sock {
        kmemcheck_bitfield_end(flags);
        int                     sk_wmem_queued;
        gfp_t                   sk_allocation;
-       int                     sk_route_caps;
-       int                     sk_route_nocaps;
+       netdev_features_t       sk_route_caps;
+       netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        int                     sk_rcvlowat;
@@ -320,6 +328,9 @@ struct sock {
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
+#ifdef CONFIG_CGROUPS
+       __u32                   sk_cgrp_prioidx;
+#endif
        struct pid              *sk_peer_pid;
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
@@ -338,6 +349,7 @@ struct sock {
 #endif
        __u32                   sk_mark;
        u32                     sk_classid;
+       struct cg_proto         *sk_cgrp;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk, int bytes);
        void                    (*sk_write_space)(struct sock *sk);
@@ -563,6 +575,7 @@ enum sock_flags {
        SOCK_FASYNC, /* fasync() active */
        SOCK_RXQ_OVFL,
        SOCK_ZEROCOPY, /* buffers from userspace */
+       SOCK_WIFI_STATUS, /* push wifi status to userspace */
 };
 
 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -833,6 +846,37 @@ struct proto {
 #ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       /*
+        * cgroup-specific init/deinit functions. Called once for all
+        * protocols that implement it, from the cgroup's populate function.
+        * This function has to set up any files the protocol wants to
+        * appear in the kmem cgroup filesystem.
+        */
+       int                     (*init_cgroup)(struct cgroup *cgrp,
+                                              struct cgroup_subsys *ss);
+       void                    (*destroy_cgroup)(struct cgroup *cgrp,
+                                                 struct cgroup_subsys *ss);
+       struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+struct cg_proto {
+       void                    (*enter_memory_pressure)(struct sock *sk);
+       struct res_counter      *memory_allocated;      /* Current allocated memory. */
+       struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
+       int                     *memory_pressure;
+       long                    *sysctl_mem;
+       /*
+        * The memcg field is used to find which memcg we belong to directly.
+        * Each memcg struct can hold more than one cg_proto, so container_of
+        * won't really cut it.
+        *
+        * The elegant solution would be having an inverse function to
+        * proto_cgroup in struct proto, but that means polluting the structure
+        * for everybody, instead of just for memcg users.
+        */
+       struct mem_cgroup       *memcg;
 };
 
 extern int proto_register(struct proto *prot, int alloc_slab);
@@ -851,7 +895,7 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
               sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
 }
 
-static inline void sk_refcnt_debug_release(const struct sock *sk)
+inline void sk_refcnt_debug_release(const struct sock *sk)
 {
        if (atomic_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
@@ -863,6 +907,208 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+extern struct jump_label_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+                                              struct cg_proto *cg_proto)
+{
+       return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+                                              struct cg_proto *cg_proto)
+{
+       return NULL;
+}
+#endif
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+       return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+       if (!sk->sk_prot->memory_pressure)
+               return false;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return !!*sk->sk_cgrp->memory_pressure;
+
+       return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+       int *memory_pressure = sk->sk_prot->memory_pressure;
+
+       if (!memory_pressure)
+               return;
+
+       if (*memory_pressure)
+               *memory_pressure = 0;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+               struct proto *prot = sk->sk_prot;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       if (*cg_proto->memory_pressure)
+                               *cg_proto->memory_pressure = 0;
+       }
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+       if (!sk->sk_prot->enter_memory_pressure)
+               return;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+               struct proto *prot = sk->sk_prot;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       cg_proto->enter_memory_pressure(sk);
+       }
+
+       sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+       long *prot = sk->sk_prot->sysctl_mem;
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               prot = sk->sk_cgrp->sysctl_mem;
+       return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+                                             unsigned long amt,
+                                             int *parent_status)
+{
+       struct res_counter *fail;
+       int ret;
+
+       ret = res_counter_charge(prot->memory_allocated,
+                                amt << PAGE_SHIFT, &fail);
+
+       if (ret < 0)
+               *parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+                                             unsigned long amt)
+{
+       res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+       u64 ret;
+       ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+       return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return memcg_memory_allocated_read(sk->sk_cgrp);
+
+       return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+               /* update the root cgroup regardless */
+               atomic_long_add_return(amt, prot->memory_allocated);
+               return memcg_memory_allocated_read(sk->sk_cgrp);
+       }
+
+       return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+           parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+               memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+       atomic_long_sub(amt, prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       percpu_counter_dec(cg_proto->sockets_allocated);
+       }
+
+       percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+               struct cg_proto *cg_proto = sk->sk_cgrp;
+
+               for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+                       percpu_counter_inc(cg_proto->sockets_allocated);
+       }
+
+       percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+       struct proto *prot = sk->sk_prot;
+
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+
+       return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+       return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+       return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+       if (!prot->memory_pressure)
+               return false;
+       return !!*prot->memory_pressure;
+}
+
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
@@ -1089,8 +1335,8 @@ extern struct sock                *sk_alloc(struct net *net, int family,
                                          struct proto *prot);
 extern void                    sk_free(struct sock *sk);
 extern void                    sk_release_kernel(struct sock *sk);
-extern struct sock             *sk_clone(const struct sock *sk,
-                                         const gfp_t priority);
+extern struct sock             *sk_clone_lock(const struct sock *sk,
+                                              const gfp_t priority);
 
 extern struct sk_buff          *sock_wmalloc(struct sock *sk,
                                              unsigned long size, int force,
@@ -1393,7 +1639,7 @@ static inline int sk_can_gso(const struct sock *sk)
 
 extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
-static inline void sk_nocaps_add(struct sock *sk, int flags)
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
        sk->sk_route_nocaps |= flags;
        sk->sk_route_caps &= ~flags;
@@ -1670,7 +1916,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 
        page = alloc_pages(sk->sk_allocation, 0);
        if (!page) {
-               sk->sk_prot->enter_memory_pressure(sk);
+               sk_enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return page;
@@ -1714,6 +1960,8 @@ static inline int sock_intr_errno(long timeo)
 
 extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
        struct sk_buff *skb);
+extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+       struct sk_buff *skb);
 
 static __inline__ void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -1741,6 +1989,9 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
                __sock_recv_timestamp(msg, sk, skb);
        else
                sk->sk_stamp = kt;
+
+       if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+               __sock_recv_wifi_status(msg, sk, skb);
 }
 
 extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
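
A sketch of the call pattern the memory-accounting helpers added to sock.h
above are meant to support in a protocol's memory-scheduling path (simplified;
the function name and the use of UNDER_LIMIT as the initial status are
assumptions, not taken from this patch):

	static int my_charge_pages(struct sock *sk, int pages)
	{
		int parent_status = UNDER_LIMIT;	/* assumed initial status value */
		long allocated = sk_memory_allocated_add(sk, pages, &parent_status);

		if (parent_status == OVER_LIMIT ||
		    allocated > sk_prot_mem_limits(sk, 2)) {
			sk_enter_memory_pressure(sk);
			sk_memory_allocated_sub(sk, pages, parent_status);
			return 0;			/* charge refused */
		}
		if (allocated <= sk_prot_mem_limits(sk, 0))
			sk_leave_memory_pressure(sk);
		return 1;				/* charge accepted */
	}
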
index bb18c4d..a4f52e1 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/dst.h>
 
 #include <linux/seq_file.h>
+#include <linux/memcontrol.h>
 
 extern struct inet_hashinfo tcp_hashinfo;
 
@@ -229,7 +230,6 @@ extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -285,7 +285,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
        }
 
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-           atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+           sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
 }
@@ -628,7 +628,7 @@ extern u32 __tcp_select_window(struct sock *sk);
 struct tcp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;       /* For incoming frames          */
@@ -834,6 +834,14 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
+/* The maximum number of MSS of available cwnd for which TSO defers
+ * sending if not using sysctl_tcp_tso_win_divisor.
+ */
+static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
+{
+       return 3;
+}
+
 /* Slow start with delack produces 3 packets of burst, so that
  * it is safe "de facto".  This will be the default - same as
  * the default reordering threshold - but if reordering increases,
@@ -1144,7 +1152,7 @@ struct tcp6_md5sig_key {
 /* - sock block */
 struct tcp_md5sig_info {
        struct tcp4_md5sig_key  *keys4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_md5sig_key  *keys6;
        u32                     entries6;
        u32                     alloced6;
@@ -1171,7 +1179,7 @@ struct tcp6_pseudohdr {
 
 union tcp_md5sum_block {
        struct tcp4_pseudohdr ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct tcp6_pseudohdr ip6;
 #endif
 };
@@ -1430,7 +1438,8 @@ extern struct request_sock_ops tcp6_request_sock_ops;
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+                                      netdev_features_t features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h
new file mode 100644 (file)
index 0000000..3512082
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _TCP_MEMCG_H
+#define _TCP_MEMCG_H
+
+struct tcp_memcontrol {
+       struct cg_proto cg_proto;
+       /* per-cgroup tcp memory pressure knobs */
+       struct res_counter tcp_memory_allocated;
+       struct percpu_counter tcp_sockets_allocated;
+       /* those two are read-mostly, leave them at the end */
+       long tcp_prot_mem[3];
+       int tcp_memory_pressure;
+};
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss);
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
+#endif /* _TCP_MEMCG_H */
index 3b285f4..e39592f 100644 (file)
@@ -41,7 +41,7 @@
 struct udp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
@@ -194,9 +194,15 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                                    __be32 daddr, __be16 dport,
                                    int dif);
+extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                                   __be32 daddr, __be16 dport,
+                                   int dif, struct udp_table *tbl);
 extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
                                    const struct in6_addr *daddr, __be16 dport,
                                    int dif);
+extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
+                                   const struct in6_addr *daddr, __be16 dport,
+                                   int dif, struct udp_table *tbl);
 
 /*
  *     SNMP statistics for UDP and UDP-Lite
@@ -217,7 +223,7 @@ extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *sadd
        else        SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define UDPX_INC_STATS_BH(sk, field) \
        do { \
                if ((sk)->sk_family == AF_INET) \
@@ -258,5 +264,6 @@ extern void udp4_proc_exit(void);
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+       netdev_features_t features);
 #endif /* _UDP_H */
index b203e14..89174e2 100644 (file)
@@ -827,6 +827,14 @@ static inline bool addr_match(const void *token1, const void *token2,
        return true;
 }
 
+static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
+{
+       /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
+       if (prefixlen == 0)
+               return true;
+       return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
+}
+
 static __inline__
 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
 {
@@ -1209,8 +1217,8 @@ void xfrm_flowi_addr_get(const struct flowi *fl,
                memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
                break;
        case AF_INET6:
-               ipv6_addr_copy((struct in6_addr *)&saddr->a6, &fl->u.ip6.saddr);
-               ipv6_addr_copy((struct in6_addr *)&daddr->a6, &fl->u.ip6.daddr);
+               *(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
+               *(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
                break;
        }
 }
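
A quick worked example of the addr4_match() helper added above (the addresses
are illustrative): with prefixlen == 24 the mask is htonl(0xffffff00), so
10.0.0.1 and 10.0.0.200 match while 10.0.1.1 does not; prefixlen == 0 returns
true unconditionally, avoiding the undefined 32-bit shift.

	bool same = addr4_match(htonl(0x0a000001) /* 10.0.0.1 */,
				htonl(0x0a0000c8) /* 10.0.0.200 */, 24);	/* true */
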
index 43298f9..b8930d5 100644 (file)
@@ -689,6 +689,17 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
          For those who want to have the feature enabled by default should
          select this option (if, for some reason, they need to disable it
          then swapaccount=0 does the trick).
+config CGROUP_MEM_RES_CTLR_KMEM
+       bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
+       depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL
+       default n
+       help
+         The Kernel Memory extension for the Memory Resource Controller can
+         limit the amount of memory used by kernel objects in the system.
+         These are fundamentally different from the entities handled by the
+         standard Memory Controller, which are page-based and can be swapped.
+         Users of the kmem extension can use it to guarantee that no group of
+         processes will ever exhaust kernel resources alone.
 
 config CGROUP_PERF
        bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
index 32f3e5a..63b5782 100644 (file)
@@ -244,6 +244,9 @@ config CPU_RMAP
        bool
        depends on SMP
 
+config DQL
+       bool
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
index a4da283..ff00d4d 100644 (file)
@@ -115,6 +115,8 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
 
+obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
 
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
new file mode 100644 (file)
index 0000000..3d1bdcd
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Dynamic byte queue limits.  See include/linux/dynamic_queue_limits.h
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/dynamic_queue_limits.h>
+
+#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+
+/* Records completed count and recalculates the queue limit */
+void dql_completed(struct dql *dql, unsigned int count)
+{
+       unsigned int inprogress, prev_inprogress, limit;
+       unsigned int ovlimit, all_prev_completed, completed;
+
+       /* Can't complete more than what's in queue */
+       BUG_ON(count > dql->num_queued - dql->num_completed);
+
+       completed = dql->num_completed + count;
+       limit = dql->limit;
+       ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+       inprogress = dql->num_queued - completed;
+       prev_inprogress = dql->prev_num_queued - dql->num_completed;
+       all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+
+       if ((ovlimit && !inprogress) ||
+           (dql->prev_ovlimit && all_prev_completed)) {
+               /*
+                * Queue considered starved if:
+                *   - The queue was over-limit in the last interval,
+                *     and there is no more data in the queue.
+                *  OR
+                *   - The queue was over-limit in the previous interval and
+                *     when enqueuing it was possible that all queued data
+                *     had been consumed.  This covers the case when the queue
+                *     may have become starved between completion processing
+                *     running and the next time an enqueue was scheduled.
+                *
+                *     When the queue is starved, increase the limit by the amount
+                *     of bytes both sent and completed in the last interval,
+                *     plus any previous over-limit.
+                */
+               limit += POSDIFF(completed, dql->prev_num_queued) +
+                    dql->prev_ovlimit;
+               dql->slack_start_time = jiffies;
+               dql->lowest_slack = UINT_MAX;
+       } else if (inprogress && prev_inprogress && !all_prev_completed) {
+               /*
+                * Queue was not starved, check if the limit can be decreased.
+                * A decrease is only considered if the queue has been busy in
+                * the whole interval (the check above).
+                *
+                * If there is slack, i.e. the amount of excess data queued above
+                * the amount needed to prevent starvation, the queue limit
+                * can be decreased.  To avoid hysteresis we consider the
+                * minimum amount of slack found over several iterations of the
+                * completion routine.
+                */
+               unsigned int slack, slack_last_objs;
+
+               /*
+                * Slack is the maximum of
+                *   - The queue limit plus previous over-limit minus twice
+                *     the number of objects completed.  Note that twice the
+                *     number of completed bytes is a basis for an upper bound
+                *     on the limit.
+                *   - The portion of objects in the last queuing operation that
+                *     was not part of a non-zero previous over-limit.  That is,
+                *     "rounded down" by the non-over-limit portion of the last
+                *     queuing operation.
+                */
+               slack = POSDIFF(limit + dql->prev_ovlimit,
+                   2 * (completed - dql->num_completed));
+               slack_last_objs = dql->prev_ovlimit ?
+                   POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
+
+               slack = max(slack, slack_last_objs);
+
+               if (slack < dql->lowest_slack)
+                       dql->lowest_slack = slack;
+
+               if (time_after(jiffies,
+                              dql->slack_start_time + dql->slack_hold_time)) {
+                       limit = POSDIFF(limit, dql->lowest_slack);
+                       dql->slack_start_time = jiffies;
+                       dql->lowest_slack = UINT_MAX;
+               }
+       }
+
+       /* Enforce bounds on limit */
+       limit = clamp(limit, dql->min_limit, dql->max_limit);
+
+       if (limit != dql->limit) {
+               dql->limit = limit;
+               ovlimit = 0;
+       }
+
+       dql->adj_limit = limit + completed;
+       dql->prev_ovlimit = ovlimit;
+       dql->prev_last_obj_cnt = dql->last_obj_cnt;
+       dql->num_completed = completed;
+       dql->prev_num_queued = dql->num_queued;
+}
+EXPORT_SYMBOL(dql_completed);
+
+void dql_reset(struct dql *dql)
+{
+       /* Reset all dynamic values */
+       dql->limit = 0;
+       dql->num_queued = 0;
+       dql->num_completed = 0;
+       dql->last_obj_cnt = 0;
+       dql->prev_num_queued = 0;
+       dql->prev_last_obj_cnt = 0;
+       dql->prev_ovlimit = 0;
+       dql->lowest_slack = UINT_MAX;
+       dql->slack_start_time = jiffies;
+}
+EXPORT_SYMBOL(dql_reset);
+
+int dql_init(struct dql *dql, unsigned hold_time)
+{
+       dql->max_limit = DQL_MAX_LIMIT;
+       dql->min_limit = 0;
+       dql->slack_hold_time = hold_time;
+       dql_reset(dql);
+       return 0;
+}
+EXPORT_SYMBOL(dql_init);
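
dql_completed() above is only half of the mechanism: the enqueue side records
bytes posted to the hardware ring and checks the remaining budget.  Below is a
minimal sketch of the intended driver pattern, assuming the companion header
added by this series (include/linux/dynamic_queue_limits.h) provides
dql_queued() and dql_avail(); the ring structure and function names are
hypothetical:

#include <linux/dynamic_queue_limits.h>
#include <linux/jiffies.h>

struct my_tx_ring {
	struct dql dql;
};

static int my_ring_init(struct my_tx_ring *ring)
{
	/* Consider lowering the limit (slack check) at most once per HZ. */
	return dql_init(&ring->dql, HZ);
}

/* Called when 'bytes' worth of packets are posted to the hardware ring. */
static bool my_ring_enqueue(struct my_tx_ring *ring, unsigned int bytes)
{
	dql_queued(&ring->dql, bytes);
	/* Tell the caller to stop the queue once the dynamic limit is used up. */
	return dql_avail(&ring->dql) >= 0;
}

/* Called from TX completion for 'bytes' that have now left the hardware. */
static void my_ring_complete(struct my_tx_ring *ring, unsigned int bytes)
{
	dql_completed(&ring->dql, bytes);
	/* Restart the queue if dql_avail(&ring->dql) is positive again. */
}
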
index 6a3bd48..75510e9 100644 (file)
@@ -1,5 +1,6 @@
 #include <asm/div64.h>
 #include <linux/reciprocal_div.h>
+#include <linux/export.h>
 
 u32 reciprocal_value(u32 k)
 {
@@ -7,3 +8,4 @@ u32 reciprocal_value(u32 k)
        do_div(val, k);
        return (u32)val;
 }
+EXPORT_SYMBOL(reciprocal_value);
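
Exporting reciprocal_value() makes it usable from modules; together with
reciprocal_divide() from linux/reciprocal_div.h it turns repeated division by a
run-time constant into a multiply and shift, the pattern slab already uses for
object indexing.  A hedged sketch of that pattern (the cache structure and
helper names below are illustrative, not from this patch):

#include <linux/reciprocal_div.h>
#include <linux/types.h>

struct obj_cache {
	u32 obj_size;
	u32 obj_size_reciprocal;	/* precomputed once at setup */
};

static void obj_cache_setup(struct obj_cache *c, u32 obj_size)
{
	c->obj_size = obj_size;
	c->obj_size_reciprocal = reciprocal_value(obj_size);
}

/* offset / obj_size on the fast path, without a hardware divide */
static u32 obj_index(const struct obj_cache *c, u32 offset)
{
	return reciprocal_divide(offset, c->obj_size_reciprocal);
}
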
index 993599e..8e75003 100644 (file)
@@ -777,6 +777,18 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
+static
+char *netdev_feature_string(char *buf, char *end, const u8 *addr,
+                     struct printf_spec spec)
+{
+       spec.flags |= SPECIAL | SMALL | ZEROPAD;
+       if (spec.field_width == -1)
+               spec.field_width = 2 + 2 * sizeof(netdev_features_t);
+       spec.base = 16;
+
+       return number(buf, end, *(const netdev_features_t *)addr, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -824,6 +836,7 @@ int kptr_restrict __read_mostly;
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
  * - 'K' For a kernel pointer that should be hidden from unprivileged users
+ * - 'NF' For a netdev_features_t
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -896,6 +909,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                       has_capability_noaudit(current, CAP_SYSLOG))))
                        ptr = NULL;
                break;
+       case 'N':
+               switch (fmt[1]) {
+               case 'F':
+                       return netdev_feature_string(buf, end, ptr, spec);
+               }
+               break;
        }
        spec.flags |= SMALL;
        if (spec.field_width == -1) {
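
Once the 'NF' extension above is wired up, a netdev_features_t is printed by
passing its address to any printk-family call.  A small sketch of a plausible
caller; the helper name and message text are illustrative only:

#include <linux/netdevice.h>

/* Illustrative only: log a feature-mask change via the new %pNF specifier. */
static void report_feature_change(struct net_device *dev,
				  netdev_features_t old_features)
{
	if (old_features != dev->features)
		netdev_info(dev, "features changed: %pNF -> %pNF\n",
			    &old_features, &dev->features);
}
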
index 6aff93c..7266202 100644 (file)
@@ -50,6 +50,8 @@
 #include <linux/cpu.h>
 #include <linux/oom.h>
 #include "internal.h"
+#include <net/sock.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -227,6 +229,10 @@ struct mem_cgroup {
         */
        struct res_counter memsw;
        /*
+        * the counter to account for kmem usage.
+        */
+       struct res_counter kmem;
+       /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
@@ -277,6 +283,11 @@ struct mem_cgroup {
         */
        unsigned long   move_charge_at_immigrate;
        /*
+        * Should kernel memory limits be established independently
+        * of user memory?
+        */
+       int             kmem_independent_accounting;
+       /*
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
@@ -286,6 +297,10 @@ struct mem_cgroup {
         */
        struct mem_cgroup_stat_cpu nocpu_base;
        spinlock_t pcp_counter_lock;
+
+#ifdef CONFIG_INET
+       struct tcp_memcontrol tcp_mem;
+#endif
 };
 
 /* Stuffs for move charges at task migration. */
@@ -344,9 +359,14 @@ enum charge_type {
 };
 
 /* for encoding cft->private value on file */
-#define _MEM                   (0)
-#define _MEMSWAP               (1)
-#define _OOM_TYPE              (2)
+
+enum mem_type {
+       _MEM = 0,
+       _MEMSWAP,
+       _OOM_TYPE,
+       _KMEM,
+};
+
 #define MEMFILE_PRIVATE(x, val)        (((x) << 16) | (val))
 #define MEMFILE_TYPE(val)      (((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)      ((val) & 0xffff)
@@ -365,7 +385,58 @@ enum charge_type {
 
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+
+/* Writing them here to avoid exposing memcg's inner layout */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_INET
+#include <net/sock.h>
+#include <net/ip.h>
+
+static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
+void sock_update_memcg(struct sock *sk)
+{
+       /* A socket spends its whole life in the same cgroup */
+       if (sk->sk_cgrp) {
+               WARN_ON(1);
+               return;
+       }
+       if (static_branch(&memcg_socket_limit_enabled)) {
+               struct mem_cgroup *memcg;
+
+               BUG_ON(!sk->sk_prot->proto_cgroup);
+
+               rcu_read_lock();
+               memcg = mem_cgroup_from_task(current);
+               if (!mem_cgroup_is_root(memcg)) {
+                       mem_cgroup_get(memcg);
+                       sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+               }
+               rcu_read_unlock();
+       }
+}
+EXPORT_SYMBOL(sock_update_memcg);
+
+void sock_release_memcg(struct sock *sk)
+{
+       if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+               struct mem_cgroup *memcg;
+               WARN_ON(!sk->sk_cgrp->memcg);
+               memcg = sk->sk_cgrp->memcg;
+               mem_cgroup_put(memcg);
+       }
+}
+
+struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
+{
+       if (!memcg || mem_cgroup_is_root(memcg))
+               return NULL;
+
+       return &memcg->tcp_mem.cg_proto;
+}
+EXPORT_SYMBOL(tcp_proto_cgroup);
+#endif /* CONFIG_INET */
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -745,7 +816,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
        preempt_enable();
 }
 
-static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
@@ -3848,10 +3919,17 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
        u64 val;
 
        if (!mem_cgroup_is_root(memcg)) {
+               val = 0;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+               if (!memcg->kmem_independent_accounting)
+                       val = res_counter_read_u64(&memcg->kmem, RES_USAGE);
+#endif
                if (!swap)
-                       return res_counter_read_u64(&memcg->res, RES_USAGE);
+                       val += res_counter_read_u64(&memcg->res, RES_USAGE);
                else
-                       return res_counter_read_u64(&memcg->memsw, RES_USAGE);
+                       val += res_counter_read_u64(&memcg->memsw, RES_USAGE);
+
+               return val;
        }
 
        val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
@@ -3884,6 +3962,11 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
                else
                        val = res_counter_read_u64(&memcg->memsw, name);
                break;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       case _KMEM:
+               val = res_counter_read_u64(&memcg->kmem, name);
+               break;
+#endif
        default:
                BUG();
                break;
@@ -4612,6 +4695,89 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
 }
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+static u64 kmem_limit_independent_read(struct cgroup *cgroup, struct cftype *cft)
+{
+       return mem_cgroup_from_cont(cgroup)->kmem_independent_accounting;
+}
+
+static int kmem_limit_independent_write(struct cgroup *cgroup, struct cftype *cft,
+                                       u64 val)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
+       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+
+       val = !!val;
+
+       /*
+        * This follows the same hierarchy restrictions as
+        * mem_cgroup_hierarchy_write().
+        */
+       if (!parent || !parent->use_hierarchy) {
+               if (list_empty(&cgroup->children))
+                       memcg->kmem_independent_accounting = val;
+               else
+                       return -EBUSY;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+static struct cftype kmem_cgroup_files[] = {
+       {
+               .name = "independent_kmem_limit",
+               .read_u64 = kmem_limit_independent_read,
+               .write_u64 = kmem_limit_independent_write,
+       },
+       {
+               .name = "kmem.usage_in_bytes",
+               .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
+               .read_u64 = mem_cgroup_read,
+       },
+       {
+               .name = "kmem.limit_in_bytes",
+               .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+               .read_u64 = mem_cgroup_read,
+       },
+};
+
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+       int ret = 0;
+
+       ret = cgroup_add_files(cont, ss, kmem_cgroup_files,
+                              ARRAY_SIZE(kmem_cgroup_files));
+
+       /*
+        * Part of this would be better living in a separate allocation
+        * function, leaving us with just the cgroup tree population work.
+        * We, however, depend on state such as the network's proto_list,
+        * which is only initialized after cgroup creation. The least
+        * cumbersome way I found to deal with this is to defer it all to
+        * populate time.
+        */
+       if (!ret)
+               ret = mem_cgroup_sockets_init(cont, ss);
+       return ret;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+       mem_cgroup_sockets_destroy(cont, ss);
+}
+#else
+static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+       return 0;
+}
+
+static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
+                               struct cgroup *cont)
+{
+}
+#endif
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4843,12 +5009,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg)
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
-static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
        if (!memcg->res.parent)
                return NULL;
        return mem_cgroup_from_res_counter(memcg->res.parent, res);
 }
+EXPORT_SYMBOL(parent_mem_cgroup);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
@@ -4925,6 +5092,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        if (parent && parent->use_hierarchy) {
                res_counter_init(&memcg->res, &parent->res);
                res_counter_init(&memcg->memsw, &parent->memsw);
+               res_counter_init(&memcg->kmem, &parent->kmem);
                /*
                 * We increment refcnt of the parent to ensure that we can
                 * safely access it on res_counter_charge/uncharge.
@@ -4935,6 +5103,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        } else {
                res_counter_init(&memcg->res, NULL);
                res_counter_init(&memcg->memsw, NULL);
+               res_counter_init(&memcg->kmem, NULL);
        }
        memcg->last_scanned_child = 0;
        memcg->last_scanned_node = MAX_NUMNODES;
@@ -4965,6 +5134,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+       kmem_cgroup_destroy(ss, cont);
+
        mem_cgroup_put(memcg);
 }
 
@@ -4978,6 +5149,10 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
 
        if (!ret)
                ret = register_memsw_files(cont, ss);
+
+       if (!ret)
+               ret = register_kmem_files(cont, ss);
+
        return ret;
 }
 
index 5471628..efea35b 100644 (file)
@@ -51,27 +51,6 @@ const char vlan_version[] = DRV_VERSION;
 
 /* End of global variables definitions. */
 
-static void vlan_group_free(struct vlan_group *grp)
-{
-       int i;
-
-       for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
-               kfree(grp->vlan_devices_arrays[i]);
-       kfree(grp);
-}
-
-static struct vlan_group *vlan_group_alloc(struct net_device *real_dev)
-{
-       struct vlan_group *grp;
-
-       grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL);
-       if (!grp)
-               return NULL;
-
-       grp->real_dev = real_dev;
-       return grp;
-}
-
 static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
 {
        struct net_device **array;
@@ -92,32 +71,29 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg, u16 vlan_id)
        return 0;
 }
 
-static void vlan_rcu_free(struct rcu_head *rcu)
-{
-       vlan_group_free(container_of(rcu, struct vlan_group, rcu));
-}
-
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
-       const struct net_device_ops *ops = real_dev->netdev_ops;
+       struct vlan_info *vlan_info;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;
 
        ASSERT_RTNL();
 
-       grp = rtnl_dereference(real_dev->vlgrp);
-       BUG_ON(!grp);
+       vlan_info = rtnl_dereference(real_dev->vlan_info);
+       BUG_ON(!vlan_info);
+
+       grp = &vlan_info->grp;
 
        /* Take it out of our own structures, but be sure to interlock with
         * HW accelerating devices or SW vlan input packet processing if
         * VLAN is not 0 (leave it there for 802.1p).
         */
-       if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
-               ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+       if (vlan_id)
+               vlan_vid_del(real_dev, vlan_id);
 
-       grp->nr_vlans--;
+       grp->nr_vlan_devs--;
 
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);
@@ -129,16 +105,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
         */
        unregister_netdevice_queue(dev, head);
 
-       /* If the group is now empty, kill off the group. */
-       if (grp->nr_vlans == 0) {
+       if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
 
-               RCU_INIT_POINTER(real_dev->vlgrp, NULL);
-
-               /* Free the group, after all cpu's are done. */
-               call_rcu(&grp->rcu, vlan_rcu_free);
-       }
-
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
 }
@@ -167,21 +136,26 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 
 int register_vlan_dev(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
-       const struct net_device_ops *ops = real_dev->netdev_ops;
        u16 vlan_id = vlan->vlan_id;
-       struct vlan_group *grp, *ngrp = NULL;
+       struct vlan_info *vlan_info;
+       struct vlan_group *grp;
        int err;
 
-       grp = rtnl_dereference(real_dev->vlgrp);
-       if (!grp) {
-               ngrp = grp = vlan_group_alloc(real_dev);
-               if (!grp)
-                       return -ENOBUFS;
+       err = vlan_vid_add(real_dev, vlan_id);
+       if (err)
+               return err;
+
+       vlan_info = rtnl_dereference(real_dev->vlan_info);
+       /* vlan_info should be there now. vlan_vid_add took care of it */
+       BUG_ON(!vlan_info);
+
+       grp = &vlan_info->grp;
+       if (grp->nr_vlan_devs == 0) {
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
-                       goto out_free_group;
+                       goto out_vid_del;
        }
 
        err = vlan_group_prealloc_vid(grp, vlan_id);
@@ -192,7 +166,7 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_applicant;
 
-       /* Account for reference in struct vlan_dev_info */
+       /* Account for reference in struct vlan_dev_priv */
        dev_hold(real_dev);
 
        netif_stacked_transfer_operstate(real_dev, dev);
@@ -202,24 +176,15 @@ int register_vlan_dev(struct net_device *dev)
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan_id, dev);
-       grp->nr_vlans++;
-
-       if (ngrp) {
-               rcu_assign_pointer(real_dev->vlgrp, ngrp);
-       }
-       if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
-               ops->ndo_vlan_rx_add_vid(real_dev, vlan_id);
+       grp->nr_vlan_devs++;
 
        return 0;
 
 out_uninit_applicant:
-       if (ngrp)
+       if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
-out_free_group:
-       if (ngrp) {
-               /* Free the group, after all cpu's are done. */
-               call_rcu(&ngrp->rcu, vlan_rcu_free);
-       }
+out_vid_del:
+       vlan_vid_del(real_dev, vlan_id);
        return err;
 }
 
@@ -267,7 +232,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
@@ -278,10 +243,10 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
         */
        new_dev->mtu = real_dev->mtu;
 
-       vlan_dev_info(new_dev)->vlan_id = vlan_id;
-       vlan_dev_info(new_dev)->real_dev = real_dev;
-       vlan_dev_info(new_dev)->dent = NULL;
-       vlan_dev_info(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+       vlan_dev_priv(new_dev)->vlan_id = vlan_id;
+       vlan_dev_priv(new_dev)->real_dev = real_dev;
+       vlan_dev_priv(new_dev)->dent = NULL;
+       vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
 
        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev);
@@ -298,7 +263,7 @@ out_free_newdev:
 static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(vlandev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
        /* May be called without an actual change */
        if (!compare_ether_addr(vlan->real_dev_addr, dev->dev_addr))
@@ -360,25 +325,26 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 {
        struct net_device *dev = ptr;
        struct vlan_group *grp;
+       struct vlan_info *vlan_info;
        int i, flgs;
        struct net_device *vlandev;
-       struct vlan_dev_info *vlan;
+       struct vlan_dev_priv *vlan;
        LIST_HEAD(list);
 
        if (is_vlan_dev(dev))
                __vlan_device_event(dev, event);
 
        if ((event == NETDEV_UP) &&
-           (dev->features & NETIF_F_HW_VLAN_FILTER) &&
-           dev->netdev_ops->ndo_vlan_rx_add_vid) {
+           (dev->features & NETIF_F_HW_VLAN_FILTER)) {
                pr_info("adding VLAN 0 to HW filter on device %s\n",
                        dev->name);
-               dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+               vlan_vid_add(dev, 0);
        }
 
-       grp = rtnl_dereference(dev->vlgrp);
-       if (!grp)
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info)
                goto out;
+       grp = &vlan_info->grp;
 
        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
@@ -447,7 +413,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (!(flgs & IFF_UP))
                                continue;
 
-                       vlan = vlan_dev_info(vlandev);
+                       vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs & ~IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
@@ -465,7 +431,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (flgs & IFF_UP)
                                continue;
 
-                       vlan = vlan_dev_info(vlandev);
+                       vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP);
                        netif_stacked_transfer_operstate(dev, vlandev);
@@ -482,9 +448,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                        if (!vlandev)
                                continue;
 
-                       /* unregistration of last vlan destroys group, abort
+                       /* removal of last vid destroys vlan_info, abort
                         * afterwards */
-                       if (grp->nr_vlans == 1)
+                       if (vlan_info->nr_vids == 1)
                                i = VLAN_N_VID;
 
                        unregister_vlan_dev(vlandev, &list);
index 9fd45f3..a4886d9 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/if_vlan.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/list.h>
 
 
 /**
@@ -40,8 +41,10 @@ struct vlan_pcpu_stats {
        u32                     tx_dropped;
 };
 
+struct netpoll;
+
 /**
- *     struct vlan_dev_info - VLAN private device data
+ *     struct vlan_dev_priv - VLAN private device data
  *     @nr_ingress_mappings: number of ingress priority mappings
  *     @ingress_priority_map: ingress priority mappings
  *     @nr_egress_mappings: number of egress priority mappings
@@ -53,7 +56,7 @@ struct vlan_pcpu_stats {
  *     @dent: proc dir entry
  *     @vlan_pcpu_stats: ptr to percpu rx stats
  */
-struct vlan_dev_info {
+struct vlan_dev_priv {
        unsigned int                            nr_ingress_mappings;
        u32                                     ingress_priority_map[8];
        unsigned int                            nr_egress_mappings;
@@ -67,13 +70,39 @@ struct vlan_dev_info {
 
        struct proc_dir_entry                   *dent;
        struct vlan_pcpu_stats __percpu         *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll                          *netpoll;
+#endif
 };
 
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
 {
        return netdev_priv(dev);
 }
 
+/* If this changes, the algorithm will have to be reworked because it
+ * depends on completely exhausting the VLAN identifier space.  Thus
+ * it gives constant-time look-up, but in many cases it wastes memory.
+ */
+#define VLAN_GROUP_ARRAY_SPLIT_PARTS  8
+#define VLAN_GROUP_ARRAY_PART_LEN     (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS)
+
+struct vlan_group {
+       unsigned int            nr_vlan_devs;
+       struct hlist_node       hlist;  /* linked list */
+       struct net_device **vlan_devices_arrays[VLAN_GROUP_ARRAY_SPLIT_PARTS];
+};
+
+struct vlan_info {
+       struct net_device       *real_dev; /* The ethernet(like) device
+                                           * the vlan is attached to.
+                                           */
+       struct vlan_group       grp;
+       struct list_head        vid_list;
+       unsigned int            nr_vids;
+       struct rcu_head         rcu;
+};
+
 static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
                                                       u16 vlan_id)
 {
@@ -97,10 +126,10 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
 static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
                                               u16 vlan_id)
 {
-       struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+       struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-       if (grp)
-               return vlan_group_get_device(grp, vlan_id);
+       if (vlan_info)
+               return vlan_group_get_device(&vlan_info->grp, vlan_id);
 
        return NULL;
 }
@@ -121,7 +150,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
                                            u16 vlan_tci)
 {
-       struct vlan_dev_info *vip = vlan_dev_info(dev);
+       struct vlan_dev_priv *vip = vlan_dev_priv(dev);
 
        return vip->ingress_priority_map[(vlan_tci >> VLAN_PRIO_SHIFT) & 0x7];
 }
index f5ffc02..4d39d80 100644 (file)
@@ -36,7 +36,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
                        skb->pkt_type = PACKET_HOST;
        }
 
-       if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                unsigned int offset = skb->data - skb_mac_header(skb);
 
                /*
@@ -55,7 +55,7 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;
 
-       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
+       rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
 
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
@@ -71,10 +71,10 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
                                        u16 vlan_id)
 {
-       struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+       struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
 
-       if (grp) {
-               return vlan_group_get_device(grp, vlan_id);
+       if (vlan_info) {
+               return vlan_group_get_device(&vlan_info->grp, vlan_id);
        } else {
                /*
                 * Bonding slaves do not have grp assigned to themselves.
@@ -90,13 +90,13 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-       return vlan_dev_info(dev)->real_dev;
+       return vlan_dev_priv(dev)->real_dev;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
 
 u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
-       return vlan_dev_info(dev)->vlan_id;
+       return vlan_dev_priv(dev)->vlan_id;
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
@@ -110,39 +110,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
        return skb;
 }
 
-static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
-{
-       __be16 proto;
-       unsigned char *rawp;
-
-       /*
-        * Was a VLAN packet, grab the encapsulated protocol, which the layer
-        * three protocols care about.
-        */
-
-       proto = vhdr->h_vlan_encapsulated_proto;
-       if (ntohs(proto) >= 1536) {
-               skb->protocol = proto;
-               return;
-       }
-
-       rawp = skb->data;
-       if (*(unsigned short *) rawp == 0xFFFF)
-               /*
-                * This is a magic hack to spot IPX packets. Older Novell
-                * breaks the protocol design and runs IPX over 802.3 without
-                * an 802.2 LLC layer. We look for FFFF which isn't a used
-                * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
-                * but does for the rest.
-                */
-               skb->protocol = htons(ETH_P_802_3);
-       else
-               /*
-                * Real 802.2 LLC
-                */
-               skb->protocol = htons(ETH_P_802_2);
-}
-
 struct sk_buff *vlan_untag(struct sk_buff *skb)
 {
        struct vlan_hdr *vhdr;
@@ -179,3 +146,226 @@ err_free:
        kfree_skb(skb);
        return NULL;
 }
+
+
+/*
+ * vlan info and vid list
+ */
+
+static void vlan_group_free(struct vlan_group *grp)
+{
+       int i;
+
+       for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
+               kfree(grp->vlan_devices_arrays[i]);
+}
+
+static void vlan_info_free(struct vlan_info *vlan_info)
+{
+       vlan_group_free(&vlan_info->grp);
+       kfree(vlan_info);
+}
+
+static void vlan_info_rcu_free(struct rcu_head *rcu)
+{
+       vlan_info_free(container_of(rcu, struct vlan_info, rcu));
+}
+
+static struct vlan_info *vlan_info_alloc(struct net_device *dev)
+{
+       struct vlan_info *vlan_info;
+
+       vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
+       if (!vlan_info)
+               return NULL;
+
+       vlan_info->real_dev = dev;
+       INIT_LIST_HEAD(&vlan_info->vid_list);
+       return vlan_info;
+}
+
+struct vlan_vid_info {
+       struct list_head list;
+       unsigned short vid;
+       int refcount;
+};
+
+static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
+                                              unsigned short vid)
+{
+       struct vlan_vid_info *vid_info;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+               if (vid_info->vid == vid)
+                       return vid_info;
+       }
+       return NULL;
+}
+
+static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
+{
+       struct vlan_vid_info *vid_info;
+
+       vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
+       if (!vid_info)
+               return NULL;
+       vid_info->vid = vid;
+
+       return vid_info;
+}
+
+static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
+                         struct vlan_vid_info **pvid_info)
+{
+       struct net_device *dev = vlan_info->real_dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
+       struct vlan_vid_info *vid_info;
+       int err;
+
+       vid_info = vlan_vid_info_alloc(vid);
+       if (!vid_info)
+               return -ENOMEM;
+
+       if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+           ops->ndo_vlan_rx_add_vid) {
+               err =  ops->ndo_vlan_rx_add_vid(dev, vid);
+               if (err) {
+                       kfree(vid_info);
+                       return err;
+               }
+       }
+       list_add(&vid_info->list, &vlan_info->vid_list);
+       vlan_info->nr_vids++;
+       *pvid_info = vid_info;
+       return 0;
+}
+
+int vlan_vid_add(struct net_device *dev, unsigned short vid)
+{
+       struct vlan_info *vlan_info;
+       struct vlan_vid_info *vid_info;
+       bool vlan_info_created = false;
+       int err;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info) {
+               vlan_info = vlan_info_alloc(dev);
+               if (!vlan_info)
+                       return -ENOMEM;
+               vlan_info_created = true;
+       }
+       vid_info = vlan_vid_info_get(vlan_info, vid);
+       if (!vid_info) {
+               err = __vlan_vid_add(vlan_info, vid, &vid_info);
+               if (err)
+                       goto out_free_vlan_info;
+       }
+       vid_info->refcount++;
+
+       if (vlan_info_created)
+               rcu_assign_pointer(dev->vlan_info, vlan_info);
+
+       return 0;
+
+out_free_vlan_info:
+       if (vlan_info_created)
+               kfree(vlan_info);
+       return err;
+}
+EXPORT_SYMBOL(vlan_vid_add);
+
+static void __vlan_vid_del(struct vlan_info *vlan_info,
+                          struct vlan_vid_info *vid_info)
+{
+       struct net_device *dev = vlan_info->real_dev;
+       const struct net_device_ops *ops = dev->netdev_ops;
+       unsigned short vid = vid_info->vid;
+       int err;
+
+       if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
+            ops->ndo_vlan_rx_kill_vid) {
+               err = ops->ndo_vlan_rx_kill_vid(dev, vid);
+               if (err) {
+                       pr_warn("failed to kill vid %d for device %s\n",
+                               vid, dev->name);
+               }
+       }
+       list_del(&vid_info->list);
+       kfree(vid_info);
+       vlan_info->nr_vids--;
+}
+
+void vlan_vid_del(struct net_device *dev, unsigned short vid)
+{
+       struct vlan_info *vlan_info;
+       struct vlan_vid_info *vid_info;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(dev->vlan_info);
+       if (!vlan_info)
+               return;
+
+       vid_info = vlan_vid_info_get(vlan_info, vid);
+       if (!vid_info)
+               return;
+       vid_info->refcount--;
+       if (vid_info->refcount == 0) {
+               __vlan_vid_del(vlan_info, vid_info);
+               if (vlan_info->nr_vids == 0) {
+                       RCU_INIT_POINTER(dev->vlan_info, NULL);
+                       call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
+               }
+       }
+}
+EXPORT_SYMBOL(vlan_vid_del);
+
+int vlan_vids_add_by_dev(struct net_device *dev,
+                        const struct net_device *by_dev)
+{
+       struct vlan_vid_info *vid_info;
+       struct vlan_info *vlan_info;
+       int err;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(by_dev->vlan_info);
+       if (!vlan_info)
+               return 0;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+               err = vlan_vid_add(dev, vid_info->vid);
+               if (err)
+                       goto unwind;
+       }
+       return 0;
+
+unwind:
+       list_for_each_entry_continue_reverse(vid_info,
+                                            &vlan_info->vid_list,
+                                            list) {
+               vlan_vid_del(dev, vid_info->vid);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL(vlan_vids_add_by_dev);
+
+void vlan_vids_del_by_dev(struct net_device *dev,
+                         const struct net_device *by_dev)
+{
+       struct vlan_vid_info *vid_info;
+       struct vlan_info *vlan_info;
+
+       ASSERT_RTNL();
+
+       vlan_info = rtnl_dereference(by_dev->vlan_info);
+       if (!vlan_info)
+               return;
+
+       list_for_each_entry(vid_info, &vlan_info->vid_list, list)
+               vlan_vid_del(dev, vid_info->vid);
+}
+EXPORT_SYMBOL(vlan_vids_del_by_dev);
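
The refcounted VID tracking above gives stacked drivers (bonding, team and the
like) a small contract: vlan_vid_add()/vlan_vid_del() manage one VID under
RTNL, while vlan_vids_add_by_dev()/vlan_vids_del_by_dev() copy another device's
whole VID set.  A minimal sketch of how a master/slave driver might use it; the
driver and function names are hypothetical, and the declarations are assumed to
land in linux/if_vlan.h as elsewhere in this series:

#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>

/* Sketch: mirror the master's VLAN filter entries onto a new slave device. */
static int my_master_enslave(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();

	/* Takes a reference per VID and unwinds cleanly if the slave rejects one. */
	return vlan_vids_add_by_dev(slave, master);
}

static void my_master_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();

	/* Drops the references taken above; the last drop of a VID invokes the
	 * slave's ndo_vlan_rx_kill_vid() when it does hardware filtering. */
	vlan_vids_del_by_dev(slave, master);
}
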
index bc25286..9988d4a 100644 (file)
@@ -33,6 +33,7 @@
 #include "vlan.h"
 #include "vlanproc.h"
 #include <linux/if_vlan.h>
+#include <linux/netpoll.h>
 
 /*
  *     Rebuild the Ethernet MAC header. This is called after an ARP
@@ -72,7 +73,7 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
        struct vlan_priority_tci_mapping *mp;
 
-       mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+       mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
        while (mp) {
                if (mp->priority == skb->priority) {
                        return mp->vlan_qos; /* This should already be shifted
@@ -103,10 +104,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
        u16 vlan_tci = 0;
        int rc;
 
-       if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+       if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
                vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
-               vlan_tci = vlan_dev_info(dev)->vlan_id;
+               vlan_tci = vlan_dev_priv(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                vhdr->h_vlan_TCI = htons(vlan_tci);
 
@@ -129,7 +130,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                saddr = dev->dev_addr;
 
        /* Now make the underlying real hard header */
-       dev = vlan_dev_info(dev)->real_dev;
+       dev = vlan_dev_priv(dev)->real_dev;
        rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
        if (rc > 0)
                rc += vhdrlen;
@@ -149,27 +150,29 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */
        if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
-           vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+           vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
                u16 vlan_tci;
-               vlan_tci = vlan_dev_info(dev)->vlan_id;
+               vlan_tci = vlan_dev_priv(dev)->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
                skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
        }
 
-       skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
+       skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
        len = skb->len;
+       if (netpoll_tx_running(dev))
+               return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
        ret = dev_queue_xmit(skb);
 
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct vlan_pcpu_stats *stats;
 
-               stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+               stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
                u64_stats_update_end(&stats->syncp);
        } else {
-               this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+               this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
        }
 
        return ret;
@@ -180,7 +183,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
        /* TODO: gotta make sure the underlying layer can handle it,
         * maybe an IFF_VLAN_CAPABLE flag for devices?
         */
-       if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
+       if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
                return -ERANGE;
 
        dev->mtu = new_mtu;
@@ -191,7 +194,7 @@ static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
                                   u32 skb_prio, u16 vlan_prio)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
                vlan->nr_ingress_mappings--;
@@ -204,7 +207,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
 int vlan_dev_set_egress_priority(const struct net_device *dev,
                                 u32 skb_prio, u16 vlan_prio)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct vlan_priority_tci_mapping *mp = NULL;
        struct vlan_priority_tci_mapping *np;
        u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
@@ -241,7 +244,7 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
 int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        u32 old_flags = vlan->flags;
 
        if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
@@ -261,12 +264,12 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
 
 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 {
-       strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
+       strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
 static int vlan_dev_open(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        int err;
 
@@ -313,7 +316,7 @@ out:
 
 static int vlan_dev_stop(struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
 
        dev_mc_unsync(real_dev, dev);
@@ -332,7 +335,7 @@ static int vlan_dev_stop(struct net_device *dev)
 
 static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        struct sockaddr *addr = p;
        int err;
 
@@ -358,7 +361,7 @@ out:
 
 static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        struct ifreq ifrr;
        int err = -EOPNOTSUPP;
@@ -383,7 +386,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int err = 0;
 
@@ -397,7 +400,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
                                   struct scatterlist *sgl, unsigned int sgc)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;
 
@@ -409,7 +412,7 @@ static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
 
 static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int len = 0;
 
@@ -421,7 +424,7 @@ static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
 
 static int vlan_dev_fcoe_enable(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -432,7 +435,7 @@ static int vlan_dev_fcoe_enable(struct net_device *dev)
 
 static int vlan_dev_fcoe_disable(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -443,7 +446,7 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
 
 static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = -EINVAL;
 
@@ -455,7 +458,7 @@ static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
 static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
                                    struct scatterlist *sgl, unsigned int sgc)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;
 
@@ -468,7 +471,7 @@ static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
 
 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 
        if (dev->flags & IFF_UP) {
                if (change & IFF_ALLMULTI)
@@ -480,8 +483,8 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
 
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-       dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
-       dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
+       dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+       dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -519,7 +522,7 @@ static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        int subclass = 0;
 
        netif_carrier_off(dev);
@@ -568,8 +571,8 @@ static int vlan_dev_init(struct net_device *dev)
 
        vlan_dev_set_lockdep_class(dev, subclass);
 
-       vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
-       if (!vlan_dev_info(dev)->vlan_pcpu_stats)
+       vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
        return 0;
@@ -578,7 +581,7 @@ static int vlan_dev_init(struct net_device *dev)
 static void vlan_dev_uninit(struct net_device *dev)
 {
        struct vlan_priority_tci_mapping *pm;
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        int i;
 
        free_percpu(vlan->vlan_pcpu_stats);
@@ -591,18 +594,17 @@ static void vlan_dev_uninit(struct net_device *dev)
        }
 }
 
-static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
-       struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        u32 old_features = features;
 
-       features &= real_dev->features;
        features &= real_dev->vlan_features;
+       features |= NETIF_F_RXCSUM;
+       features &= real_dev->features;
 
        features |= old_features & NETIF_F_SOFT_FEATURES;
-
-       if (dev_ethtool_get_rx_csum(real_dev))
-               features |= NETIF_F_RXCSUM;
        features |= NETIF_F_LLTX;
 
        return features;
@@ -611,7 +613,7 @@ static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 static int vlan_ethtool_get_settings(struct net_device *dev,
                                     struct ethtool_cmd *cmd)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        return __ethtool_get_settings(vlan->real_dev, cmd);
 }
@@ -627,7 +629,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
-       if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+       if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
                struct vlan_pcpu_stats *p;
                u32 rx_errors = 0, tx_dropped = 0;
                int i;
@@ -636,7 +638,7 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
                        u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
                        unsigned int start;
 
-                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
+                       p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rxpackets       = p->rx_packets;
@@ -661,6 +663,57 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
        return stats;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vlan_dev_poll_controller(struct net_device *dev)
+{
+       return;
+}
+
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+{
+       struct vlan_dev_priv *info = vlan_dev_priv(dev);
+       struct net_device *real_dev = info->real_dev;
+       struct netpoll *netpoll;
+       int err = 0;
+
+       netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!netpoll)
+               goto out;
+
+       netpoll->dev = real_dev;
+       strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
+
+       err = __netpoll_setup(netpoll);
+       if (err) {
+               kfree(netpoll);
+               goto out;
+       }
+
+       info->netpoll = netpoll;
+
+out:
+       return err;
+}
+
+static void vlan_dev_netpoll_cleanup(struct net_device *dev)
+{
+       struct vlan_dev_priv *info = vlan_dev_priv(dev);
+       struct netpoll *netpoll = info->netpoll;
+
+       if (!netpoll)
+               return;
+
+       info->netpoll = NULL;
+
+       /* Wait for transmitting packets to finish before freeing. */
+       synchronize_rcu_bh();
+
+       __netpoll_cleanup(netpoll);
+       kfree(netpoll);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static const struct ethtool_ops vlan_ethtool_ops = {
        .get_settings           = vlan_ethtool_get_settings,
        .get_drvinfo            = vlan_ethtool_get_drvinfo,
@@ -689,6 +742,11 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
        .ndo_fcoe_ddp_target    = vlan_dev_fcoe_ddp_target,
 #endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = vlan_dev_poll_controller,
+       .ndo_netpoll_setup      = vlan_dev_netpoll_setup,
+       .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
+#endif
        .ndo_fix_features       = vlan_dev_fix_features,
 };
 
index 061cece..6f97553 100644 (file)
@@ -29,7 +29,7 @@ static struct garp_application vlan_gvrp_app __read_mostly = {
 
 int vlan_gvrp_request_join(const struct net_device *dev)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        __be16 vlan_id = htons(vlan->vlan_id);
 
        return garp_request_join(vlan->real_dev, &vlan_gvrp_app,
@@ -38,7 +38,7 @@ int vlan_gvrp_request_join(const struct net_device *dev)
 
 void vlan_gvrp_request_leave(const struct net_device *dev)
 {
-       const struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        __be16 vlan_id = htons(vlan->vlan_id);
 
        garp_request_leave(vlan->real_dev, &vlan_gvrp_app,
index 235c219..5071136 100644 (file)
@@ -105,7 +105,7 @@ static int vlan_changelink(struct net_device *dev,
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev;
        int err;
 
@@ -149,7 +149,7 @@ static inline size_t vlan_qos_map_size(unsigned int n)
 
 static size_t vlan_get_size(const struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 
        return nla_total_size(2) +      /* IFLA_VLAN_ID */
               sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
@@ -159,14 +159,14 @@ static size_t vlan_get_size(const struct net_device *dev)
 
 static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
-       struct vlan_dev_info *vlan = vlan_dev_info(dev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct vlan_priority_tci_mapping *pm;
        struct ifla_vlan_flags f;
        struct ifla_vlan_qos_mapping m;
        struct nlattr *nest;
        unsigned int i;
 
-       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_info(dev)->vlan_id);
+       NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id);
        if (vlan->flags) {
                f.flags = vlan->flags;
                f.mask  = ~0;
@@ -218,7 +218,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .kind           = "vlan",
        .maxtype        = IFLA_VLAN_MAX,
        .policy         = vlan_policy,
-       .priv_size      = sizeof(struct vlan_dev_info),
+       .priv_size      = sizeof(struct vlan_dev_priv),
        .setup          = vlan_setup,
        .validate       = vlan_validate,
        .newlink        = vlan_newlink,
index d34b6da..c718fd3 100644 (file)
@@ -168,13 +168,13 @@ err:
 
 int vlan_proc_add_dev(struct net_device *vlandev)
 {
-       struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
-       dev_info->dent =
+       vlan->dent =
                proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
                                 vn->proc_vlan_dir, &vlandev_fops, vlandev);
-       if (!dev_info->dent)
+       if (!vlan->dent)
                return -ENOBUFS;
        return 0;
 }
@@ -187,10 +187,10 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
        /** NOTE:  This will consume the memory pointed to by dent, it seems. */
-       if (vlan_dev_info(vlandev)->dent) {
-               remove_proc_entry(vlan_dev_info(vlandev)->dent->name,
+       if (vlan_dev_priv(vlandev)->dent) {
+               remove_proc_entry(vlan_dev_priv(vlandev)->dent->name,
                                  vn->proc_vlan_dir);
-               vlan_dev_info(vlandev)->dent = NULL;
+               vlan_dev_priv(vlandev)->dent = NULL;
        }
        return 0;
 }
@@ -268,10 +268,10 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
                           nmtype ? nmtype :  "UNKNOWN");
        } else {
                const struct net_device *vlandev = v;
-               const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+               const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
 
                seq_printf(seq, "%-15s| %d  | %s\n",  vlandev->name,
-                          dev_info->vlan_id,    dev_info->real_dev->name);
+                          vlan->vlan_id,    vlan->real_dev->name);
        }
        return 0;
 }
@@ -279,7 +279,7 @@ static int vlan_seq_show(struct seq_file *seq, void *v)
 static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
        struct net_device *vlandev = (struct net_device *) seq->private;
-       const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
+       const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats;
        static const char fmt64[] = "%30s %12llu\n";
@@ -291,8 +291,8 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        stats = dev_get_stats(vlandev, &temp);
        seq_printf(seq,
                   "%s  VID: %d  REORDER_HDR: %i  dev->priv_flags: %hx\n",
-                  vlandev->name, dev_info->vlan_id,
-                  (int)(dev_info->flags & 1), vlandev->priv_flags);
+                  vlandev->name, vlan->vlan_id,
+                  (int)(vlan->flags & 1), vlandev->priv_flags);
 
        seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
        seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
@@ -300,23 +300,23 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
        seq_puts(seq, "\n");
        seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
        seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-       seq_printf(seq, "Device: %s", dev_info->real_dev->name);
+       seq_printf(seq, "Device: %s", vlan->real_dev->name);
        /* now show all PRIORITY mappings relating to this VLAN */
        seq_printf(seq, "\nINGRESS priority mappings: "
                        "0:%u  1:%u  2:%u  3:%u  4:%u  5:%u  6:%u 7:%u\n",
-                  dev_info->ingress_priority_map[0],
-                  dev_info->ingress_priority_map[1],
-                  dev_info->ingress_priority_map[2],
-                  dev_info->ingress_priority_map[3],
-                  dev_info->ingress_priority_map[4],
-                  dev_info->ingress_priority_map[5],
-                  dev_info->ingress_priority_map[6],
-                  dev_info->ingress_priority_map[7]);
+                  vlan->ingress_priority_map[0],
+                  vlan->ingress_priority_map[1],
+                  vlan->ingress_priority_map[2],
+                  vlan->ingress_priority_map[3],
+                  vlan->ingress_priority_map[4],
+                  vlan->ingress_priority_map[5],
+                  vlan->ingress_priority_map[6],
+                  vlan->ingress_priority_map[7]);
 
        seq_printf(seq, " EGRESS priority mappings: ");
        for (i = 0; i < 16; i++) {
                const struct vlan_priority_tci_mapping *mp
-                       = dev_info->egress_priority_map[i];
+                       = vlan->egress_priority_map[i];
                while (mp) {
                        seq_printf(seq, "%u:%hu ",
                                   mp->priority, ((mp->vlan_qos >> 13) & 0x7));
index a073148..e07272d 100644 (file)
@@ -215,6 +215,7 @@ source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
 source "net/dns_resolver/Kconfig"
 source "net/batman-adv/Kconfig"
+source "net/openvswitch/Kconfig"
 
 config RPS
        boolean
@@ -232,6 +233,19 @@ config XPS
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
 
+config NETPRIO_CGROUP
+       tristate "Network priority cgroup"
+       depends on CGROUPS
+       ---help---
+         Cgroup subsystem for use in assigning processes to network priorities on
+         a per-interface basis
+
+config BQL
+       boolean
+       depends on SYSFS
+       select DQL
+       default y
+
 config HAVE_BPF_JIT
        bool
 
index acdde49..ad432fa 100644 (file)
@@ -69,3 +69,4 @@ obj-$(CONFIG_DNS_RESOLVER)    += dns_resolver/
 obj-$(CONFIG_CEPH_LIB)         += ceph/
 obj-$(CONFIG_BATMAN_ADV)       += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
+obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
index f41f026..876fbe8 100644 (file)
@@ -26,7 +26,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
                                 gfp_t gfp_flags)
 {
        struct sock *sk = sk_atm(vcc);
-       int guess = atm_guess_pdu2truesize(pdu_size);
+       int guess = SKB_TRUESIZE(pdu_size);
 
        atm_force_charge(vcc, guess);
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
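
The old atm_guess_pdu2truesize() helper is replaced by the generic SKB_TRUESIZE() macro, which charges the socket not only for the PDU itself but also for the aligned sk_buff metadata and the skb_shared_info that trails the data buffer. A minimal userspace sketch of that accounting, with placeholder struct sizes and alignment (the real macro lives in include/linux/skbuff.h):

/*
 * Rough model of what SKB_TRUESIZE() adds on top of the PDU size.  The struct
 * sizes and the cache-line size below are placeholders for this illustration
 * only; they are arch and config dependent in the real kernel.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define CACHE_LINE		64	/* assumption for this sketch */
#define SIZEOF_SK_BUFF		232	/* placeholder */
#define SIZEOF_SHARED_INFO	320	/* placeholder */

#define SKB_TRUESIZE_SKETCH(x)	((x) + ALIGN_UP(SIZEOF_SK_BUFF, CACHE_LINE) + \
				 ALIGN_UP(SIZEOF_SHARED_INFO, CACHE_LINE))

int main(void)
{
	/* A 1500-byte PDU is charged well above its payload size. */
	printf("truesize(1500) ~= %d\n", SKB_TRUESIZE_SKETCH(1500));
	return 0;
}
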
index d07223c..353fccf 100644 (file)
@@ -53,6 +53,7 @@ static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
 static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
 static const unsigned char llc_oui_pid_pad[] =
                        { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
+static const unsigned char pad[] = { PAD_BRIDGED };
 static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
 static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };
 
@@ -202,7 +203,10 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 {
        struct br2684_dev *brdev = BRPRIV(dev);
        struct atm_vcc *atmvcc;
-       int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2;
+       int minheadroom = (brvcc->encaps == e_llc) ?
+               ((brdev->payload == p_bridged) ?
+                       sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
+               ((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);
 
        if (skb_headroom(skb) < minheadroom) {
                struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
@@ -450,7 +454,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
                        skb->pkt_type = PACKET_HOST;
                } else { /* p_bridged */
                        /* first 2 chars should be 0 */
-                       if (*((u16 *) (skb->data)) != 0)
+                       if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
                                goto error;
                        skb_pull(skb, BR2684_PAD_LEN);
                        skb->protocol = eth_type_trans(skb, net_dev);
@@ -489,15 +493,11 @@ free_skb:
  */
 static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 {
-       struct sk_buff_head queue;
-       int err;
        struct br2684_vcc *brvcc;
-       struct sk_buff *skb, *tmp;
-       struct sk_buff_head *rq;
        struct br2684_dev *brdev;
        struct net_device *net_dev;
        struct atm_backend_br2684 be;
-       unsigned long flags;
+       int err;
 
        if (copy_from_user(&be, arg, sizeof be))
                return -EFAULT;
@@ -550,23 +550,6 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
        atmvcc->push = br2684_push;
        atmvcc->pop = br2684_pop;
 
-       __skb_queue_head_init(&queue);
-       rq = &sk_atm(atmvcc)->sk_receive_queue;
-
-       spin_lock_irqsave(&rq->lock, flags);
-       skb_queue_splice_init(rq, &queue);
-       spin_unlock_irqrestore(&rq->lock, flags);
-
-       skb_queue_walk_safe(&queue, skb, tmp) {
-               struct net_device *dev;
-
-               br2684_push(atmvcc, skb);
-               dev = skb->dev;
-
-               dev->stats.rx_bytes -= skb->len;
-               dev->stats.rx_packets--;
-       }
-
        /* initialize netdev carrier state */
        if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
                netif_carrier_off(net_dev);
@@ -574,6 +557,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
                netif_carrier_on(net_dev);
 
        __module_get(THIS_MODULE);
+
+       /* re-process everything received between connection setup and
+          backend setup */
+       vcc_process_recv_queue(atmvcc);
        return 0;
 
 error:
@@ -600,6 +587,7 @@ static void br2684_setup(struct net_device *netdev)
        struct br2684_dev *brdev = BRPRIV(netdev);
 
        ether_setup(netdev);
+       netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
        brdev->net_dev = netdev;
 
        netdev->netdev_ops = &br2684_netdev_ops;
@@ -612,7 +600,7 @@ static void br2684_setup_routed(struct net_device *netdev)
        struct br2684_dev *brdev = BRPRIV(netdev);
 
        brdev->net_dev = netdev;
-       netdev->hard_header_len = 0;
+       netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
        netdev->netdev_ops = &br2684_netdev_ops_routed;
        netdev->addr_len = 0;
        netdev->mtu = 1500;
index 8523940..c12c258 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <net/route.h> /* for struct rtable and routing */
 #include <net/icmp.h> /* icmp_send */
+#include <net/arp.h>
 #include <linux/param.h> /* for HZ */
 #include <linux/uaccess.h>
 #include <asm/byteorder.h> /* for htons etc. */
@@ -119,7 +120,7 @@ out:
 /* The neighbour entry n->lock is held. */
 static int neigh_check_cb(struct neighbour *n)
 {
-       struct atmarp_entry *entry = NEIGH2ENTRY(n);
+       struct atmarp_entry *entry = neighbour_priv(n);
        struct clip_vcc *cv;
 
        for (cv = entry->vccs; cv; cv = cv->next) {
@@ -189,6 +190,13 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
        struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
 
        pr_debug("\n");
+
+       if (!clip_devs) {
+               atm_return(vcc, skb->truesize);
+               kfree_skb(skb);
+               return;
+       }
+
        if (!skb) {
                pr_debug("removing VCC %p\n", clip_vcc);
                if (clip_vcc->entry)
@@ -255,8 +263,10 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
 
 static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
+       __be32 *ip = (__be32 *) neigh->primary_key;
+
        pr_debug("(neigh %p, skb %p)\n", neigh, skb);
-       to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
+       to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip);
 }
 
 static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
@@ -277,72 +287,24 @@ static const struct neigh_ops clip_neigh_ops = {
 
 static int clip_constructor(struct neighbour *neigh)
 {
-       struct atmarp_entry *entry = NEIGH2ENTRY(neigh);
-       struct net_device *dev = neigh->dev;
-       struct in_device *in_dev;
-       struct neigh_parms *parms;
+       struct atmarp_entry *entry = neighbour_priv(neigh);
 
-       pr_debug("(neigh %p, entry %p)\n", neigh, entry);
-       neigh->type = inet_addr_type(&init_net, entry->ip);
-       if (neigh->type != RTN_UNICAST)
+       if (neigh->tbl->family != AF_INET)
                return -EINVAL;
 
-       rcu_read_lock();
-       in_dev = __in_dev_get_rcu(dev);
-       if (!in_dev) {
-               rcu_read_unlock();
+       if (neigh->type != RTN_UNICAST)
                return -EINVAL;
-       }
-
-       parms = in_dev->arp_parms;
-       __neigh_parms_put(neigh->parms);
-       neigh->parms = neigh_parms_clone(parms);
-       rcu_read_unlock();
 
+       neigh->nud_state = NUD_NONE;
        neigh->ops = &clip_neigh_ops;
-       neigh->output = neigh->nud_state & NUD_VALID ?
-           neigh->ops->connected_output : neigh->ops->output;
+       neigh->output = neigh->ops->output;
        entry->neigh = neigh;
        entry->vccs = NULL;
        entry->expires = jiffies - 1;
+
        return 0;
 }
 
-static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd)
-{
-       return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd);
-}
-
-static struct neigh_table clip_tbl = {
-       .family         = AF_INET,
-       .entry_size     = sizeof(struct neighbour)+sizeof(struct atmarp_entry),
-       .key_len        = 4,
-       .hash           = clip_hash,
-       .constructor    = clip_constructor,
-       .id             = "clip_arp_cache",
-
-       /* parameters are copied from ARP ... */
-       .parms = {
-               .tbl                    = &clip_tbl,
-               .base_reachable_time    = 30 * HZ,
-               .retrans_time           = 1 * HZ,
-               .gc_staletime           = 60 * HZ,
-               .reachable_time         = 30 * HZ,
-               .delay_probe_time       = 5 * HZ,
-               .queue_len              = 3,
-               .ucast_probes           = 3,
-               .mcast_probes           = 3,
-               .anycast_delay          = 1 * HZ,
-               .proxy_delay            = (8 * HZ) / 10,
-               .proxy_qlen             = 64,
-               .locktime               = 1 * HZ,
-       },
-       .gc_interval    = 30 * HZ,
-       .gc_thresh1     = 128,
-       .gc_thresh2     = 512,
-       .gc_thresh3     = 1024,
-};
-
 /* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */
 
 /*
@@ -376,28 +338,19 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       n = dst_get_neighbour(dst);
+       n = dst_get_neighbour_noref(dst);
        if (!n) {
-#if 0
-               n = clip_find_neighbour(skb_dst(skb), 1);
-               if (!n) {
-                       dev_kfree_skb(skb);     /* lost that one */
-                       dev->stats.tx_dropped++;
-                       return 0;
-               }
-               dst_set_neighbour(dst, n);
-#endif
                pr_err("NO NEIGHBOUR !\n");
                dev_kfree_skb(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       entry = NEIGH2ENTRY(n);
+       entry = neighbour_priv(n);
        if (!entry->vccs) {
                if (time_after(jiffies, entry->expires)) {
                        /* should be resolved */
                        entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
-                       to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
+                       to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key));
                }
                if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
                        skb_queue_tail(&entry->neigh->arp_queue, skb);
@@ -448,10 +401,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
 
 static int clip_mkip(struct atm_vcc *vcc, int timeout)
 {
-       struct sk_buff_head *rq, queue;
        struct clip_vcc *clip_vcc;
-       struct sk_buff *skb, *tmp;
-       unsigned long flags;
 
        if (!vcc->push)
                return -EBADFD;
@@ -472,29 +422,9 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
        vcc->push = clip_push;
        vcc->pop = clip_pop;
 
-       __skb_queue_head_init(&queue);
-       rq = &sk_atm(vcc)->sk_receive_queue;
-
-       spin_lock_irqsave(&rq->lock, flags);
-       skb_queue_splice_init(rq, &queue);
-       spin_unlock_irqrestore(&rq->lock, flags);
-
        /* re-process everything received between connection setup and MKIP */
-       skb_queue_walk_safe(&queue, skb, tmp) {
-               if (!clip_devs) {
-                       atm_return(vcc, skb->truesize);
-                       kfree_skb(skb);
-               } else {
-                       struct net_device *dev = skb->dev;
-                       unsigned int len = skb->len;
-
-                       skb_get(skb);
-                       clip_push(vcc, skb);
-                       dev->stats.rx_packets--;
-                       dev->stats.rx_bytes -= len;
-                       kfree_skb(skb);
-               }
-       }
+       vcc_process_recv_queue(vcc);
+
        return 0;
 }
 
@@ -523,11 +453,11 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
        rt = ip_route_output(&init_net, ip, 0, 1, 0);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
-       neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
+       neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1);
        ip_rt_put(rt);
        if (!neigh)
                return -ENOMEM;
-       entry = NEIGH2ENTRY(neigh);
+       entry = neighbour_priv(neigh);
        if (entry != clip_vcc->entry) {
                if (!clip_vcc->entry)
                        pr_debug("add\n");
@@ -544,13 +474,15 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
 }
 
 static const struct net_device_ops clip_netdev_ops = {
-       .ndo_start_xmit = clip_start_xmit,
+       .ndo_start_xmit         = clip_start_xmit,
+       .ndo_neigh_construct    = clip_constructor,
 };
 
 static void clip_setup(struct net_device *dev)
 {
        dev->netdev_ops = &clip_netdev_ops;
        dev->type = ARPHRD_ATM;
+       dev->neigh_priv_len = sizeof(struct atmarp_entry);
        dev->hard_header_len = RFC1483LLC_LEN;
        dev->mtu = RFC1626_MTU;
        dev->tx_queue_len = 100;        /* "normal" queue (packets) */
@@ -604,10 +536,8 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
 
-       if (event == NETDEV_UNREGISTER) {
-               neigh_ifdown(&clip_tbl, dev);
+       if (event == NETDEV_UNREGISTER)
                return NOTIFY_DONE;
-       }
 
        /* ignore non-CLIP devices */
        if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops)
@@ -787,9 +717,10 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
 /* This means the neighbour entry has no attached VCC objects. */
 #define SEQ_NO_VCC_TOKEN       ((void *) 2)
 
-static void atmarp_info(struct seq_file *seq, struct net_device *dev,
+static void atmarp_info(struct seq_file *seq, struct neighbour *n,
                        struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
 {
+       struct net_device *dev = n->dev;
        unsigned long exp;
        char buf[17];
        int svc, llc, off;
@@ -809,8 +740,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev,
        seq_printf(seq, "%-6s%-4s%-4s%5ld ",
                   dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp);
 
-       off = scnprintf(buf, sizeof(buf) - 1, "%pI4",
-                       &entry->ip);
+       off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key);
        while (off < 16)
                buf[off++] = ' ';
        buf[off] = '\0';
@@ -881,14 +811,17 @@ static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
 {
        struct clip_seq_state *state = (struct clip_seq_state *)_state;
 
-       return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
+       if (n->dev->type != ARPHRD_ATM)
+               return NULL;
+
+       return clip_seq_vcc_walk(state, neighbour_priv(n), pos);
 }
 
 static void *clip_seq_start(struct seq_file *seq, loff_t * pos)
 {
        struct clip_seq_state *state = seq->private;
        state->ns.neigh_sub_iter = clip_seq_sub_iter;
-       return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
+       return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY);
 }
 
 static int clip_seq_show(struct seq_file *seq, void *v)
@@ -900,10 +833,10 @@ static int clip_seq_show(struct seq_file *seq, void *v)
                seq_puts(seq, atm_arp_banner);
        } else {
                struct clip_seq_state *state = seq->private;
-               struct neighbour *n = v;
                struct clip_vcc *vcc = state->vcc;
+               struct neighbour *n = v;
 
-               atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
+               atmarp_info(seq, n, neighbour_priv(n), vcc);
        }
        return 0;
 }
@@ -934,9 +867,6 @@ static void atm_clip_exit_noproc(void);
 
 static int __init atm_clip_init(void)
 {
-       neigh_table_init_no_netlink(&clip_tbl);
-
-       clip_tbl_hook = &clip_tbl;
        register_atm_ioctl(&clip_ioctl_ops);
        register_netdevice_notifier(&clip_dev_notifier);
        register_inetaddr_notifier(&clip_inet_notifier);
@@ -973,12 +903,6 @@ static void atm_clip_exit_noproc(void)
         */
        del_timer_sync(&idle_timer);
 
-       /* Next, purge the table, so that the device
-        * unregister loop below does not hang due to
-        * device references remaining in the table.
-        */
-       neigh_ifdown(&clip_tbl, NULL);
-
        dev = clip_devs;
        while (dev) {
                next = PRIV(dev)->next;
@@ -986,11 +910,6 @@ static void atm_clip_exit_noproc(void)
                free_netdev(dev);
                dev = next;
        }
-
-       /* Now it is safe to fully shutdown whole table. */
-       neigh_table_clear(&clip_tbl);
-
-       clip_tbl_hook = NULL;
 }
 
 static void __exit atm_clip_exit(void)
index 14ff9fe..b4b44db 100644 (file)
@@ -214,6 +214,26 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
 }
 EXPORT_SYMBOL(vcc_release_async);
 
+void vcc_process_recv_queue(struct atm_vcc *vcc)
+{
+       struct sk_buff_head queue, *rq;
+       struct sk_buff *skb, *tmp;
+       unsigned long flags;
+
+       __skb_queue_head_init(&queue);
+       rq = &sk_atm(vcc)->sk_receive_queue;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       skb_queue_splice_init(rq, &queue);
+       spin_unlock_irqrestore(&rq->lock, flags);
+
+       skb_queue_walk_safe(&queue, skb, tmp) {
+               __skb_unlink(skb, &queue);
+               vcc->push(vcc, skb);
+       }
+}
+EXPORT_SYMBOL(vcc_process_recv_queue);
+
 void atm_dev_signal_change(struct atm_dev *dev, char signal)
 {
        pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
@@ -502,8 +522,11 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 
        if (sock->state != SS_CONNECTED)
                return -ENOTCONN;
-       if (flags & ~MSG_DONTWAIT)              /* only handle MSG_DONTWAIT */
+
+       /* only handle MSG_DONTWAIT and MSG_PEEK */
+       if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
                return -EOPNOTSUPP;
+
        vcc = ATM_SD(sock);
        if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
            test_bit(ATM_VF_CLOSE, &vcc->flags) ||
@@ -524,8 +547,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
        if (error)
                return error;
        sock_recv_ts_and_drops(msg, sk, skb);
-       pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
-       atm_return(vcc, skb->truesize);
+
+       if (!(flags & MSG_PEEK)) {
+               pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
+                        skb->truesize);
+               atm_return(vcc, skb->truesize);
+       }
+
        skb_free_datagram(sk, skb);
        return copied;
 }
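
With MSG_PEEK now accepted, a peeking read leaves the PDU queued and, as the hunk above shows, skips the atm_return() call, so the VCC's receive buffer stays charged until a consuming read. An illustrative userspace sketch (not part of the patch), assuming fd is an already-connected ATM socket:

/*
 * The first recv() inspects the PDU without dequeueing it, so the kernel
 * keeps the receive buffer charged; the second recv() consumes the PDU and
 * releases the charge.
 */
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t peek_then_read(int fd, void *buf, size_t len)
{
	ssize_t peeked = recv(fd, buf, len, MSG_PEEK);	/* PDU stays queued */

	if (peeked < 0)
		return peeked;

	return recv(fd, buf, len, 0);	/* dequeues and uncharges the PDU */
}
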
index f48a76b..cc3c2da 100644 (file)
@@ -24,6 +24,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, unsigned int optlen);
 int vcc_getsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, int __user *optlen);
+void vcc_process_recv_queue(struct atm_vcc *vcc);
 
 int atmpvc_init(void);
 void atmpvc_exit(void);
index db4a11c..df35d9a 100644 (file)
@@ -303,6 +303,10 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
        atmvcc->push = pppoatm_push;
        atmvcc->pop = pppoatm_pop;
        __module_get(THIS_MODULE);
+
+       /* re-process everything received between connection setup and
+          backend setup */
+       vcc_process_recv_queue(atmvcc);
        return 0;
 }
 
index e7c69f4..b863c18 100644 (file)
@@ -402,14 +402,14 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
                break;
 
        case AX25_T1:
-               if (ax25_ctl.arg < 1)
+               if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
                        goto einval_put;
                ax25->rtt = (ax25_ctl.arg * HZ) / 2;
                ax25->t1  = ax25_ctl.arg * HZ;
                break;
 
        case AX25_T2:
-               if (ax25_ctl.arg < 1)
+               if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
                        goto einval_put;
                ax25->t2 = ax25_ctl.arg * HZ;
                break;
@@ -422,10 +422,15 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
                break;
 
        case AX25_T3:
+               if (ax25_ctl.arg > ULONG_MAX / HZ)
+                       goto einval_put;
                ax25->t3 = ax25_ctl.arg * HZ;
                break;
 
        case AX25_IDLE:
+               if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
+                       goto einval_put;
+
                ax25->idle = ax25_ctl.arg * 60 * HZ;
                break;
 
@@ -571,7 +576,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_T1:
-               if (opt < 1) {
+               if (opt < 1 || opt > ULONG_MAX / HZ) {
                        res = -EINVAL;
                        break;
                }
@@ -580,7 +585,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_T2:
-               if (opt < 1) {
+               if (opt < 1 || opt > ULONG_MAX / HZ) {
                        res = -EINVAL;
                        break;
                }
@@ -596,7 +601,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_T3:
-               if (opt < 1) {
+               if (opt < 1 || opt > ULONG_MAX / HZ) {
                        res = -EINVAL;
                        break;
                }
@@ -604,7 +609,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case AX25_IDLE:
-               if (opt < 0) {
+               if (opt < 0 || opt > ULONG_MAX / (60 * HZ)) {
                        res = -EINVAL;
                        break;
                }
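
All of these new checks guard the same pattern: reject any user-supplied timer value whose multiplication by HZ (or 60 * HZ for the idle timer) would overflow an unsigned long before the product is stored. A self-contained sketch of the guard, with HZ fixed to 100 purely for illustration and set_t1() standing in for the AX25_T1 case:

#include <limits.h>
#include <stdio.h>

#define HZ 100	/* illustration only; the real value is a kernel config option */

static int set_t1(unsigned long arg, unsigned long *t1)
{
	if (arg < 1 || arg > ULONG_MAX / HZ)
		return -1;		/* reject: arg * HZ would overflow */

	*t1 = arg * HZ;			/* safe: the product fits */
	return 0;
}

int main(void)
{
	unsigned long t1 = 0;

	printf("set_t1(10)            -> %d\n", set_t1(10, &t1));
	printf("set_t1(ULONG_MAX / 2) -> %d\n", set_t1(ULONG_MAX / 2, &t1));
	return 0;
}
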
index b8a7414..c25492f 100644 (file)
@@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count,
        unsigned long uint_val;
        int ret;
 
-       ret = strict_strtoul(buff, 10, &uint_val);
+       ret = kstrtoul(buff, 10, &uint_val);
        if (ret) {
                bat_info(net_dev,
                         "%s: Invalid parameter received: %s\n",
@@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
        unsigned long val;
        int ret, vis_mode_tmp = -1;
 
-       ret = strict_strtoul(buff, 10, &val);
+       ret = kstrtoul(buff, 10, &val);
 
        if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
            (strncmp(buff, "client", 6) == 0) ||
index 0be9ff3..9bc63b2 100644 (file)
@@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
        /* sequence number is much newer, probably missed a lot of packets */
 
        if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
-               || (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
+               && (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "We missed a lot of packets (%i) !\n",
                        seq_num_diff - 1);
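
The operator change matters because the two sub-conditions were meant to bracket a window, not act as alternatives. With the values batman-adv is assumed to use here (TQ_LOCAL_WINDOW_SIZE = 64, EXPECTED_SEQNO_RANGE = 65536), the old `||` test is true for essentially every difference, while the corrected `&&` only matches the intended "missed a lot of packets" range. A standalone illustration:

#include <stdio.h>

#define TQ_LOCAL_WINDOW_SIZE	64
#define EXPECTED_SEQNO_RANGE	65536

int main(void)
{
	const int diffs[] = { 1, 63, 64, 1000, 65535, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(diffs) / sizeof(diffs[0]); i++) {
		int d = diffs[i];

		printf("diff=%-6d old(||)=%d new(&&)=%d\n", d,
		       d >= TQ_LOCAL_WINDOW_SIZE || d < EXPECTED_SEQNO_RANGE,
		       d >= TQ_LOCAL_WINDOW_SIZE && d < EXPECTED_SEQNO_RANGE);
	}
	return 0;
}
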
index 619fb73..24403a7 100644 (file)
@@ -25,6 +25,7 @@
 #include "gateway_common.h"
 #include "hard-interface.h"
 #include "originator.h"
+#include "translation-table.h"
 #include "routing.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -572,108 +573,142 @@ out:
        return ret;
 }
 
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
-                struct orig_node *old_gw)
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
        struct ethhdr *ethhdr;
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct udphdr *udphdr;
-       struct gw_node *curr_gw;
-       struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
-       unsigned int header_len = 0;
-       int ret = 1;
-
-       if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
-               return 0;
 
        /* check for ethernet header */
-       if (!pskb_may_pull(skb, header_len + ETH_HLEN))
-               return 0;
+       if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
+               return false;
        ethhdr = (struct ethhdr *)skb->data;
-       header_len += ETH_HLEN;
+       *header_len += ETH_HLEN;
 
        /* check for initial vlan header */
        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-               if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
-                       return 0;
+               if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
+                       return false;
                ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
-               header_len += VLAN_HLEN;
+               *header_len += VLAN_HLEN;
        }
 
        /* check for ip header */
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_IP:
-               if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
-                       return 0;
-               iphdr = (struct iphdr *)(skb->data + header_len);
-               header_len += iphdr->ihl * 4;
+               if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
+                       return false;
+               iphdr = (struct iphdr *)(skb->data + *header_len);
+               *header_len += iphdr->ihl * 4;
 
                /* check for udp header */
                if (iphdr->protocol != IPPROTO_UDP)
-                       return 0;
+                       return false;
 
                break;
        case ETH_P_IPV6:
-               if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
-                       return 0;
-               ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
-               header_len += sizeof(*ipv6hdr);
+               if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
+                       return false;
+               ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
+               *header_len += sizeof(*ipv6hdr);
 
                /* check for udp header */
                if (ipv6hdr->nexthdr != IPPROTO_UDP)
-                       return 0;
+                       return false;
 
                break;
        default:
-               return 0;
+               return false;
        }
 
-       if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
-               return 0;
-       udphdr = (struct udphdr *)(skb->data + header_len);
-       header_len += sizeof(*udphdr);
+       if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
+               return false;
+       udphdr = (struct udphdr *)(skb->data + *header_len);
+       *header_len += sizeof(*udphdr);
 
        /* check for bootp port */
        if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
             (ntohs(udphdr->dest) != 67))
-               return 0;
+               return false;
 
        if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
            (ntohs(udphdr->dest) != 547))
-               return 0;
+               return false;
 
-       if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
-               return -1;
+       return true;
+}
 
-       curr_gw = gw_get_selected_gw_node(bat_priv);
-       if (!curr_gw)
-               return 0;
-
-       /* If old_gw != NULL then this packet is unicast.
-        * So, at this point we have to check the message type: if it is a
-        * DHCPREQUEST we have to decide whether to drop it or not */
-       if (old_gw && curr_gw->orig_node != old_gw) {
-               if (is_type_dhcprequest(skb, header_len)) {
-                       /* If the dhcp packet has been sent to a different gw,
-                        * we have to evaluate whether the old gw is still
-                        * reliable enough */
-                       neigh_curr = find_router(bat_priv, curr_gw->orig_node,
-                                                NULL);
-                       neigh_old = find_router(bat_priv, old_gw, NULL);
-                       if (!neigh_curr || !neigh_old)
-                               goto free_neigh;
-                       if (neigh_curr->tq_avg - neigh_old->tq_avg <
-                                                               GW_THRESHOLD)
-                               ret = -1;
-               }
+bool gw_out_of_range(struct bat_priv *bat_priv,
+                    struct sk_buff *skb, struct ethhdr *ethhdr)
+{
+       struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+       struct orig_node *orig_dst_node = NULL;
+       struct gw_node *curr_gw = NULL;
+       bool ret, out_of_range = false;
+       unsigned int header_len = 0;
+       uint8_t curr_tq_avg;
+
+       ret = gw_is_dhcp_target(skb, &header_len);
+       if (!ret)
+               goto out;
+
+       orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
+                                         ethhdr->h_dest);
+       if (!orig_dst_node)
+               goto out;
+
+       if (!orig_dst_node->gw_flags)
+               goto out;
+
+       ret = is_type_dhcprequest(skb, header_len);
+       if (!ret)
+               goto out;
+
+       switch (atomic_read(&bat_priv->gw_mode)) {
+       case GW_MODE_SERVER:
+               /* If we are a GW then we are our best GW. We can artificially
+                * set the tq towards ourselves as the maximum value */
+               curr_tq_avg = TQ_MAX_VALUE;
+               break;
+       case GW_MODE_CLIENT:
+               curr_gw = gw_get_selected_gw_node(bat_priv);
+               if (!curr_gw)
+                       goto out;
+
+               /* packet is going to our gateway */
+               if (curr_gw->orig_node == orig_dst_node)
+                       goto out;
+
+               /* If the dhcp packet has been sent to a different gw,
+                * we have to evaluate whether the old gw is still
+                * reliable enough */
+               neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+               if (!neigh_curr)
+                       goto out;
+
+               curr_tq_avg = neigh_curr->tq_avg;
+               break;
+       case GW_MODE_OFF:
+       default:
+               goto out;
        }
-free_neigh:
+
+       neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+       if (!neigh_old)
+               goto out;
+
+       if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+               out_of_range = true;
+
+out:
+       if (orig_dst_node)
+               orig_node_free_ref(orig_dst_node);
+       if (curr_gw)
+               gw_node_free_ref(curr_gw);
        if (neigh_old)
                neigh_node_free_ref(neigh_old);
        if (neigh_curr)
                neigh_node_free_ref(neigh_curr);
-       if (curr_gw)
-               gw_node_free_ref(curr_gw);
-       return ret;
+       return out_of_range;
 }
index b9b983c..e1edba0 100644 (file)
@@ -31,7 +31,8 @@ void gw_node_update(struct bat_priv *bat_priv,
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_purge(struct bat_priv *bat_priv);
 int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
-                struct orig_node *old_gw);
+bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool gw_out_of_range(struct bat_priv *bat_priv,
+                    struct sk_buff *skb, struct ethhdr *ethhdr);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
index 18661af..c4ac7b0 100644 (file)
@@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                        *tmp_ptr = '\0';
        }
 
-       ret = strict_strtol(buff, 10, &ldown);
+       ret = kstrtol(buff, 10, &ldown);
        if (ret) {
                bat_err(net_dev,
                        "Download speed of gateway mode invalid: %s\n",
@@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                                *tmp_ptr = '\0';
                }
 
-               ret = strict_strtol(slash_ptr + 1, 10, &lup);
+               ret = kstrtol(slash_ptr + 1, 10, &lup);
                if (ret) {
                        bat_err(net_dev,
                                "Upload speed of gateway mode invalid: "
index 2a17250..d1da29d 100644 (file)
@@ -25,7 +25,7 @@
 /* clears the hash */
 static void hash_init(struct hashtable_t *hash)
 {
-       int i;
+       uint32_t i;
 
        for (i = 0 ; i < hash->size; i++) {
                INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash)
 }
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size)
+struct hashtable_t *hash_new(uint32_t size)
 {
        struct hashtable_t *hash;
 
index d20aa71..4768717 100644 (file)
@@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
-typedef int (*hashdata_choose_cb)(const void *, int);
+typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
 typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
        struct hlist_head *table;   /* the hashtable itself with the buckets */
        spinlock_t *list_locks;     /* spinlock for each hash list entry */
-       int size;                   /* size of hashtable */
+       uint32_t size;              /* size of hashtable */
 };
 
 /* allocates and clears the hash */
-struct hashtable_t *hash_new(int size);
+struct hashtable_t *hash_new(uint32_t size);
 
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
@@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash,
        struct hlist_head *head;
        struct hlist_node *node, *node_tmp;
        spinlock_t *list_lock; /* spinlock to protect write access */
-       int i;
+       uint32_t i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -93,7 +93,8 @@ static inline int hash_add(struct hashtable_t *hash,
                           hashdata_choose_cb choose,
                           const void *data, struct hlist_node *data_node)
 {
-       int index, ret = -1;
+       uint32_t index;
+       int ret = -1;
        struct hlist_head *head;
        struct hlist_node *node;
        spinlock_t *list_lock; /* spinlock to protect write access */
@@ -137,7 +138,7 @@ static inline void *hash_remove(struct hashtable_t *hash,
                                hashdata_compare_cb compare,
                                hashdata_choose_cb choose, void *data)
 {
-       size_t index;
+       uint32_t index;
        struct hlist_node *node;
        struct hlist_head *head;
        void *data_save = NULL;
index ac3520e..d9c1e7b 100644 (file)
@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
 
        spin_unlock_bh(&socket_client->lock);
 
-       error = __copy_to_user(buf, &socket_packet->icmp_packet,
-                              socket_packet->icmp_len);
+       packet_len = min(count, socket_packet->icmp_len);
+       error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
 
-       packet_len = socket_packet->icmp_len;
        kfree(socket_packet);
 
        if (error)
@@ -187,12 +186,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        skb_reserve(skb, sizeof(struct ethhdr));
        icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
 
-       if (!access_ok(VERIFY_READ, buff, packet_len)) {
-               len = -EFAULT;
-               goto free_skb;
-       }
-
-       if (__copy_from_user(icmp_packet, buff, packet_len)) {
+       if (copy_from_user(icmp_packet, buff, packet_len)) {
                len = -EFAULT;
                goto free_skb;
        }
@@ -217,7 +211,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 
        if (icmp_packet->version != COMPAT_VERSION) {
                icmp_packet->msg_type = PARAMETER_PROBLEM;
-               icmp_packet->ttl = COMPAT_VERSION;
+               icmp_packet->version = COMPAT_VERSION;
                bat_socket_add_packet(socket_client, icmp_packet, packet_len);
                goto free_skb;
        }
index 964ad4d..86354e0 100644 (file)
@@ -28,7 +28,7 @@
 #define DRIVER_DEVICE "batman-adv"
 
 #ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.4.0"
+#define SOURCE_VERSION "2012.0.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index 0e5b772..0bc2045 100644 (file)
@@ -164,7 +164,7 @@ void originator_free(struct bat_priv *bat_priv)
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct orig_node *orig_node;
-       int i;
+       uint32_t i;
 
        if (!hash)
                return;
@@ -350,7 +350,7 @@ static void _purge_orig(struct bat_priv *bat_priv)
        struct hlist_head *head;
        spinlock_t *list_lock; /* spinlock to protect write access */
        struct orig_node *orig_node;
-       int i;
+       uint32_t i;
 
        if (!hash)
                return;
@@ -413,7 +413,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
        int batman_count = 0;
        int last_seen_secs;
        int last_seen_msecs;
-       int i, ret = 0;
+       uint32_t i;
+       int ret = 0;
 
        primary_if = primary_if_get_selected(bat_priv);
 
@@ -519,7 +520,8 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
        struct hlist_node *node;
        struct hlist_head *head;
        struct orig_node *orig_node;
-       int i, ret;
+       uint32_t i;
+       int ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
@@ -601,7 +603,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
        struct hlist_head *head;
        struct hard_iface *hard_iface_tmp;
        struct orig_node *orig_node;
-       int i, ret;
+       uint32_t i;
+       int ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
index cfc1f60..67765ff 100644 (file)
@@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 /* hashfunction to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(const void *data, int32_t size)
+static inline uint32_t choose_orig(const void *data, uint32_t size)
 {
        const unsigned char *key = data;
        uint32_t hash = 0;
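
choose_orig() keeps the one-at-a-time style hash referenced in the comment above; the change is only about widening the index and table-size types to uint32_t. A self-contained sketch of that hash over a 6-byte MAC address, with the mixing steps and the final "% size" reduction reproduced from memory as an illustration rather than quoted from the patch:

#include <stdint.h>
#include <stdio.h>

static uint32_t choose_orig_sketch(const void *data, uint32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	int i;

	for (i = 0; i < 6; i++) {	/* 6 bytes: an Ethernet MAC address */
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;		/* bucket index in [0, size) */
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	printf("bucket = %u\n", choose_orig_sketch(mac, 1024));
	return 0;
}
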
index f961cc5..773e606 100644 (file)
@@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
        struct hlist_head *head;
        struct orig_node *orig_node;
        unsigned long *word;
-       int i;
+       uint32_t i;
        size_t word_index;
 
        for (i = 0; i < hash->size; i++) {
@@ -578,6 +578,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
 {
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct tt_query_packet *tt_query;
+       uint16_t tt_len;
        struct ethhdr *ethhdr;
 
        /* drop packet if it has not necessary minimum size */
@@ -616,13 +617,21 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
                }
                break;
        case TT_RESPONSE:
-               /* packet needs to be linearized to access the TT changes */
-               if (skb_linearize(skb) < 0)
-                       goto out;
+               if (is_my_mac(tt_query->dst)) {
+                       /* packet needs to be linearized to access the TT
+                        * changes */
+                       if (skb_linearize(skb) < 0)
+                               goto out;
+
+                       tt_len = tt_query->tt_data * sizeof(struct tt_change);
+
+                       /* Ensure we have all the claimed data */
+                       if (unlikely(skb_headlen(skb) <
+                                    sizeof(struct tt_query_packet) + tt_len))
+                               goto out;
 
-               if (is_my_mac(tt_query->dst))
                        handle_tt_response(bat_priv, tt_query);
-               else {
+               else {
                        bat_dbg(DBG_TT, bat_priv,
                                "Routing TT_RESPONSE to %pM [%c]\n",
                                tt_query->dst,
index f9cc957..987c75a 100644 (file)
@@ -563,10 +563,10 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        struct bcast_packet *bcast_packet;
        struct vlan_ethhdr *vhdr;
        struct softif_neigh *curr_softif_neigh = NULL;
-       struct orig_node *orig_node = NULL;
+       unsigned int header_len = 0;
        int data_len = skb->len, ret;
        short vid = -1;
-       bool do_bcast;
+       bool do_bcast = false;
 
        if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
                goto dropped;
@@ -598,17 +598,28 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
        /* Register the client MAC in the transtable */
        tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
-       orig_node = transtable_search(bat_priv, ethhdr->h_source,
-                                     ethhdr->h_dest);
-       do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
-       if (do_bcast || (orig_node && orig_node->gw_flags)) {
-               ret = gw_is_target(bat_priv, skb, orig_node);
+       if (is_multicast_ether_addr(ethhdr->h_dest)) {
+               do_bcast = true;
 
-               if (ret < 0)
-                       goto dropped;
-
-               if (ret)
-                       do_bcast = false;
+               switch (atomic_read(&bat_priv->gw_mode)) {
+               case GW_MODE_SERVER:
+                       /* gateway servers should not send dhcp
+                        * requests into the mesh */
+                       ret = gw_is_dhcp_target(skb, &header_len);
+                       if (ret)
+                               goto dropped;
+                       break;
+               case GW_MODE_CLIENT:
+                       /* gateway clients should send dhcp requests
+                        * via unicast to their gateway */
+                       ret = gw_is_dhcp_target(skb, &header_len);
+                       if (ret)
+                               do_bcast = false;
+                       break;
+               case GW_MODE_OFF:
+               default:
+                       break;
+               }
        }
 
        /* ethernet packet should be broadcasted */
@@ -644,6 +655,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 
        /* unicast packet */
        } else {
+               if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
+                       ret = gw_out_of_range(bat_priv, skb, ethhdr);
+                       if (ret)
+                               goto dropped;
+               }
+
                ret = unicast_send_skb(skb, bat_priv);
                if (ret != 0)
                        goto dropped_freed;
@@ -662,8 +679,6 @@ end:
                softif_neigh_free_ref(curr_softif_neigh);
        if (primary_if)
                hardif_free_ref(primary_if);
-       if (orig_node)
-               orig_node_free_ref(orig_node);
        return NETDEV_TX_OK;
 }
 
@@ -859,7 +874,7 @@ unreg_debugfs:
 unreg_sysfs:
        sysfs_del_meshif(soft_iface);
 unreg_soft_iface:
-       unregister_netdev(soft_iface);
+       unregister_netdevice(soft_iface);
        return NULL;
 
 free_soft_iface:
index 5f09a57..46a2b37 100644 (file)
@@ -36,18 +36,9 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 static void tt_purge(struct work_struct *work);
 
 /* returns 1 if they are the same mac addr */
-static int compare_ltt(const struct hlist_node *node, const void *data2)
+static int compare_tt(const struct hlist_node *node, const void *data2)
 {
-       const void *data1 = container_of(node, struct tt_local_entry,
-                                        hash_entry);
-
-       return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* returns 1 if they are the same mac addr */
-static int compare_gtt(const struct hlist_node *node, const void *data2)
-{
-       const void *data1 = container_of(node, struct tt_global_entry,
+       const void *data1 = container_of(node, struct tt_common_entry,
                                         hash_entry);
 
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
@@ -60,14 +51,13 @@ static void tt_start_timer(struct bat_priv *bat_priv)
                           msecs_to_jiffies(5000));
 }
 
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
-                                                const void *data)
+static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
+                                           const void *data)
 {
-       struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct hlist_head *head;
        struct hlist_node *node;
-       struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
-       int index;
+       struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+       uint32_t index;
 
        if (!hash)
                return NULL;
@@ -76,51 +66,46 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
-               if (!compare_eth(tt_local_entry, data))
+       hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
+               if (!compare_eth(tt_common_entry, data))
                        continue;
 
-               if (!atomic_inc_not_zero(&tt_local_entry->refcount))
+               if (!atomic_inc_not_zero(&tt_common_entry->refcount))
                        continue;
 
-               tt_local_entry_tmp = tt_local_entry;
+               tt_common_entry_tmp = tt_common_entry;
                break;
        }
        rcu_read_unlock();
 
-       return tt_local_entry_tmp;
+       return tt_common_entry_tmp;
 }
 
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
-                                                  const void *data)
+static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
+                                                const void *data)
 {
-       struct hashtable_t *hash = bat_priv->tt_global_hash;
-       struct hlist_head *head;
-       struct hlist_node *node;
-       struct tt_global_entry *tt_global_entry;
-       struct tt_global_entry *tt_global_entry_tmp = NULL;
-       int index;
-
-       if (!hash)
-               return NULL;
-
-       index = choose_orig(data, hash->size);
-       head = &hash->table[index];
+       struct tt_common_entry *tt_common_entry;
+       struct tt_local_entry *tt_local_entry = NULL;
 
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
-               if (!compare_eth(tt_global_entry, data))
-                       continue;
+       tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+       if (tt_common_entry)
+               tt_local_entry = container_of(tt_common_entry,
+                                             struct tt_local_entry, common);
+       return tt_local_entry;
+}
 
-               if (!atomic_inc_not_zero(&tt_global_entry->refcount))
-                       continue;
+static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
+                                                  const void *data)
+{
+       struct tt_common_entry *tt_common_entry;
+       struct tt_global_entry *tt_global_entry = NULL;
 
-               tt_global_entry_tmp = tt_global_entry;
-               break;
-       }
-       rcu_read_unlock();
+       tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+       if (tt_common_entry)
+               tt_global_entry = container_of(tt_common_entry,
+                                              struct tt_global_entry, common);
+       return tt_global_entry;
 
-       return tt_global_entry_tmp;
 }
 
 static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
@@ -133,15 +118,18 @@ static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
 
 static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
 {
-       if (atomic_dec_and_test(&tt_local_entry->refcount))
-               kfree_rcu(tt_local_entry, rcu);
+       if (atomic_dec_and_test(&tt_local_entry->common.refcount))
+               kfree_rcu(tt_local_entry, common.rcu);
 }
 
 static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 {
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
 
-       tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+       tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
+       tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+                                      common);
 
        if (tt_global_entry->orig_node)
                orig_node_free_ref(tt_global_entry->orig_node);
@@ -151,8 +139,9 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
 
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
-       if (atomic_dec_and_test(&tt_global_entry->refcount))
-               call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
+       if (atomic_dec_and_test(&tt_global_entry->common.refcount))
+               call_rcu(&tt_global_entry->common.rcu,
+                        tt_global_entry_free_rcu);
 }
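Note the asymmetry the split introduces: tt_local_entry can be released with kfree_rcu() on the embedded common.rcu because a plain kfree() is all it needs, while tt_global_entry still goes through call_rcu() so the orig_node reference can be dropped in the callback first. A kernel-style sketch of that second pattern, assuming a similar embedded "common" member (illustrative, not a drop-in for the code above):

static void my_entry_free_rcu(struct rcu_head *rcu)
{
        struct my_entry *entry;

        entry = container_of(rcu, struct my_entry, common.rcu);

        /* teardown that rules out kfree_rcu() goes here, e.g. dropping a
         * reference this entry holds on another object */
        kfree(entry);
}

static void my_entry_free_ref(struct my_entry *entry)
{
        if (atomic_dec_and_test(&entry->common.refcount))
                call_rcu(&entry->common.rcu, my_entry_free_rcu);
}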
 
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -201,6 +190,7 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
+       int hash_added;
 
        tt_local_entry = tt_local_hash_find(bat_priv, addr);
 
@@ -217,26 +207,33 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
                (uint8_t)atomic_read(&bat_priv->ttvn));
 
-       memcpy(tt_local_entry->addr, addr, ETH_ALEN);
-       tt_local_entry->last_seen = jiffies;
-       tt_local_entry->flags = NO_FLAGS;
+       memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
+       tt_local_entry->common.flags = NO_FLAGS;
        if (is_wifi_iface(ifindex))
-               tt_local_entry->flags |= TT_CLIENT_WIFI;
-       atomic_set(&tt_local_entry->refcount, 2);
+               tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+       atomic_set(&tt_local_entry->common.refcount, 2);
+       tt_local_entry->last_seen = jiffies;
 
        /* the batman interface mac address should never be purged */
        if (compare_eth(addr, soft_iface->dev_addr))
-               tt_local_entry->flags |= TT_CLIENT_NOPURGE;
+               tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
+
+       hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
+                        &tt_local_entry->common,
+                        &tt_local_entry->common.hash_entry);
+
+       if (unlikely(hash_added != 0)) {
+               /* remove the reference for the hash */
+               tt_local_entry_free_ref(tt_local_entry);
+               goto out;
+       }
 
-       tt_local_event(bat_priv, addr, tt_local_entry->flags);
+       tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
 
        /* The local entry has to be marked as NEW to avoid to send it in
         * a full table response going out before the next ttvn increment
         * (consistency check) */
-       tt_local_entry->flags |= TT_CLIENT_NEW;
-
-       hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
-                tt_local_entry, &tt_local_entry->hash_entry);
+       tt_local_entry->common.flags |= TT_CLIENT_NEW;
 
        /* remove address from global hash if present */
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
@@ -247,10 +244,9 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                tt_global_entry->orig_node->tt_poss_change = true;
                /* The global entry has to be marked as ROAMING and has to be
                 * kept for consistency purpose */
-               tt_global_entry->flags |= TT_CLIENT_ROAM;
+               tt_global_entry->common.flags |= TT_CLIENT_PENDING;
                tt_global_entry->roam_at = jiffies;
-
-               send_roam_adv(bat_priv, tt_global_entry->addr,
+               send_roam_adv(bat_priv, tt_global_entry->common.addr,
                              tt_global_entry->orig_node);
        }
 out:
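hash_add() can now fail, so the refcount is initialised to 2 (one reference for the caller, one for the hash table) and the hash's share is handed back when insertion is refused, before bailing out through the usual exit path. A condensed sketch of the same lifecycle, with hypothetical helpers standing in for the batman-adv ones:

static struct my_entry *my_entry_create(struct hashtable_t *hash,
                                        const uint8_t *addr)
{
        struct my_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return NULL;

        memcpy(entry->common.addr, addr, ETH_ALEN);
        atomic_set(&entry->common.refcount, 2);   /* caller + hash table */

        if (hash_add(hash, compare_tt, choose_orig, &entry->common,
                     &entry->common.hash_entry) != 0) {
                my_entry_free_ref(entry);         /* hash table's reference */
                my_entry_free_ref(entry);         /* caller's reference */
                return NULL;
        }

        return entry;                             /* caller still holds one ref */
}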
@@ -312,13 +308,12 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_local_hash;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
-       size_t buf_size, pos;
-       char *buff;
-       int i, ret = 0;
+       uint32_t i;
+       int ret = 0;
 
        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
@@ -339,51 +334,27 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
                   "announced via TT (TTVN: %u):\n",
                   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
 
-       buf_size = 1;
-       /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               __hlist_for_each_rcu(node, head)
-                       buf_size += 29;
-               rcu_read_unlock();
-       }
-
-       buff = kmalloc(buf_size, GFP_ATOMIC);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       buff[0] = '\0';
-       pos = 0;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       pos += snprintf(buff + pos, 30, " * %pM "
-                                       "[%c%c%c%c%c]\n",
-                                       tt_local_entry->addr,
-                                       (tt_local_entry->flags &
+                       seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+                                       tt_common_entry->addr,
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_ROAM ? 'R' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_NOPURGE ? 'P' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_NEW ? 'N' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_PENDING ? 'X' : '.'),
-                                       (tt_local_entry->flags &
+                                       (tt_common_entry->flags &
                                         TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
        }
-
-       seq_printf(seq, "%s", buff);
-       kfree(buff);
 out:
        if (primary_if)
                hardif_free_ref(primary_if);
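The two-pass scheme (walk the hash once to size a scratch buffer, kmalloc() it, snprintf() every entry into it, then seq_printf() the whole thing) is replaced by printing each entry straight into the seq_file, which drops the -ENOMEM path and the fixed per-line size estimate. A stripped-down sketch of the resulting shape, assuming the same hash and iterator types as above:

static void my_seq_print(struct seq_file *seq, struct hashtable_t *hash)
{
        struct tt_common_entry *entry;
        struct hlist_node *node;
        uint32_t i;

        for (i = 0; i < hash->size; i++) {
                rcu_read_lock();
                hlist_for_each_entry_rcu(entry, node, &hash->table[i],
                                         hash_entry)
                        seq_printf(seq, " * %pM [flags 0x%04x]\n",
                                   entry->addr, entry->flags);
                rcu_read_unlock();
        }
}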
@@ -394,13 +365,13 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
                                 struct tt_local_entry *tt_local_entry,
                                 uint16_t flags)
 {
-       tt_local_event(bat_priv, tt_local_entry->addr,
-                      tt_local_entry->flags | flags);
+       tt_local_event(bat_priv, tt_local_entry->common.addr,
+                      tt_local_entry->common.flags | flags);
 
        /* The local client has to be marked as "pending to be removed" but has
         * to be kept in the table in order to send it in a full table
         * response issued before the net ttvn increment (consistency check) */
-       tt_local_entry->flags |= TT_CLIENT_PENDING;
+       tt_local_entry->common.flags |= TT_CLIENT_PENDING;
 }
 
 void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -416,7 +387,7 @@ void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
                             (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
 
        bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
-               "%s\n", tt_local_entry->addr, message);
+               "%s\n", tt_local_entry->common.addr, message);
 out:
        if (tt_local_entry)
                tt_local_entry_free_ref(tt_local_entry);
@@ -426,23 +397,27 @@ static void tt_local_purge(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
-       int i;
+       uint32_t i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
+                       if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
                                continue;
 
                        /* entry already marked for deletion */
-                       if (tt_local_entry->flags & TT_CLIENT_PENDING)
+                       if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                                continue;
 
                        if (!is_out_of_time(tt_local_entry->last_seen,
@@ -453,7 +428,7 @@ static void tt_local_purge(struct bat_priv *bat_priv)
                                             TT_CLIENT_DEL);
                        bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
                                "pending to be removed: timed out\n",
-                               tt_local_entry->addr);
+                               tt_local_entry->common.addr);
                }
                spin_unlock_bh(list_lock);
        }
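The purge walk keeps its structure; it simply iterates over tt_common_entry and converts back to tt_local_entry for the fields that stayed local. The pattern worth noting is the per-bucket spinlock combined with the _safe hlist iterator, the same combination the *_table_free() and purge-pending walks below rely on when they actually unlink entries. A kernel-style sketch with illustrative names:

static void my_hash_purge(struct hashtable_t *hash)
{
        struct tt_common_entry *entry;
        struct hlist_node *node, *node_tmp;
        spinlock_t *list_lock;
        uint32_t i;

        for (i = 0; i < hash->size; i++) {
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(entry, node, node_tmp,
                                          &hash->table[i], hash_entry) {
                        if (entry->flags & TT_CLIENT_NOPURGE)
                                continue;
                        hlist_del_rcu(node);
                        /* drop the hash table's reference on the entry */
                }
                spin_unlock_bh(list_lock);
        }
}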
@@ -464,10 +439,11 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       int i;
+       uint32_t i;
 
        if (!bat_priv->tt_local_hash)
                return;
@@ -479,9 +455,12 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
@@ -529,6 +508,7 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
        struct tt_global_entry *tt_global_entry;
        struct orig_node *orig_node_tmp;
        int ret = 0;
+       int hash_added;
 
        tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
 
@@ -539,18 +519,24 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                if (!tt_global_entry)
                        goto out;
 
-               memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
+               memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+               tt_global_entry->common.flags = NO_FLAGS;
+               atomic_set(&tt_global_entry->common.refcount, 2);
                /* Assign the new orig_node */
                atomic_inc(&orig_node->refcount);
                tt_global_entry->orig_node = orig_node;
                tt_global_entry->ttvn = ttvn;
-               tt_global_entry->flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
-               atomic_set(&tt_global_entry->refcount, 2);
 
-               hash_add(bat_priv->tt_global_hash, compare_gtt,
-                        choose_orig, tt_global_entry,
-                        &tt_global_entry->hash_entry);
+               hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
+                                choose_orig, &tt_global_entry->common,
+                                &tt_global_entry->common.hash_entry);
+
+               if (unlikely(hash_added != 0)) {
+                       /* remove the reference for the hash */
+                       tt_global_entry_free_ref(tt_global_entry);
+                       goto out_remove;
+               }
                atomic_inc(&orig_node->tt_size);
        } else {
                if (tt_global_entry->orig_node != orig_node) {
@@ -561,20 +547,21 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
                        orig_node_free_ref(orig_node_tmp);
                        atomic_inc(&orig_node->tt_size);
                }
+               tt_global_entry->common.flags = NO_FLAGS;
                tt_global_entry->ttvn = ttvn;
-               tt_global_entry->flags = NO_FLAGS;
                tt_global_entry->roam_at = 0;
        }
 
        if (wifi)
-               tt_global_entry->flags |= TT_CLIENT_WIFI;
+               tt_global_entry->common.flags |= TT_CLIENT_WIFI;
 
        bat_dbg(DBG_TT, bat_priv,
                "Creating new global tt entry: %pM (via %pM)\n",
-               tt_global_entry->addr, orig_node->orig);
+               tt_global_entry->common.addr, orig_node->orig);
 
+out_remove:
        /* remove address from local hash if present */
-       tt_local_remove(bat_priv, tt_global_entry->addr,
+       tt_local_remove(bat_priv, tt_global_entry->common.addr,
                        "global tt received", roaming);
        ret = 1;
 out:
@@ -588,13 +575,13 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
-       size_t buf_size, pos;
-       char *buff;
-       int i, ret = 0;
+       uint32_t i;
+       int ret = 0;
 
        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
@@ -617,53 +604,32 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
                   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
 
-       buf_size = 1;
-       /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
-        * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               __hlist_for_each_rcu(node, head)
-                       buf_size += 67;
-               rcu_read_unlock();
-       }
-
-       buff = kmalloc(buf_size, GFP_ATOMIC);
-       if (!buff) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       buff[0] = '\0';
-       pos = 0;
-
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_global_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       pos += snprintf(buff + pos, 69,
-                                       " * %pM  (%3u) via %pM     (%3u)   "
-                                       "[%c%c%c]\n", tt_global_entry->addr,
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
+                       seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   "
+                                       "[%c%c%c]\n",
+                                       tt_global_entry->common.addr,
                                        tt_global_entry->ttvn,
                                        tt_global_entry->orig_node->orig,
                                        (uint8_t) atomic_read(
                                                &tt_global_entry->orig_node->
                                                last_ttvn),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_ROAM ? 'R' : '.'),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_PENDING ? 'X' : '.'),
-                                       (tt_global_entry->flags &
+                                       (tt_global_entry->common.flags &
                                         TT_CLIENT_WIFI ? 'W' : '.'));
                }
                rcu_read_unlock();
        }
-
-       seq_printf(seq, "%s", buff);
-       kfree(buff);
 out:
        if (primary_if)
                hardif_free_ref(primary_if);
@@ -679,13 +645,13 @@ static void _tt_global_del(struct bat_priv *bat_priv,
 
        bat_dbg(DBG_TT, bat_priv,
                "Deleting global tt entry %pM (via %pM): %s\n",
-               tt_global_entry->addr, tt_global_entry->orig_node->orig,
+               tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
                message);
 
        atomic_dec(&tt_global_entry->orig_node->tt_size);
 
-       hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
-                   tt_global_entry->addr);
+       hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
+                   tt_global_entry->common.addr);
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
@@ -713,9 +679,9 @@ void tt_global_del(struct bat_priv *bat_priv,
                         * 2) the client roamed to us => we can directly delete
                         *    the global entry, since it is useless now. */
                        tt_local_entry = tt_local_hash_find(bat_priv,
-                                                       tt_global_entry->addr);
+                                                           tt_global_entry->common.addr);
                        if (!tt_local_entry) {
-                               tt_global_entry->flags |= TT_CLIENT_ROAM;
+                               tt_global_entry->common.flags |= TT_CLIENT_ROAM;
                                tt_global_entry->roam_at = jiffies;
                                goto out;
                        }
@@ -733,7 +699,8 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node, const char *message)
 {
        struct tt_global_entry *tt_global_entry;
-       int i;
+       struct tt_common_entry *tt_common_entry;
+       uint32_t i;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct hlist_node *node, *safe;
        struct hlist_head *head;
@@ -747,14 +714,18 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, safe,
+               hlist_for_each_entry_safe(tt_common_entry, node, safe,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        if (tt_global_entry->orig_node == orig_node) {
                                bat_dbg(DBG_TT, bat_priv,
                                        "Deleting global tt entry %pM "
-                                       "(via %pM): originator time out\n",
-                                       tt_global_entry->addr,
-                                       tt_global_entry->orig_node->orig);
+                                       "(via %pM): %s\n",
+                                       tt_global_entry->common.addr,
+                                       tt_global_entry->orig_node->orig,
+                                       message);
                                hlist_del_rcu(node);
                                tt_global_entry_free_ref(tt_global_entry);
                        }
@@ -767,20 +738,24 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 static void tt_global_roam_purge(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
-       int i;
+       uint32_t i;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
+                       if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
                                continue;
                        if (!is_out_of_time(tt_global_entry->roam_at,
                                            TT_CLIENT_ROAM_TIMEOUT * 1000))
@@ -788,7 +763,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
 
                        bat_dbg(DBG_TT, bat_priv, "Deleting global "
                                "tt entry (%pM): Roaming timeout\n",
-                               tt_global_entry->addr);
+                               tt_global_entry->common.addr);
                        atomic_dec(&tt_global_entry->orig_node->tt_size);
                        hlist_del_rcu(node);
                        tt_global_entry_free_ref(tt_global_entry);
@@ -802,10 +777,11 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
-       int i;
+       uint32_t i;
 
        if (!bat_priv->tt_global_hash)
                return;
@@ -817,9 +793,12 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
                        hlist_del_rcu(node);
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        tt_global_entry_free_ref(tt_global_entry);
                }
                spin_unlock_bh(list_lock);
@@ -835,8 +814,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
 {
        bool ret = false;
 
-       if (tt_local_entry->flags & TT_CLIENT_WIFI &&
-           tt_global_entry->flags & TT_CLIENT_WIFI)
+       if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
+           tt_global_entry->common.flags & TT_CLIENT_WIFI)
                ret = true;
 
        return ret;
@@ -869,7 +848,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
 
        /* A global client marked as PENDING has already moved from that
         * originator */
-       if (tt_global_entry->flags & TT_CLIENT_PENDING)
+       if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
                goto out;
 
        orig_node = tt_global_entry->orig_node;
@@ -888,29 +867,34 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_global_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_global_entry *tt_global_entry;
        struct hlist_node *node;
        struct hlist_head *head;
-       int i, j;
+       uint32_t i;
+       int j;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_global_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
+                       tt_global_entry = container_of(tt_common_entry,
+                                                      struct tt_global_entry,
+                                                      common);
                        if (compare_eth(tt_global_entry->orig_node,
                                        orig_node)) {
                                /* Roaming clients are in the global table for
                                 * consistency only. They don't have to be
                                 * taken into account while computing the
                                 * global crc */
-                               if (tt_global_entry->flags & TT_CLIENT_ROAM)
+                               if (tt_common_entry->flags & TT_CLIENT_ROAM)
                                        continue;
                                total_one = 0;
                                for (j = 0; j < ETH_ALEN; j++)
                                        total_one = crc16_byte(total_one,
-                                               tt_global_entry->addr[j]);
+                                               tt_common_entry->addr[j]);
                                total ^= total_one;
                        }
                }
@@ -925,25 +909,26 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
 {
        uint16_t total = 0, total_one;
        struct hashtable_t *hash = bat_priv->tt_local_hash;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct hlist_node *node;
        struct hlist_head *head;
-       int i, j;
+       uint32_t i;
+       int j;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        /* not yet committed clients have not to be taken into
                         * account while computing the CRC */
-                       if (tt_local_entry->flags & TT_CLIENT_NEW)
+                       if (tt_common_entry->flags & TT_CLIENT_NEW)
                                continue;
                        total_one = 0;
                        for (j = 0; j < ETH_ALEN; j++)
                                total_one = crc16_byte(total_one,
-                                                  tt_local_entry->addr[j]);
+                                                  tt_common_entry->addr[j]);
                        total ^= total_one;
                }
                rcu_read_unlock();
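Both CRC helpers now feed the address bytes from the common entry: each client's six address bytes run through crc16_byte() starting from zero, and the per-client results are XORed together, so the table checksum does not depend on iteration order. A small userspace sketch, using a bitwise CRC-16 (polynomial 0x8005, reflected) that is assumed to match the kernel's table-driven crc16_byte():

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint16_t crc16_byte(uint16_t crc, uint8_t data)
{
        int i;

        crc ^= data;
        for (i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        return crc;
}

int main(void)
{
        const uint8_t clients[2][ETH_ALEN] = {
                { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
                { 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
        };
        uint16_t total = 0, total_one;
        int c, j;

        for (c = 0; c < 2; c++) {
                total_one = 0;
                for (j = 0; j < ETH_ALEN; j++)
                        total_one = crc16_byte(total_one, clients[c][j]);
                total ^= total_one;     /* XOR makes the order irrelevant */
        }
        printf("table crc: 0x%04x\n", total);
        return 0;
}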
@@ -1032,21 +1017,25 @@ unlock:
 /* data_ptr is useless here, but has to be kept to respect the prototype */
 static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-       const struct tt_local_entry *tt_local_entry = entry_ptr;
+       const struct tt_common_entry *tt_common_entry = entry_ptr;
 
-       if (tt_local_entry->flags & TT_CLIENT_NEW)
+       if (tt_common_entry->flags & TT_CLIENT_NEW)
                return 0;
        return 1;
 }
 
 static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
 {
-       const struct tt_global_entry *tt_global_entry = entry_ptr;
+       const struct tt_common_entry *tt_common_entry = entry_ptr;
+       const struct tt_global_entry *tt_global_entry;
        const struct orig_node *orig_node = data_ptr;
 
-       if (tt_global_entry->flags & TT_CLIENT_ROAM)
+       if (tt_common_entry->flags & TT_CLIENT_ROAM)
                return 0;
 
+       tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+                                      common);
+
        return (tt_global_entry->orig_node == orig_node);
 }
 
@@ -1057,7 +1046,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                                                              const void *),
                                              void *cb_data)
 {
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
        struct tt_query_packet *tt_response;
        struct tt_change *tt_change;
        struct hlist_node *node;
@@ -1065,7 +1054,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        struct sk_buff *skb = NULL;
        uint16_t tt_tot, tt_count;
        ssize_t tt_query_size = sizeof(struct tt_query_packet);
-       int i;
+       uint32_t i;
 
        if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
                tt_len = primary_if->soft_iface->mtu - tt_query_size;
@@ -1089,15 +1078,16 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
                        if (tt_count == tt_tot)
                                break;
 
-                       if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
+                       if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
                                continue;
 
-                       memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
+                       memcpy(tt_change->addr, tt_common_entry->addr,
+                              ETH_ALEN);
                        tt_change->flags = NO_FLAGS;
 
                        tt_count++;
@@ -1204,11 +1194,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
                (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
 
        /* Let's get the orig node of the REAL destination */
-       req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
+       req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
        if (!req_dst_orig_node)
                goto out;
 
-       res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
+       res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
        if (!res_dst_orig_node)
                goto out;
 
@@ -1334,7 +1324,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
        my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
        req_ttvn = tt_request->ttvn;
 
-       orig_node = get_orig_node(bat_priv, tt_request->src);
+       orig_node = orig_hash_find(bat_priv, tt_request->src);
        if (!orig_node)
                goto out;
 
@@ -1514,7 +1504,7 @@ bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
                goto out;
        /* Check if the client has been logically deleted (but is kept for
         * consistency purpose) */
-       if (tt_local_entry->flags & TT_CLIENT_PENDING)
+       if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
                goto out;
        ret = true;
 out:
@@ -1737,45 +1727,53 @@ void tt_free(struct bat_priv *bat_priv)
        kfree(bat_priv->tt_buff);
 }
 
-/* This function will reset the specified flags from all the entries in
- * the given hash table and will increment num_local_tt for each involved
- * entry */
-static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
+/* This function will enable or disable the specified flags for all the entries
+ * in the given hash table and returns the number of modified entries */
+static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
+                            bool enable)
 {
-       int i;
-       struct hashtable_t *hash = bat_priv->tt_local_hash;
+       uint32_t i;
+       uint16_t changed_num = 0;
        struct hlist_head *head;
        struct hlist_node *node;
-       struct tt_local_entry *tt_local_entry;
+       struct tt_common_entry *tt_common_entry;
 
        if (!hash)
-               return;
+               goto out;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node,
+               hlist_for_each_entry_rcu(tt_common_entry, node,
                                         head, hash_entry) {
-                       if (!(tt_local_entry->flags & flags))
-                               continue;
-                       tt_local_entry->flags &= ~flags;
-                       atomic_inc(&bat_priv->num_local_tt);
+                       if (enable) {
+                               if ((tt_common_entry->flags & flags) == flags)
+                                       continue;
+                               tt_common_entry->flags |= flags;
+                       } else {
+                               if (!(tt_common_entry->flags & flags))
+                                       continue;
+                               tt_common_entry->flags &= ~flags;
+                       }
+                       changed_num++;
                }
                rcu_read_unlock();
        }
-
+out:
+       return changed_num;
 }
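tt_local_reset_flags() becomes the generic tt_set_flags(): it can either set or clear a flag mask on the entries of either table, skips entries already in the requested state, and reports how many it changed instead of touching num_local_tt itself; the accounting moves to the caller, as tt_commit_changes() below shows. A hypothetical caller-side fragment for the "enable" direction, assuming the same semantics:

        uint16_t newly_pending;

        /* mark every local client as pending in one pass and learn how
         * many were newly flagged (entries already pending are skipped) */
        newly_pending = tt_set_flags(bat_priv->tt_local_hash,
                                     TT_CLIENT_PENDING, true);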
 
 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
 static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 {
        struct hashtable_t *hash = bat_priv->tt_local_hash;
+       struct tt_common_entry *tt_common_entry;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
-       int i;
+       uint32_t i;
 
        if (!hash)
                return;
@@ -1785,16 +1783,19 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
+               hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
                                          head, hash_entry) {
-                       if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
+                       if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
                                continue;
 
                        bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
-                               "(%pM): pending\n", tt_local_entry->addr);
+                               "(%pM): pending\n", tt_common_entry->addr);
 
                        atomic_dec(&bat_priv->num_local_tt);
                        hlist_del_rcu(node);
+                       tt_local_entry = container_of(tt_common_entry,
+                                                     struct tt_local_entry,
+                                                     common);
                        tt_local_entry_free_ref(tt_local_entry);
                }
                spin_unlock_bh(list_lock);
@@ -1804,7 +1805,11 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
 
 void tt_commit_changes(struct bat_priv *bat_priv)
 {
-       tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
+       uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
+                                           TT_CLIENT_NEW, false);
+       /* all the reset entries have now to be effectively counted as local
+        * entries */
+       atomic_add(changed_num, &bat_priv->num_local_tt);
        tt_local_purge_pending_clients(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
index ab8d0fe..e9eb043 100644
@@ -222,24 +222,24 @@ struct socket_packet {
        struct icmp_packet_rr icmp_packet;
 };
 
-struct tt_local_entry {
+struct tt_common_entry {
        uint8_t addr[ETH_ALEN];
        struct hlist_node hash_entry;
-       unsigned long last_seen;
        uint16_t flags;
        atomic_t refcount;
        struct rcu_head rcu;
 };
 
+struct tt_local_entry {
+       struct tt_common_entry common;
+       unsigned long last_seen;
+};
+
 struct tt_global_entry {
-       uint8_t addr[ETH_ALEN];
-       struct hlist_node hash_entry; /* entry in the global table */
+       struct tt_common_entry common;
        struct orig_node *orig_node;
        uint8_t ttvn;
-       uint16_t flags; /* only TT_GLOBAL_ROAM is used */
        unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
-       atomic_t refcount;
-       struct rcu_head rcu;
 };
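With the address, hash linkage, flags, refcount and RCU head hoisted into struct tt_common_entry, embedded as the first member of both entry types, a single compare callback can serve both tables; that is why compare_ltt/compare_gtt collapse into compare_tt in the hunks above. A sketch of what such a callback looks like under the hashtable_t convention of comparing a hash node against a raw address (hypothetical, not the exact batman-adv implementation):

static int my_compare_tt(const struct hlist_node *node, const void *data2)
{
        const void *data1 = container_of(node, struct tt_common_entry,
                                         hash_entry);

        /* addr is the first field of tt_common_entry, so the struct
         * pointer can be compared directly against a MAC address */
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}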
 
 struct tt_change_node {
index f81a6b6..cc3b9f2 100644
@@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2)
 
 /* hash function to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(const void *data, int size)
+static uint32_t vis_info_choose(const void *data, uint32_t size)
 {
        const struct vis_info *vis_info = data;
        const struct vis_packet *packet;
@@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
        struct hlist_head *head;
        struct hlist_node *node;
        struct vis_info *vis_info, *vis_info_tmp = NULL;
-       int index;
+       uint32_t index;
 
        if (!hash)
                return NULL;
@@ -202,7 +202,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
        HLIST_HEAD(vis_if_list);
        struct if_list_entry *entry;
        struct hlist_node *pos, *n;
-       int i, j, ret = 0;
+       uint32_t i;
+       int j, ret = 0;
        int vis_server = atomic_read(&bat_priv->vis_mode);
        size_t buff_pos, buf_size;
        char *buff;
@@ -556,7 +557,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
        struct hlist_head *head;
        struct orig_node *orig_node;
        struct vis_packet *packet;
-       int best_tq = -1, i;
+       int best_tq = -1;
+       uint32_t i;
 
        packet = (struct vis_packet *)info->skb_packet->data;
 
@@ -607,8 +609,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
        struct vis_info *info = bat_priv->my_vis_info;
        struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
        struct vis_info_entry *entry;
-       struct tt_local_entry *tt_local_entry;
-       int best_tq = -1, i;
+       struct tt_common_entry *tt_common_entry;
+       int best_tq = -1;
+       uint32_t i;
 
        info->first_seen = jiffies;
        packet->vis_type = atomic_read(&bat_priv->vis_mode);
@@ -669,13 +672,13 @@ next:
                head = &hash->table[i];
 
                rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_local_entry, node, head,
+               hlist_for_each_entry_rcu(tt_common_entry, node, head,
                                         hash_entry) {
                        entry = (struct vis_info_entry *)
                                        skb_put(info->skb_packet,
                                                sizeof(*entry));
                        memset(entry->src, 0, ETH_ALEN);
-                       memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+                       memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means TT */
                        packet->entries++;
 
@@ -696,7 +699,7 @@ unlock:
  * held */
 static void purge_vis_packets(struct bat_priv *bat_priv)
 {
-       int i;
+       uint32_t i;
        struct hashtable_t *hash = bat_priv->vis_hash;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
@@ -733,7 +736,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
        struct sk_buff *skb;
        struct hard_iface *hard_iface;
        uint8_t dstaddr[ETH_ALEN];
-       int i;
+       uint32_t i;
 
 
        packet = (struct vis_packet *)info->skb_packet->data;
index 1eea820..42d53b8 100644
@@ -65,15 +65,13 @@ static DECLARE_RWSEM(bnep_session_sem);
 static struct bnep_session *__bnep_get_session(u8 *dst)
 {
        struct bnep_session *s;
-       struct list_head *p;
 
        BT_DBG("");
 
-       list_for_each(p, &bnep_session_list) {
-               s = list_entry(p, struct bnep_session, list);
+       list_for_each_entry(s, &bnep_session_list, list)
                if (!compare_ether_addr(dst, s->eh.h_source))
                        return s;
-       }
+
        return NULL;
 }
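The bnep change (and the cmtp and hci ones below) is the same mechanical cleanup: an open-coded list_for_each() plus list_entry() becomes list_for_each_entry(), which hides the struct list_head cursor and yields the typed element directly. Sketched on a hypothetical session list (kernel-style, not the bnep code itself):

struct my_session {
        struct list_head list;
        u8 addr[ETH_ALEN];
};

static LIST_HEAD(session_list);

static struct my_session *my_get_session(const u8 *dst)
{
        struct my_session *s;

        list_for_each_entry(s, &session_list, list)
                if (!memcmp(dst, s->addr, ETH_ALEN))
                        return s;

        return NULL;
}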
 
@@ -665,17 +663,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
 
 int bnep_get_connlist(struct bnep_connlist_req *req)
 {
-       struct list_head *p;
+       struct bnep_session *s;
        int err = 0, n = 0;
 
        down_read(&bnep_session_sem);
 
-       list_for_each(p, &bnep_session_list) {
-               struct bnep_session *s;
+       list_for_each_entry(s, &bnep_session_list, list) {
                struct bnep_conninfo ci;
 
-               s = list_entry(p, struct bnep_session, list);
-
                __bnep_copy_ci(&ci, s);
 
                if (copy_to_user(req->ci, &ci, sizeof(ci))) {
index 5a6e634..6c9c1fd 100644
@@ -53,15 +53,13 @@ static LIST_HEAD(cmtp_session_list);
 static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
 {
        struct cmtp_session *session;
-       struct list_head *p;
 
        BT_DBG("");
 
-       list_for_each(p, &cmtp_session_list) {
-               session = list_entry(p, struct cmtp_session, list);
+       list_for_each_entry(session, &cmtp_session_list, list)
                if (!bacmp(bdaddr, &session->bdaddr))
                        return session;
-       }
+
        return NULL;
 }
 
@@ -432,19 +430,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
 
 int cmtp_get_connlist(struct cmtp_connlist_req *req)
 {
-       struct list_head *p;
+       struct cmtp_session *session;
        int err = 0, n = 0;
 
        BT_DBG("");
 
        down_read(&cmtp_session_sem);
 
-       list_for_each(p, &cmtp_session_list) {
-               struct cmtp_session *session;
+       list_for_each_entry(session, &cmtp_session_list, list) {
                struct cmtp_conninfo ci;
 
-               session = list_entry(p, struct cmtp_session, list);
-
                __cmtp_copy_session(session, &ci);
 
                if (copy_to_user(req->ci, &ci, sizeof(ci))) {
index e0af723..b328ac6 100644
@@ -123,7 +123,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
 
        BT_DBG("%p", conn);
 
-       if (conn->hdev->hci_ver < 2)
+       if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;
 
        bacpy(&cp.bdaddr, &conn->dst);
@@ -374,6 +374,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
        skb_queue_head_init(&conn->data_q);
 
+       hci_chan_hash_init(conn);
+
        setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
        setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
@@ -432,6 +434,8 @@ int hci_conn_del(struct hci_conn *conn)
 
        tasklet_disable(&hdev->tx_task);
 
+       hci_chan_hash_flush(conn);
+
        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -453,16 +457,13 @@ int hci_conn_del(struct hci_conn *conn)
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 {
        int use_src = bacmp(src, BDADDR_ANY);
-       struct hci_dev *hdev = NULL;
-       struct list_head *p;
+       struct hci_dev *hdev = NULL, *d;
 
        BT_DBG("%s -> %s", batostr(src), batostr(dst));
 
        read_lock_bh(&hci_dev_list_lock);
 
-       list_for_each(p, &hci_dev_list) {
-               struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
+       list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;
 
@@ -819,7 +820,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
 
                c->state = BT_CLOSED;
 
-               hci_proto_disconn_cfm(c, 0x16);
+               hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
 }
@@ -855,10 +856,10 @@ EXPORT_SYMBOL(hci_conn_put_device);
 
 int hci_get_conn_list(void __user *arg)
 {
+       register struct hci_conn *c;
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
-       struct list_head *p;
        int n = 0, size, err;
 
        if (copy_from_user(&req, arg, sizeof(req)))
@@ -882,10 +883,7 @@ int hci_get_conn_list(void __user *arg)
        ci = cl->conn_info;
 
        hci_dev_lock_bh(hdev);
-       list_for_each(p, &hdev->conn_hash.list) {
-               register struct hci_conn *c;
-               c = list_entry(p, struct hci_conn, list);
-
+       list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type  = c->type;
@@ -956,3 +954,52 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
 
        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
 }
+
+struct hci_chan *hci_chan_create(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct hci_chan *chan;
+
+       BT_DBG("%s conn %p", hdev->name, conn);
+
+       chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
+       if (!chan)
+               return NULL;
+
+       chan->conn = conn;
+       skb_queue_head_init(&chan->data_q);
+
+       tasklet_disable(&hdev->tx_task);
+       hci_chan_hash_add(conn, chan);
+       tasklet_enable(&hdev->tx_task);
+
+       return chan;
+}
+
+int hci_chan_del(struct hci_chan *chan)
+{
+       struct hci_conn *conn = chan->conn;
+       struct hci_dev *hdev = conn->hdev;
+
+       BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+
+       tasklet_disable(&hdev->tx_task);
+       hci_chan_hash_del(conn, chan);
+       tasklet_enable(&hdev->tx_task);
+
+       skb_queue_purge(&chan->data_q);
+       kfree(chan);
+
+       return 0;
+}
+
+void hci_chan_hash_flush(struct hci_conn *conn)
+{
+       struct hci_chan_hash *h = &conn->chan_hash;
+       struct hci_chan *chan, *tmp;
+
+       BT_DBG("conn %p", conn);
+
+       list_for_each_entry_safe(chan, tmp, &h->list, list)
+               hci_chan_del(chan);
+}
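hci_conn grows a per-connection channel hash: hci_chan_create() allocates a channel, ties it to the connection and inserts it with the tx tasklet disabled, hci_chan_del() reverses that and purges the channel's queue, and hci_chan_hash_flush() tears down whatever is left when the connection goes away (it is called from hci_conn_del() above). A hedged sketch of how a caller might use the new API; the error handling and the data-queue usage are assumptions, not taken from this patch:

static int my_setup_channel(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_chan *chan;

        chan = hci_chan_create(conn);
        if (!chan)
                return -ENOMEM;

        /* queue data on the channel; the tx path drains chan->data_q */
        skb_queue_tail(&chan->data_q, skb);

        /* ... later, when the logical channel is closed ... */
        hci_chan_del(chan);

        return 0;
}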
index be84ae3..ce3727e 100644
@@ -54,6 +54,8 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
+int enable_hs;
+
 static void hci_cmd_task(unsigned long arg);
 static void hci_rx_task(unsigned long arg);
 static void hci_tx_task(unsigned long arg);
@@ -228,18 +230,6 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
 
-#if 0
-       /* Host buffer size */
-       {
-               struct hci_cp_host_buffer_size cp;
-               cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
-               cp.sco_mtu = HCI_MAX_SCO_SIZE;
-               cp.acl_max_pkt = cpu_to_le16(0xffff);
-               cp.sco_max_pkt = cpu_to_le16(0xffff);
-               hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
-       }
-#endif
-
        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 
@@ -319,8 +309,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
  * Device is held on return. */
 struct hci_dev *hci_dev_get(int index)
 {
-       struct hci_dev *hdev = NULL;
-       struct list_head *p;
+       struct hci_dev *hdev = NULL, *d;
 
        BT_DBG("%d", index);
 
@@ -328,8 +317,7 @@ struct hci_dev *hci_dev_get(int index)
                return NULL;
 
        read_lock(&hci_dev_list_lock);
-       list_for_each(p, &hci_dev_list) {
-               struct hci_dev *d = list_entry(p, struct hci_dev, list);
+       list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
@@ -523,8 +511,9 @@ int hci_dev_open(__u16 dev)
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);
 
-       /* Treat all non BR/EDR controllers as raw devices for now */
-       if (hdev->dev_type != HCI_BREDR)
+       /* Treat all non BR/EDR controllers as raw devices if
+          enable_hs is not set */
+       if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);
 
        if (hdev->open(hdev)) {
@@ -551,8 +540,11 @@ int hci_dev_open(__u16 dev)
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
-               if (!test_bit(HCI_SETUP, &hdev->flags))
-                       mgmt_powered(hdev->id, 1);
+               if (!test_bit(HCI_SETUP, &hdev->flags)) {
+                       hci_dev_lock_bh(hdev);
+                       mgmt_powered(hdev, 1);
+                       hci_dev_unlock_bh(hdev);
+               }
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
@@ -597,6 +589,14 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);
 
+       if (hdev->discov_timeout > 0) {
+               cancel_delayed_work(&hdev->discov_off);
+               hdev->discov_timeout = 0;
+       }
+
+       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+               cancel_delayed_work(&hdev->power_off);
+
        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
@@ -636,7 +636,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         * and no tasks are scheduled. */
        hdev->close(hdev);
 
-       mgmt_powered(hdev->id, 0);
+       hci_dev_lock_bh(hdev);
+       mgmt_powered(hdev, 0);
+       hci_dev_unlock_bh(hdev);
 
        /* Clear flags */
        hdev->flags = 0;
@@ -794,9 +796,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
 
 int hci_get_dev_list(void __user *arg)
 {
+       struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
-       struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;
 
@@ -815,12 +817,9 @@ int hci_get_dev_list(void __user *arg)
        dr = dl->dev_req;
 
        read_lock_bh(&hci_dev_list_lock);
-       list_for_each(p, &hci_dev_list) {
-               struct hci_dev *hdev;
-
-               hdev = list_entry(p, struct hci_dev, list);
-
-               hci_del_off_timer(hdev);
+       list_for_each_entry(hdev, &hci_dev_list, list) {
+               if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+                       cancel_delayed_work(&hdev->power_off);
 
                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -855,7 +854,8 @@ int hci_get_dev_info(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
-       hci_del_off_timer(hdev);
+       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+               cancel_delayed_work_sync(&hdev->power_off);
 
        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -912,6 +912,7 @@ struct hci_dev *hci_alloc_dev(void)
        if (!hdev)
                return NULL;
 
+       hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);
 
        return hdev;
@@ -938,39 +939,41 @@ static void hci_power_on(struct work_struct *work)
                return;
 
        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
-               mod_timer(&hdev->off_timer,
-                               jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+               queue_delayed_work(hdev->workqueue, &hdev->power_off,
+                                       msecs_to_jiffies(AUTO_OFF_TIMEOUT));
 
        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
-               mgmt_index_added(hdev->id);
+               mgmt_index_added(hdev);
 }
 
 static void hci_power_off(struct work_struct *work)
 {
-       struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                                       power_off.work);
 
        BT_DBG("%s", hdev->name);
 
+       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+
        hci_dev_close(hdev->id);
 }
 
-static void hci_auto_off(unsigned long data)
+static void hci_discov_off(struct work_struct *work)
 {
-       struct hci_dev *hdev = (struct hci_dev *) data;
+       struct hci_dev *hdev;
+       u8 scan = SCAN_PAGE;
+
+       hdev = container_of(work, struct hci_dev, discov_off.work);
 
        BT_DBG("%s", hdev->name);
 
-       clear_bit(HCI_AUTO_OFF, &hdev->flags);
+       hci_dev_lock_bh(hdev);
 
-       queue_work(hdev->workqueue, &hdev->power_off);
-}
+       hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 
-void hci_del_off_timer(struct hci_dev *hdev)
-{
-       BT_DBG("%s", hdev->name);
+       hdev->discov_timeout = 0;
 
-       clear_bit(HCI_AUTO_OFF, &hdev->flags);
-       del_timer(&hdev->off_timer);
+       hci_dev_unlock_bh(hdev);
 }
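The auto-power-off timer plus work item pair is folded into a single delayed_work (power_off), and the new discov_off delayed work switches discoverable mode back off when its timeout expires; callers now disarm with cancel_delayed_work()/cancel_delayed_work_sync() instead of hci_del_off_timer(). A generic sketch of that conversion pattern, independent of the hci structures (names are illustrative):

struct my_dev {
        struct workqueue_struct *wq;
        struct delayed_work power_off;
};

static void my_power_off(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev,
                                          power_off.work);

        pr_info("powering off %p\n", dev);      /* placeholder power-down */
}

static void my_dev_init(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->power_off, my_power_off);
}

static void my_dev_arm_auto_off(struct my_dev *dev)
{
        /* replaces mod_timer(): run my_power_off() after the timeout */
        queue_delayed_work(dev->wq, &dev->power_off,
                           msecs_to_jiffies(AUTO_OFF_TIMEOUT));
}

static void my_dev_cancel_auto_off(struct my_dev *dev)
{
        /* replaces the del_timer()-based hci_del_off_timer() */
        cancel_delayed_work(&dev->power_off);
}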
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1007,16 +1010,11 @@ int hci_link_keys_clear(struct hci_dev *hdev)
 
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-       struct list_head *p;
-
-       list_for_each(p, &hdev->link_keys) {
-               struct link_key *k;
-
-               k = list_entry(p, struct link_key, list);
+       struct link_key *k;
 
+       list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
-       }
 
        return NULL;
 }
@@ -1138,7 +1136,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
 
        persistent = hci_persistent_key(hdev, conn, type, old_key_type);
 
-       mgmt_new_key(hdev->id, key, persistent);
+       mgmt_new_link_key(hdev, key, persistent);
 
        if (!persistent) {
                list_del(&key->list);
@@ -1181,7 +1179,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
        memcpy(id->rand, rand, sizeof(id->rand));
 
        if (new_key)
-               mgmt_new_key(hdev->id, key, old_key_type);
+               mgmt_new_link_key(hdev, key, old_key_type);
 
        return 0;
 }
@@ -1279,16 +1277,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr)
 {
-       struct list_head *p;
-
-       list_for_each(p, &hdev->blacklist) {
-               struct bdaddr_list *b;
-
-               b = list_entry(p, struct bdaddr_list, list);
+       struct bdaddr_list *b;
 
+       list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;
-       }
 
        return NULL;
 }
@@ -1327,26 +1320,24 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
 
        list_add(&entry->list, &hdev->blacklist);
 
-       return mgmt_device_blocked(hdev->id, bdaddr);
+       return mgmt_device_blocked(hdev, bdaddr);
 }
 
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
        struct bdaddr_list *entry;
 
-       if (bacmp(bdaddr, BDADDR_ANY) == 0) {
+       if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);
-       }
 
        entry = hci_blacklist_lookup(hdev, bdaddr);
-       if (!entry) {
+       if (!entry)
                return -ENOENT;
-       }
 
        list_del(&entry->list);
        kfree(entry);
 
-       return mgmt_device_unblocked(hdev->id, bdaddr);
+       return mgmt_device_unblocked(hdev, bdaddr);
 }
 
 static void hci_clear_adv_cache(unsigned long arg)
@@ -1425,7 +1416,7 @@ int hci_add_adv_entry(struct hci_dev *hdev,
 int hci_register_dev(struct hci_dev *hdev)
 {
        struct list_head *head = &hci_dev_list, *p;
-       int i, id = 0;
+       int i, id, error;
 
        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);
@@ -1433,6 +1424,11 @@ int hci_register_dev(struct hci_dev *hdev)
        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;
 
+       /* Do not allow HCI_AMP devices to register at index 0,
+        * so the index can be used as the AMP controller ID.
+        */
+       id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
+
        write_lock_bh(&hci_dev_list_lock);
 
        /* Find first available device id */
@@ -1444,12 +1440,13 @@ int hci_register_dev(struct hci_dev *hdev)
 
        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
-       list_add(&hdev->list, head);
+       list_add_tail(&hdev->list, head);
 
        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);
 
        hdev->flags = 0;
+       hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
@@ -1479,6 +1476,8 @@ int hci_register_dev(struct hci_dev *hdev)
 
        hci_conn_hash_init(hdev);
 
+       INIT_LIST_HEAD(&hdev->mgmt_pending);
+
        INIT_LIST_HEAD(&hdev->blacklist);
 
        INIT_LIST_HEAD(&hdev->uuids);
@@ -1492,8 +1491,9 @@ int hci_register_dev(struct hci_dev *hdev)
                                                (unsigned long) hdev);
 
        INIT_WORK(&hdev->power_on, hci_power_on);
-       INIT_WORK(&hdev->power_off, hci_power_off);
-       setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
+       INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
+
+       INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
 
        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
@@ -1502,10 +1502,14 @@ int hci_register_dev(struct hci_dev *hdev)
        write_unlock_bh(&hci_dev_list_lock);
 
        hdev->workqueue = create_singlethread_workqueue(hdev->name);
-       if (!hdev->workqueue)
-               goto nomem;
+       if (!hdev->workqueue) {
+               error = -ENOMEM;
+               goto err;
+       }
 
-       hci_register_sysfs(hdev);
+       error = hci_add_sysfs(hdev);
+       if (error < 0)
+               goto err_wqueue;
 
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
@@ -1524,17 +1528,19 @@ int hci_register_dev(struct hci_dev *hdev)
 
        return id;
 
-nomem:
+err_wqueue:
+       destroy_workqueue(hdev->workqueue);
+err:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);
 
-       return -ENOMEM;
+       return error;
 }
 EXPORT_SYMBOL(hci_register_dev);
 
 /* Unregister HCI device */
-int hci_unregister_dev(struct hci_dev *hdev)
+void hci_unregister_dev(struct hci_dev *hdev)
 {
        int i;
 
@@ -1550,8 +1556,15 @@ int hci_unregister_dev(struct hci_dev *hdev)
                kfree_skb(hdev->reassembly[i]);
 
        if (!test_bit(HCI_INIT, &hdev->flags) &&
-                                       !test_bit(HCI_SETUP, &hdev->flags))
-               mgmt_index_removed(hdev->id);
+                                       !test_bit(HCI_SETUP, &hdev->flags)) {
+               hci_dev_lock_bh(hdev);
+               mgmt_index_removed(hdev);
+               hci_dev_unlock_bh(hdev);
+       }
+
+       /* mgmt_index_removed should take care of emptying the
+        * pending list */
+       BUG_ON(!list_empty(&hdev->mgmt_pending));
 
        hci_notify(hdev, HCI_DEV_UNREG);
 
@@ -1560,9 +1573,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
                rfkill_destroy(hdev->rfkill);
        }
 
-       hci_unregister_sysfs(hdev);
+       hci_del_sysfs(hdev);
 
-       hci_del_off_timer(hdev);
        del_timer(&hdev->adv_timer);
 
        destroy_workqueue(hdev->workqueue);
@@ -1576,8 +1588,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
        hci_dev_unlock_bh(hdev);
 
        __hci_dev_put(hdev);
-
-       return 0;
 }
 EXPORT_SYMBOL(hci_unregister_dev);
 
@@ -1948,23 +1958,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
        hdr->dlen   = cpu_to_le16(len);
 }
 
-void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
+static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
+                               struct sk_buff *skb, __u16 flags)
 {
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;
 
-       BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
-
-       skb->dev = (void *) hdev;
-       bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
-       hci_add_acl_hdr(skb, conn->handle, flags);
-
        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
 
-               skb_queue_tail(&conn->data_q, skb);
+               skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
@@ -1972,9 +1977,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
                skb_shinfo(skb)->frag_list = NULL;
 
                /* Queue all fragments atomically */
-               spin_lock_bh(&conn->data_q.lock);
+               spin_lock_bh(&queue->lock);
 
-               __skb_queue_tail(&conn->data_q, skb);
+               __skb_queue_tail(queue, skb);
 
                flags &= ~ACL_START;
                flags |= ACL_CONT;
@@ -1987,11 +1992,25 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
 
                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
 
-                       __skb_queue_tail(&conn->data_q, skb);
+                       __skb_queue_tail(queue, skb);
                } while (list);
 
-               spin_unlock_bh(&conn->data_q.lock);
+               spin_unlock_bh(&queue->lock);
        }
+}
+
+void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
+{
+       struct hci_conn *conn = chan->conn;
+       struct hci_dev *hdev = conn->hdev;
+
+       BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+
+       skb->dev = (void *) hdev;
+       bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
+       hci_add_acl_hdr(skb, conn->handle, flags);
+
+       hci_queue_acl(conn, &chan->data_q, skb, flags);
 
        tasklet_schedule(&hdev->tx_task);
 }
@@ -2026,16 +2045,12 @@ EXPORT_SYMBOL(hci_send_sco);
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
-       struct hci_conn *conn = NULL;
+       struct hci_conn *conn = NULL, *c;
        int num = 0, min = ~0;
-       struct list_head *p;
 
        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
-       list_for_each(p, &h->list) {
-               struct hci_conn *c;
-               c = list_entry(p, struct hci_conn, list);
-
+       list_for_each_entry(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;
 
@@ -2084,14 +2099,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
-       struct list_head *p;
-       struct hci_conn  *c;
+       struct hci_conn *c;
 
        BT_ERR("%s link tx timeout", hdev->name);
 
        /* Kill stalled connections */
-       list_for_each(p, &h->list) {
-               c = list_entry(p, struct hci_conn, list);
+       list_for_each_entry(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %s",
                                hdev->name, batostr(&c->dst));
@@ -2100,11 +2113,137 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
        }
 }
 
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+                                               int *quote)
 {
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_chan *chan = NULL;
+       int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
+       int cnt, q, conn_num = 0;
+
+       BT_DBG("%s", hdev->name);
+
+       list_for_each_entry(conn, &h->list, list) {
+               struct hci_chan_hash *ch;
+               struct hci_chan *tmp;
+
+               if (conn->type != type)
+                       continue;
+
+               if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+                       continue;
+
+               conn_num++;
+
+               ch = &conn->chan_hash;
+
+               list_for_each_entry(tmp, &ch->list, list) {
+                       struct sk_buff *skb;
+
+                       if (skb_queue_empty(&tmp->data_q))
+                               continue;
+
+                       skb = skb_peek(&tmp->data_q);
+                       if (skb->priority < cur_prio)
+                               continue;
+
+                       if (skb->priority > cur_prio) {
+                               num = 0;
+                               min = ~0;
+                               cur_prio = skb->priority;
+                       }
+
+                       num++;
+
+                       if (conn->sent < min) {
+                               min  = conn->sent;
+                               chan = tmp;
+                       }
+               }
+
+               if (hci_conn_num(hdev, type) == conn_num)
+                       break;
+       }
+
+       if (!chan)
+               return NULL;
+
+       switch (chan->conn->type) {
+       case ACL_LINK:
+               cnt = hdev->acl_cnt;
+               break;
+       case SCO_LINK:
+       case ESCO_LINK:
+               cnt = hdev->sco_cnt;
+               break;
+       case LE_LINK:
+               cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+               break;
+       default:
+               cnt = 0;
+               BT_ERR("Unknown link type");
+       }
+
+       q = cnt / num;
+       *quote = q ? q : 1;
+       BT_DBG("chan %p quote %d", chan, *quote);
+       return chan;
+}
+
+static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
+{
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct hci_conn *conn;
+       int num = 0;
+
+       BT_DBG("%s", hdev->name);
+
+       list_for_each_entry(conn, &h->list, list) {
+               struct hci_chan_hash *ch;
+               struct hci_chan *chan;
+
+               if (conn->type != type)
+                       continue;
+
+               if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+                       continue;
+
+               num++;
+
+               ch = &conn->chan_hash;
+               list_for_each_entry(chan, &ch->list, list) {
+                       struct sk_buff *skb;
+
+                       if (chan->sent) {
+                               chan->sent = 0;
+                               continue;
+                       }
+
+                       if (skb_queue_empty(&chan->data_q))
+                               continue;
+
+                       skb = skb_peek(&chan->data_q);
+                       if (skb->priority >= HCI_PRIO_MAX - 1)
+                               continue;
+
+                       skb->priority = HCI_PRIO_MAX - 1;
+
+                       BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+                                                               skb->priority);
+               }
+
+               if (hci_conn_num(hdev, type) == num)
+                       break;
+       }
+}
+
+static inline void hci_sched_acl(struct hci_dev *hdev)
+{
+       struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
+       unsigned int cnt;
 
        BT_DBG("%s", hdev->name);
 
@@ -2118,19 +2257,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
                        hci_link_tx_to(hdev, ACL_LINK);
        }
 
-       while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-               while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-                       BT_DBG("skb %p len %d", skb, skb->len);
+       cnt = hdev->acl_cnt;
 
-                       hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
+       while (hdev->acl_cnt &&
+                       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+               u32 priority = (skb_peek(&chan->data_q))->priority;
+               while (quote-- && (skb = skb_peek(&chan->data_q))) {
+                       BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+                                       skb->len, skb->priority);
+
+                       /* Stop if priority has changed */
+                       if (skb->priority < priority)
+                               break;
+
+                       skb = skb_dequeue(&chan->data_q);
+
+                       hci_conn_enter_active_mode(chan->conn,
+                                               bt_cb(skb)->force_active);
 
                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;
 
                        hdev->acl_cnt--;
-                       conn->sent++;
+                       chan->sent++;
+                       chan->conn->sent++;
                }
        }
+
+       if (cnt != hdev->acl_cnt)
+               hci_prio_recalculate(hdev, ACL_LINK);
 }
 
 /* Schedule SCO */
@@ -2182,9 +2337,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
 
 static inline void hci_sched_le(struct hci_dev *hdev)
 {
-       struct hci_conn *conn;
+       struct hci_chan *chan;
        struct sk_buff *skb;
-       int quote, cnt;
+       int quote, cnt, tmp;
 
        BT_DBG("%s", hdev->name);
 
@@ -2200,21 +2355,35 @@ static inline void hci_sched_le(struct hci_dev *hdev)
        }
 
        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
-       while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
-               while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
-                       BT_DBG("skb %p len %d", skb, skb->len);
+       tmp = cnt;
+       while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+               u32 priority = (skb_peek(&chan->data_q))->priority;
+               while (quote-- && (skb = skb_peek(&chan->data_q))) {
+                       BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+                                       skb->len, skb->priority);
+
+                       /* Stop if priority has changed */
+                       if (skb->priority < priority)
+                               break;
+
+                       skb = skb_dequeue(&chan->data_q);
 
                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;
 
                        cnt--;
-                       conn->sent++;
+                       chan->sent++;
+                       chan->conn->sent++;
                }
        }
+
        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;
+
+       if (cnt != tmp)
+               hci_prio_recalculate(hdev, LE_LINK);
 }
 
 static void hci_tx_task(unsigned long arg)
@@ -2407,3 +2576,34 @@ static void hci_cmd_task(unsigned long arg)
                }
        }
 }
+
+int hci_do_inquiry(struct hci_dev *hdev, u8 length)
+{
+       /* General inquiry access code (GIAC) */
+       u8 lap[3] = { 0x33, 0x8b, 0x9e };
+       struct hci_cp_inquiry cp;
+
+       BT_DBG("%s", hdev->name);
+
+       if (test_bit(HCI_INQUIRY, &hdev->flags))
+               return -EINPROGRESS;
+
+       memset(&cp, 0, sizeof(cp));
+       memcpy(&cp.lap, lap, sizeof(cp.lap));
+       cp.length  = length;
+
+       return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+}
+
+int hci_cancel_inquiry(struct hci_dev *hdev)
+{
+       BT_DBG("%s", hdev->name);
+
+       if (!test_bit(HCI_INQUIRY, &hdev->flags))
+               return -EPERM;
+
+       return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+}
+
+module_param(enable_hs, bool, 0644);
+MODULE_PARM_DESC(enable_hs, "Enable High Speed");
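A recurring change in the hci_core.c portion above is the replacement of the one-shot auto-power-off timer (setup_timer/mod_timer/del_timer) with delayed work queued on the controller's workqueue. Below is a minimal sketch of that pattern, using a hypothetical example_dev rather than the real struct hci_dev, and assuming only the standard linux/workqueue.h API; it is an illustration of the conversion, not code from this patch.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_dev {
	struct workqueue_struct *wq;
	struct delayed_work power_off;
};

static void example_power_off(struct work_struct *work)
{
	/* delayed_work embeds a work_struct member named "work" */
	struct example_dev *edev = container_of(work, struct example_dev,
							power_off.work);
	/* ... power the device down ... */
	(void) edev;
}

static void example_init(struct example_dev *edev)
{
	INIT_DELAYED_WORK(&edev->power_off, example_power_off);
}

static void example_arm(struct example_dev *edev, unsigned int msecs)
{
	/* replaces mod_timer(&edev->off_timer, jiffies + ...) */
	queue_delayed_work(edev->wq, &edev->power_off,
						msecs_to_jiffies(msecs));
}

static void example_disarm(struct example_dev *edev)
{
	/* replaces del_timer(&edev->off_timer) */
	cancel_delayed_work(&edev->power_off);
}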
index 643a41b..35cb56e 100644 (file)
@@ -55,12 +55,18 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%x", hdev->name, status);
 
-       if (status)
+       if (status) {
+               hci_dev_lock(hdev);
+               mgmt_stop_discovery_failed(hdev, status);
+               hci_dev_unlock(hdev);
                return;
+       }
 
-       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-                       test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_discovering(hdev->id, 0);
+       clear_bit(HCI_INQUIRY, &hdev->flags);
+
+       hci_dev_lock(hdev);
+       mgmt_discovering(hdev, 0);
+       hci_dev_unlock(hdev);
 
        hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
 
@@ -76,10 +82,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
        if (status)
                return;
 
-       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-                               test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_discovering(hdev->id, 0);
-
        hci_conn_check_pending(hdev);
 }
 
@@ -192,6 +194,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
        clear_bit(HCI_RESET, &hdev->flags);
 
        hci_req_complete(hdev, HCI_OP_RESET, status);
+
+       hdev->dev_flags = 0;
 }
 
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -205,13 +209,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
        if (!sent)
                return;
 
+       hci_dev_lock(hdev);
+
        if (test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_set_local_name_complete(hdev->id, sent, status);
+               mgmt_set_local_name_complete(hdev, sent, status);
 
-       if (status)
-               return;
+       if (status == 0)
+               memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 
-       memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
+       hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -274,7 +280,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 
 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       __u8 status = *((__u8 *) skb->data);
+       __u8 param, status = *((__u8 *) skb->data);
+       int old_pscan, old_iscan;
        void *sent;
 
        BT_DBG("%s status 0x%x", hdev->name, status);
@@ -283,28 +290,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
        if (!sent)
                return;
 
-       if (!status) {
-               __u8 param = *((__u8 *) sent);
-               int old_pscan, old_iscan;
-
-               old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
-               old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+       param = *((__u8 *) sent);
 
-               if (param & SCAN_INQUIRY) {
-                       set_bit(HCI_ISCAN, &hdev->flags);
-                       if (!old_iscan)
-                               mgmt_discoverable(hdev->id, 1);
-               } else if (old_iscan)
-                       mgmt_discoverable(hdev->id, 0);
+       hci_dev_lock(hdev);
 
-               if (param & SCAN_PAGE) {
-                       set_bit(HCI_PSCAN, &hdev->flags);
-                       if (!old_pscan)
-                               mgmt_connectable(hdev->id, 1);
-               } else if (old_pscan)
-                       mgmt_connectable(hdev->id, 0);
+       if (status != 0) {
+               mgmt_write_scan_failed(hdev, param, status);
+               hdev->discov_timeout = 0;
+               goto done;
        }
 
+       old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
+       old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
+
+       if (param & SCAN_INQUIRY) {
+               set_bit(HCI_ISCAN, &hdev->flags);
+               if (!old_iscan)
+                       mgmt_discoverable(hdev, 1);
+               if (hdev->discov_timeout > 0) {
+                       int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+                       queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+                                                                       to);
+               }
+       } else if (old_iscan)
+               mgmt_discoverable(hdev, 0);
+
+       if (param & SCAN_PAGE) {
+               set_bit(HCI_PSCAN, &hdev->flags);
+               if (!old_pscan)
+                       mgmt_connectable(hdev, 1);
+       } else if (old_pscan)
+               mgmt_connectable(hdev, 0);
+
+done:
+       hci_dev_unlock(hdev);
        hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
 }
 
@@ -481,7 +500,7 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 
        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices */
-       if (hdev->lmp_ver <= 1)
+       if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;
 
        events[4] |= 0x01; /* Flow Specification Complete */
@@ -545,7 +564,7 @@ static void hci_setup(struct hci_dev *hdev)
 {
        hci_setup_event_mask(hdev);
 
-       if (hdev->hci_ver > 1)
+       if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
        if (hdev->features[6] & LMP_SIMPLE_PAIR) {
@@ -700,6 +719,21 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
        hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
 }
 
+static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
+                                               struct sk_buff *skb)
+{
+       struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->flow_ctl_mode = rp->mode;
+
+       hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
+}
+
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -748,6 +782,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
        hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
 }
 
+static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
+               struct sk_buff *skb)
+{
+       struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->amp_status = rp->amp_status;
+       hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
+       hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
+       hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
+       hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
+       hdev->amp_type = rp->amp_type;
+       hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
+       hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
+       hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
+       hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+       hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
+}
+
 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
                                                        struct sk_buff *skb)
 {
@@ -804,19 +862,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+       hci_dev_lock(hdev);
+
        if (test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);
+               mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
 
        if (rp->status != 0)
-               return;
+               goto unlock;
 
        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
        if (!cp)
-               return;
+               goto unlock;
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (conn)
                conn->pin_length = cp->pin_len;
+
+unlock:
+       hci_dev_unlock(hdev);
 }
 
 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -825,10 +888,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+       hci_dev_lock(hdev);
+
        if (test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
+               mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
                                                                rp->status);
+
+       hci_dev_unlock(hdev);
 }
+
 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
                                       struct sk_buff *skb)
 {
@@ -855,9 +923,13 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+       hci_dev_lock(hdev);
+
        if (test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
+               mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
                                                                rp->status);
+
+       hci_dev_unlock(hdev);
 }
 
 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
@@ -867,9 +939,44 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
+                                                               rp->status);
+
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
+                                                               rp->status);
+
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+       hci_dev_lock(hdev);
+
        if (test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
+               mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
                                                                rp->status);
+
+       hci_dev_unlock(hdev);
 }
 
 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
@@ -879,8 +986,17 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%x", hdev->name, rp->status);
 
-       mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+       hci_dev_lock(hdev);
+       mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
                                                rp->randomizer, rp->status);
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%x", hdev->name, status);
 }
 
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
@@ -899,12 +1015,16 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                return;
 
        if (cp->enable == 0x01) {
+               set_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
                del_timer(&hdev->adv_timer);
 
                hci_dev_lock(hdev);
                hci_adv_entries_clear(hdev);
                hci_dev_unlock(hdev);
        } else if (cp->enable == 0x00) {
+               clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
                mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
        }
 }
@@ -955,12 +1075,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
        if (status) {
                hci_req_complete(hdev, HCI_OP_INQUIRY, status);
                hci_conn_check_pending(hdev);
+               hci_dev_lock(hdev);
+               if (test_bit(HCI_MGMT, &hdev->flags))
+                       mgmt_start_discovery_failed(hdev, status);
+               hci_dev_unlock(hdev);
                return;
        }
 
-       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
-                               test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_discovering(hdev->id, 1);
+       set_bit(HCI_INQUIRY, &hdev->flags);
+
+       hci_dev_lock(hdev);
+       mgmt_discovering(hdev, 1);
+       hci_dev_unlock(hdev);
 }
 
 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -1339,13 +1465,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, status);
 
-       if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
-                               test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_discovering(hdev->id, 0);
-
        hci_req_complete(hdev, HCI_OP_INQUIRY, status);
 
        hci_conn_check_pending(hdev);
+
+       if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+               return;
+
+       hci_dev_lock(hdev);
+       mgmt_discovering(hdev, 0);
+       hci_dev_unlock(hdev);
 }
 
 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1361,12 +1490,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
 
        hci_dev_lock(hdev);
 
-       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-               if (test_bit(HCI_MGMT, &hdev->flags))
-                       mgmt_discovering(hdev->id, 1);
-       }
-
        for (; num_rsp; num_rsp--, info++) {
                bacpy(&data.bdaddr, &info->bdaddr);
                data.pscan_rep_mode     = info->pscan_rep_mode;
@@ -1377,8 +1500,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
                data.rssi               = 0x00;
                data.ssp_mode           = 0x00;
                hci_inquiry_cache_update(hdev, &data);
-               mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
-                                                                       NULL);
+               mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+                                               info->dev_class, 0, NULL);
        }
 
        hci_dev_unlock(hdev);
@@ -1412,7 +1535,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        conn->state = BT_CONFIG;
                        hci_conn_hold(conn);
                        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-                       mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+                       mgmt_connected(hdev, &ev->bdaddr, conn->type,
+                                                       conn->dst_type);
                } else
                        conn->state = BT_CONNECTED;
 
@@ -1434,7 +1558,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                }
 
                /* Set packet type for incoming connection */
-               if (!conn->out && hdev->hci_ver < 3) {
+               if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
                        struct hci_cp_change_conn_ptype cp;
                        cp.handle = ev->handle;
                        cp.pkt_type = cpu_to_le16(conn->pkt_type);
@@ -1444,7 +1568,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
        } else {
                conn->state = BT_CLOSED;
                if (conn->type == ACL_LINK)
-                       mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+                       mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+                                               conn->dst_type, ev->status);
        }
 
        if (conn->type == ACL_LINK)
@@ -1531,7 +1656,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
                struct hci_cp_reject_conn_req cp;
 
                bacpy(&cp.bdaddr, &ev->bdaddr);
-               cp.reason = 0x0f;
+               cp.reason = HCI_ERROR_REJ_BAD_ADDR;
                hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
        }
 }
@@ -1543,24 +1668,27 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
        BT_DBG("%s status %d", hdev->name, ev->status);
 
-       if (ev->status) {
-               mgmt_disconnect_failed(hdev->id);
-               return;
-       }
-
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;
 
-       conn->state = BT_CLOSED;
+       if (ev->status == 0)
+               conn->state = BT_CLOSED;
 
-       if (conn->type == ACL_LINK || conn->type == LE_LINK)
-               mgmt_disconnected(hdev->id, &conn->dst);
+       if (conn->type == ACL_LINK || conn->type == LE_LINK) {
+               if (ev->status != 0)
+                       mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
+               else
+                       mgmt_disconnected(hdev, &conn->dst, conn->type,
+                                                       conn->dst_type);
+       }
 
-       hci_proto_disconn_cfm(conn, ev->reason);
-       hci_conn_del(conn);
+       if (ev->status == 0) {
+               hci_proto_disconn_cfm(conn, ev->reason);
+               hci_conn_del(conn);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
@@ -1588,7 +1716,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
                        conn->sec_level = conn->pending_sec_level;
                }
        } else {
-               mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+               mgmt_auth_failed(hdev, &conn->dst, ev->status);
        }
 
        clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1643,7 +1771,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
        hci_dev_lock(hdev);
 
        if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
-               mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+               mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (!conn)
@@ -1898,6 +2026,14 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
                hci_cc_write_ca_timeout(hdev, skb);
                break;
 
+       case HCI_OP_READ_FLOW_CONTROL_MODE:
+               hci_cc_read_flow_control_mode(hdev, skb);
+               break;
+
+       case HCI_OP_READ_LOCAL_AMP_INFO:
+               hci_cc_read_local_amp_info(hdev, skb);
+               break;
+
        case HCI_OP_DELETE_STORED_LINK_KEY:
                hci_cc_delete_stored_link_key(hdev, skb);
                break;
@@ -1942,6 +2078,17 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
                hci_cc_user_confirm_neg_reply(hdev, skb);
                break;
 
+       case HCI_OP_USER_PASSKEY_REPLY:
+               hci_cc_user_passkey_reply(hdev, skb);
+               break;
+
+       case HCI_OP_USER_PASSKEY_NEG_REPLY:
+               hci_cc_user_passkey_neg_reply(hdev, skb);
+               break;
+
+       case HCI_OP_LE_SET_SCAN_PARAM:
+               hci_cc_le_set_scan_param(hdev, skb);
+               break;
+
        case HCI_OP_LE_SET_SCAN_ENABLE:
                hci_cc_le_set_scan_enable(hdev, skb);
                break;
@@ -2029,7 +2176,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        case HCI_OP_DISCONNECT:
                if (ev->status != 0)
-                       mgmt_disconnect_failed(hdev->id);
+                       mgmt_disconnect_failed(hdev, NULL, ev->status);
                break;
 
        case HCI_OP_LE_CREATE_CONN:
@@ -2194,7 +2341,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
                else
                        secure = 0;
 
-               mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+               mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
        }
 
 unlock:
@@ -2363,12 +2510,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 
        hci_dev_lock(hdev);
 
-       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-               if (test_bit(HCI_MGMT, &hdev->flags))
-                       mgmt_discovering(hdev->id, 1);
-       }
-
        if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
                struct inquiry_info_with_rssi_and_pscan_mode *info;
                info = (void *) (skb->data + 1);
@@ -2383,7 +2524,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;
                        hci_inquiry_cache_update(hdev, &data);
-                       mgmt_device_found(hdev->id, &info->bdaddr,
+                       mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                                info->dev_class, info->rssi,
                                                NULL);
                }
@@ -2400,7 +2541,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;
                        hci_inquiry_cache_update(hdev, &data);
-                       mgmt_device_found(hdev->id, &info->bdaddr,
+                       mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                                info->dev_class, info->rssi,
                                                NULL);
                }
@@ -2531,12 +2672,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
        if (!num_rsp)
                return;
 
-       if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
-
-               if (test_bit(HCI_MGMT, &hdev->flags))
-                       mgmt_discovering(hdev->id, 1);
-       }
-
        hci_dev_lock(hdev);
 
        for (; num_rsp; num_rsp--, info++) {
@@ -2549,8 +2684,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
                data.rssi               = info->rssi;
                data.ssp_mode           = 0x01;
                hci_inquiry_cache_update(hdev, &data);
-               mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
-                                               info->rssi, info->data);
+               mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
+                               info->dev_class, info->rssi, info->data);
        }
 
        hci_dev_unlock(hdev);
@@ -2614,7 +2749,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
                struct hci_cp_io_capability_neg_reply cp;
 
                bacpy(&cp.bdaddr, &ev->bdaddr);
-               cp.reason = 0x18; /* Pairing not allowed */
+               cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
 
                hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
                                                        sizeof(cp), &cp);
@@ -2706,13 +2841,28 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
        }
 
 confirm:
-       mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+       mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
                                                                confirm_hint);
 
 unlock:
        hci_dev_unlock(hdev);
 }
 
+static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
+                                                       struct sk_buff *skb)
+{
+       struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
+
+       if (test_bit(HCI_MGMT, &hdev->flags))
+               mgmt_user_passkey_request(hdev, &ev->bdaddr);
+
+       hci_dev_unlock(hdev);
+}
+
 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
@@ -2732,7 +2882,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
         * event gets always produced as initiator and is also mapped to
         * the mgmt_auth_failed event */
        if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
-               mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+               mgmt_auth_failed(hdev, &conn->dst, ev->status);
 
        hci_conn_put(conn);
 
@@ -2813,14 +2963,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
        }
 
        if (ev->status) {
-               mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
+               mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+                                               conn->dst_type, ev->status);
                hci_proto_connect_cfm(conn, ev->status);
                conn->state = BT_CLOSED;
                hci_conn_del(conn);
                goto unlock;
        }
 
-       mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+       mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
 
        conn->sec_level = BT_SECURITY_LOW;
        conn->handle = __le16_to_cpu(ev->handle);
@@ -3051,6 +3202,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_user_confirm_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_USER_PASSKEY_REQUEST:
+               hci_user_passkey_request_evt(hdev, skb);
+               break;
+
        case HCI_EV_SIMPLE_PAIR_COMPLETE:
                hci_simple_pair_complete_evt(hdev, skb);
                break;
@@ -3104,5 +3259,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
        kfree_skb(skb);
 }
 
-module_param(enable_le, bool, 0444);
+module_param(enable_le, bool, 0644);
 MODULE_PARM_DESC(enable_le, "Enable LE support");
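The dominant pattern in the hci_event.c hunks above is that mgmt_*() notifications now take the hci_dev pointer instead of an index and are invoked with the device lock held. A minimal sketch of the resulting shape of a command-complete handler follows; the handler and callback names (example_cc, example_mgmt_notify) are illustrative only and are not part of the patch.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void example_mgmt_notify(struct hci_dev *hdev, u8 status)
{
	/* hypothetical mgmt event emitter; stubbed out for the sketch */
}

static void example_cc(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->flags))
		example_mgmt_notify(hdev, status);

	hci_dev_unlock(hdev);
}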
index 661b461..c62d254 100644 (file)
@@ -436,17 +436,12 @@ static const struct file_operations inquiry_cache_fops = {
 static int blacklist_show(struct seq_file *f, void *p)
 {
        struct hci_dev *hdev = f->private;
-       struct list_head *l;
+       struct bdaddr_list *b;
 
        hci_dev_lock_bh(hdev);
 
-       list_for_each(l, &hdev->blacklist) {
-               struct bdaddr_list *b;
-
-               b = list_entry(l, struct bdaddr_list, list);
-
+       list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%s\n", batostr(&b->bdaddr));
-       }
 
        hci_dev_unlock_bh(hdev);
 
@@ -485,17 +480,12 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
 static int uuids_show(struct seq_file *f, void *p)
 {
        struct hci_dev *hdev = f->private;
-       struct list_head *l;
+       struct bt_uuid *uuid;
 
        hci_dev_lock_bh(hdev);
 
-       list_for_each(l, &hdev->uuids) {
-               struct bt_uuid *uuid;
-
-               uuid = list_entry(l, struct bt_uuid, list);
-
+       list_for_each_entry(uuid, &hdev->uuids, list)
                print_bt_uuid(f, uuid->uuid);
-       }
 
        hci_dev_unlock_bh(hdev);
 
@@ -543,22 +533,28 @@ static int auto_accept_delay_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                                        auto_accept_delay_set, "%llu\n");
 
-int hci_register_sysfs(struct hci_dev *hdev)
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+       struct device *dev = &hdev->dev;
+
+       dev->type = &bt_host;
+       dev->class = bt_class;
+
+       dev_set_drvdata(dev, hdev);
+       device_initialize(dev);
+}
+
+int hci_add_sysfs(struct hci_dev *hdev)
 {
        struct device *dev = &hdev->dev;
        int err;
 
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
-       dev->type = &bt_host;
-       dev->class = bt_class;
        dev->parent = hdev->parent;
-
        dev_set_name(dev, "%s", hdev->name);
 
-       dev_set_drvdata(dev, hdev);
-
-       err = device_register(dev);
+       err = device_add(dev);
        if (err < 0)
                return err;
 
@@ -582,7 +578,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
        return 0;
 }
 
-void hci_unregister_sysfs(struct hci_dev *hdev)
+void hci_del_sysfs(struct hci_dev *hdev)
 {
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
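The hci_sysfs.c hunks above split the old hci_register_sysfs() into hci_init_sysfs() (done at allocation time) and hci_add_sysfs() (done once the controller index and name are known), i.e. a single device_register() becomes the device_initialize()/device_add() pair. A minimal sketch of that driver-model pattern with a hypothetical device; only the two-stage split itself is taken from the patch.

#include <linux/device.h>

/* early: make the embedded struct device usable (refcounting, drvdata),
 * but do not expose it in sysfs yet */
static void example_init_sysfs(struct device *dev, struct class *cls,
							void *drvdata)
{
	dev->class = cls;
	dev_set_drvdata(dev, drvdata);
	device_initialize(dev);
}

/* later: name it and make it visible; device_initialize() followed by
 * device_add() is equivalent to a single device_register() */
static int example_add_sysfs(struct device *dev, struct device *parent,
							const char *name)
{
	dev->parent = parent;
	dev_set_name(dev, "%s", name);
	return device_add(dev);
}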
index 075a3e9..3c2d888 100644 (file)
@@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
 static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr)
 {
        struct hidp_session *session;
-       struct list_head *p;
 
        BT_DBG("");
 
-       list_for_each(p, &hidp_session_list) {
-               session = list_entry(p, struct hidp_session, list);
+       list_for_each_entry(session, &hidp_session_list, list) {
                if (!bacmp(bdaddr, &session->bdaddr))
                        return session;
        }
+
        return NULL;
 }
 
 static void __hidp_link_session(struct hidp_session *session)
 {
-       __module_get(THIS_MODULE);
        list_add(&session->list, &hidp_session_list);
-
-       hci_conn_hold_device(session->conn);
 }
 
 static void __hidp_unlink_session(struct hidp_session *session)
@@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session)
        hci_conn_put_device(session->conn);
 
        list_del(&session->list);
-       module_put(THIS_MODULE);
 }
 
 static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
@@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
 
        BT_DBG("session %p data %p size %d", session, data, size);
 
+       if (atomic_read(&session->terminate))
+               return -EIO;
+
        skb = alloc_skb(size + 1, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("Can't allocate memory for new frame");
@@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid,
        struct sk_buff *skb;
        size_t len;
        int numbered_reports = hid->report_enum[report_type].numbered;
+       int ret;
 
        switch (report_type) {
        case HID_FEATURE_REPORT:
@@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid,
        session->waiting_report_number = numbered_reports ? report_number : -1;
        set_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
        data[0] = report_number;
-       if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1))
-               goto err_eio;
+       ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1);
+       if (ret)
+               goto err;
 
        /* Wait for the return of the report. The returned report
           gets put in session->report_return.  */
@@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid,
                        5*HZ);
                if (res == 0) {
                        /* timeout */
-                       goto err_eio;
+                       ret = -EIO;
+                       goto err;
                }
                if (res < 0) {
                        /* signal */
-                       goto err_restartsys;
+                       ret = -ERESTARTSYS;
+                       goto err;
                }
        }
 
@@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid,
 
        return len;
 
-err_restartsys:
-       clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
-       mutex_unlock(&session->report_mutex);
-       return -ERESTARTSYS;
-err_eio:
+err:
        clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
        mutex_unlock(&session->report_mutex);
-       return -EIO;
+       return ret;
 }
 
 static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
@@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s
 
        /* Set up our wait, and send the report request to the device. */
        set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
-       if (hidp_send_ctrl_message(hid->driver_data, report_type,
-                       data, count)) {
-               ret = -ENOMEM;
+       ret = hidp_send_ctrl_message(hid->driver_data, report_type, data,
+                                                                       count);
+       if (ret)
                goto err;
-       }
 
        /* Wait for the ACK from the device. */
        while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
@@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session,
        case HIDP_HSHK_ERR_INVALID_REPORT_ID:
        case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST:
        case HIDP_HSHK_ERR_INVALID_PARAMETER:
-               if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) {
-                       clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+               if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags))
                        wake_up_interruptible(&session->report_queue);
-               }
+
                /* FIXME: Call into SET_ GET_ handlers here */
                break;
 
@@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session,
        }
 
        /* Wake up the waiting thread. */
-       if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) {
-               clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+       if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags))
                wake_up_interruptible(&session->report_queue);
-       }
 }
 
 static void hidp_process_hid_control(struct hidp_session *session,
@@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
        return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static void hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_intr_transmit(struct hidp_session *session)
 {
        struct sk_buff *skb;
 
        BT_DBG("session %p", session);
 
-       while ((skb = skb_dequeue(&session->ctrl_transmit))) {
-               if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
-                       skb_queue_head(&session->ctrl_transmit, skb);
+       while ((skb = skb_dequeue(&session->intr_transmit))) {
+               if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
+                       skb_queue_head(&session->intr_transmit, skb);
                        break;
                }
 
                hidp_set_timer(session);
                kfree_skb(skb);
        }
+}
 
-       while ((skb = skb_dequeue(&session->intr_transmit))) {
-               if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) {
-                       skb_queue_head(&session->intr_transmit, skb);
+static void hidp_process_ctrl_transmit(struct hidp_session *session)
+{
+       struct sk_buff *skb;
+
+       BT_DBG("session %p", session);
+
+       while ((skb = skb_dequeue(&session->ctrl_transmit))) {
+               if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) {
+                       skb_queue_head(&session->ctrl_transmit, skb);
                        break;
                }
 
@@ -700,6 +701,7 @@ static int hidp_session(void *arg)
 
        BT_DBG("session %p", session);
 
+       __module_get(THIS_MODULE);
        set_user_nice(current, -15);
 
        init_waitqueue_entry(&ctrl_wait, current);
@@ -714,23 +716,25 @@ static int hidp_session(void *arg)
                                intr_sk->sk_state != BT_CONNECTED)
                        break;
 
-               while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
+               while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
                        skb_orphan(skb);
                        if (!skb_linearize(skb))
-                               hidp_recv_ctrl_frame(session, skb);
+                               hidp_recv_intr_frame(session, skb);
                        else
                                kfree_skb(skb);
                }
 
-               while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
+               hidp_process_intr_transmit(session);
+
+               while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
                        skb_orphan(skb);
                        if (!skb_linearize(skb))
-                               hidp_recv_intr_frame(session, skb);
+                               hidp_recv_ctrl_frame(session, skb);
                        else
                                kfree_skb(skb);
                }
 
-               hidp_process_transmit(session);
+               hidp_process_ctrl_transmit(session);
 
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
@@ -739,6 +743,10 @@ static int hidp_session(void *arg)
        remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
        remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
 
+       clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags);
+       clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags);
+       wake_up_interruptible(&session->report_queue);
+
        down_write(&hidp_session_sem);
 
        hidp_del_timer(session);
@@ -772,34 +780,37 @@ static int hidp_session(void *arg)
 
        kfree(session->rd_data);
        kfree(session);
+       module_put_and_exit(0);
        return 0;
 }
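
hidp_session() now pins its own module: __module_get() on entry and module_put_and_exit() after the session is freed, so hidp cannot be unloaded while the kernel thread is still running. The pattern, as a hedged sketch of a self-terminating thread function (the names here are illustrative, not from the patch):

static int example_session_thread(void *arg)
{
	__module_get(THIS_MODULE);	/* hold the module for the thread's lifetime */

	/* ... session work ... */

	module_put_and_exit(0);		/* drops the reference and calls do_exit() */
	return 0;			/* never reached */
}
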
 
-static struct device *hidp_get_device(struct hidp_session *session)
+static struct hci_conn *hidp_get_connection(struct hidp_session *session)
 {
        bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
        bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
-       struct device *device = NULL;
+       struct hci_conn *conn;
        struct hci_dev *hdev;
 
        hdev = hci_get_route(dst, src);
        if (!hdev)
                return NULL;
 
-       session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-       if (session->conn)
-               device = &session->conn->dev;
+       hci_dev_lock_bh(hdev);
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+       if (conn)
+               hci_conn_hold_device(conn);
+       hci_dev_unlock_bh(hdev);
 
        hci_dev_put(hdev);
 
-       return device;
+       return conn;
 }
 
 static int hidp_setup_input(struct hidp_session *session,
                                struct hidp_connadd_req *req)
 {
        struct input_dev *input;
-       int err, i;
+       int i;
 
        input = input_allocate_device();
        if (!input)
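
hidp_get_device() returned a bare struct device looked up on demand; hidp_get_connection() instead resolves the ACL hci_conn under the hdev lock, takes a device reference via hci_conn_hold_device(), and the result is stored in session->conn during hidp_add_connection(). A fragment, pieced together from later hunks in this file, showing how the held connection then serves as parent for both transports:

session->conn = hidp_get_connection(session);	/* holds a device reference */
if (!session->conn) {
	err = -ENOTCONN;
	goto failed;
}

/* both devices parent off the held ACL connection */
input->dev.parent = &session->conn->dev;
hid->dev.parent   = &session->conn->dev;
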
@@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session,
                input->relbit[0] |= BIT_MASK(REL_WHEEL);
        }
 
-       input->dev.parent = hidp_get_device(session);
+       input->dev.parent = &session->conn->dev;
 
        input->event = hidp_input_event;
 
-       err = input_register_device(input);
-       if (err < 0) {
-               input_free_device(input);
-               session->input = NULL;
-               return err;
-       }
-
        return 0;
 }
 
@@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session,
        strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
        strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
 
-       hid->dev.parent = hidp_get_device(session);
+       hid->dev.parent = &session->conn->dev;
        hid->ll_driver = &hidp_hid_driver;
 
        hid->hid_get_raw_report = hidp_get_raw_report;
@@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
                        bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
                return -ENOTUNIQ;
 
-       session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
-       if (!session)
-               return -ENOMEM;
-
        BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size);
 
        down_write(&hidp_session_sem);
 
        s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst);
        if (s && s->state == BT_CONNECTED) {
-               err = -EEXIST;
-               goto failed;
+               up_write(&hidp_session_sem);
+               return -EEXIST;
+       }
+
+       session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
+       if (!session) {
+               up_write(&hidp_session_sem);
+               return -ENOMEM;
        }
 
        bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
@@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
        session->intr_sock = intr_sock;
        session->state     = BT_CONNECTED;
 
+       session->conn = hidp_get_connection(session);
+       if (!session->conn) {
+               err = -ENOTCONN;
+               goto failed;
+       }
+
        setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session);
 
        skb_queue_head_init(&session->ctrl_transmit);
@@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
        session->flags   = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID);
        session->idle_to = req->idle_to;
 
+       __hidp_link_session(session);
+
        if (req->rd_size > 0) {
                err = hidp_setup_hid(session, req);
-               if (err && err != -ENODEV)
+               if (err)
                        goto purge;
        }
 
@@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
                        goto purge;
        }
 
-       __hidp_link_session(session);
-
        hidp_set_timer(session);
 
        if (session->hid) {
@@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
                        !session->waiting_for_startup);
        }
 
-       err = hid_add_device(session->hid);
+       if (session->hid)
+               err = hid_add_device(session->hid);
+       else
+               err = input_register_device(session->input);
+
        if (err < 0) {
                atomic_inc(&session->terminate);
                wake_up_process(session->task);
@@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 unlink:
        hidp_del_timer(session);
 
-       __hidp_unlink_session(session);
-
        if (session->input) {
                input_unregister_device(session->input);
                session->input = NULL;
@@ -1093,6 +1107,8 @@ unlink:
        session->rd_data = NULL;
 
 purge:
+       __hidp_unlink_session(session);
+
        skb_queue_purge(&session->ctrl_transmit);
        skb_queue_purge(&session->intr_transmit);
 
@@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req)
 
 int hidp_get_connlist(struct hidp_connlist_req *req)
 {
-       struct list_head *p;
+       struct hidp_session *session;
        int err = 0, n = 0;
 
        BT_DBG("");
 
        down_read(&hidp_session_sem);
 
-       list_for_each(p, &hidp_session_list) {
-               struct hidp_session *session;
+       list_for_each_entry(session, &hidp_session_list, list) {
                struct hidp_conninfo ci;
 
-               session = list_entry(p, struct hidp_session, list);
-
                __hidp_copy_session(session, &ci);
 
                if (copy_to_user(req->ci, &ci, sizeof(ci))) {
index 5ea94a1..014fdec 100644 (file)
@@ -59,7 +59,7 @@
 int disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { 0x02, };
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
 
 static LIST_HEAD(chan_list);
 static DEFINE_RWLOCK(chan_list_lock);
@@ -96,7 +96,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16
                        return c;
        }
        return NULL;
-
 }
 
 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
@@ -153,12 +152,9 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
 
        list_for_each_entry(c, &chan_list, global_l) {
                if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
-                       goto found;
+                       return c;
        }
-
-       c = NULL;
-found:
-       return c;
+       return NULL;
 }
 
 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
@@ -219,7 +215,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
 
 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
 {
-       BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+       BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
 
        if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
                chan_hold(chan);
@@ -233,8 +229,37 @@ static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
                chan_put(chan);
 }
 
+static char *state_to_string(int state)
+{
+       switch (state) {
+       case BT_CONNECTED:
+               return "BT_CONNECTED";
+       case BT_OPEN:
+               return "BT_OPEN";
+       case BT_BOUND:
+               return "BT_BOUND";
+       case BT_LISTEN:
+               return "BT_LISTEN";
+       case BT_CONNECT:
+               return "BT_CONNECT";
+       case BT_CONNECT2:
+               return "BT_CONNECT2";
+       case BT_CONFIG:
+               return "BT_CONFIG";
+       case BT_DISCONN:
+               return "BT_DISCONN";
+       case BT_CLOSED:
+               return "BT_CLOSED";
+       }
+
+       return "invalid state";
+}
+
 static void l2cap_state_change(struct l2cap_chan *chan, int state)
 {
+       BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
+                                               state_to_string(state));
+
        chan->state = state;
        chan->ops->state_change(chan->data, state);
 }
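
With state_to_string() in place, l2cap_state_change() traces every transition by name instead of by number. A hypothetical call, only to illustrate the resulting BT_DBG format:

/* if chan is currently in BT_CONNECT, this logs roughly
 * "<chan pointer> BT_CONNECT -> BT_CONFIG" before updating the state */
l2cap_state_change(chan, BT_CONFIG);
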
@@ -293,6 +318,8 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 
        atomic_set(&chan->refcnt, 1);
 
+       BT_DBG("sk %p chan %p", sk, chan);
+
        return chan;
 }
 
@@ -310,7 +337,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
        BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
                        chan->psm, chan->dcid);
 
-       conn->disc_reason = 0x13;
+       conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
 
        chan->conn = conn;
 
@@ -337,6 +364,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
                chan->omtu = L2CAP_DEFAULT_MTU;
        }
 
+       chan->local_id          = L2CAP_BESTEFFORT_ID;
+       chan->local_stype       = L2CAP_SERV_BESTEFFORT;
+       chan->local_msdu        = L2CAP_DEFAULT_MAX_SDU_SIZE;
+       chan->local_sdu_itime   = L2CAP_DEFAULT_SDU_ITIME;
+       chan->local_acc_lat     = L2CAP_DEFAULT_ACC_LAT;
+       chan->local_flush_to    = L2CAP_DEFAULT_FLUSH_TO;
+
        chan_hold(chan);
 
        list_add(&chan->list, &conn->chan_l);
@@ -508,7 +542,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 }
 
 /* Service level security */
-static inline int l2cap_check_security(struct l2cap_chan *chan)
+int l2cap_chan_check_security(struct l2cap_chan *chan)
 {
        struct l2cap_conn *conn = chan->conn;
        __u8 auth_type;
@@ -556,34 +590,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
                flags = ACL_START;
 
        bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+       skb->priority = HCI_PRIO_MAX;
+
+       hci_send_acl(conn->hchan, skb, flags);
+}
+
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+       struct hci_conn *hcon = chan->conn->hcon;
+       u16 flags;
+
+       BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+                                                       skb->priority);
 
-       hci_send_acl(conn->hcon, skb, flags);
+       if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
+                                       lmp_no_flush_capable(hcon->hdev))
+               flags = ACL_START_NO_FLUSH;
+       else
+               flags = ACL_START;
+
+       bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+       hci_send_acl(chan->conn->hchan, skb, flags);
 }
 
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
 {
        struct sk_buff *skb;
        struct l2cap_hdr *lh;
        struct l2cap_conn *conn = chan->conn;
-       int count, hlen = L2CAP_HDR_SIZE + 2;
-       u8 flags;
+       int count, hlen;
 
        if (chan->state != BT_CONNECTED)
                return;
 
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               hlen = L2CAP_EXT_HDR_SIZE;
+       else
+               hlen = L2CAP_ENH_HDR_SIZE;
+
        if (chan->fcs == L2CAP_FCS_CRC16)
-               hlen += 2;
+               hlen += L2CAP_FCS_SIZE;
 
-       BT_DBG("chan %p, control 0x%2.2x", chan, control);
+       BT_DBG("chan %p, control 0x%8.8x", chan, control);
 
        count = min_t(unsigned int, conn->mtu, hlen);
-       control |= L2CAP_CTRL_FRAME_TYPE;
+
+       control |= __set_sframe(chan);
 
        if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-               control |= L2CAP_CTRL_FINAL;
+               control |= __set_ctrl_final(chan);
 
        if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
-               control |= L2CAP_CTRL_POLL;
+               control |= __set_ctrl_poll(chan);
 
        skb = bt_skb_alloc(count, GFP_ATOMIC);
        if (!skb)
@@ -592,32 +650,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
-       put_unaligned_le16(control, skb_put(skb, 2));
+
+       __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
 
        if (chan->fcs == L2CAP_FCS_CRC16) {
-               u16 fcs = crc16(0, (u8 *)lh, count - 2);
-               put_unaligned_le16(fcs, skb_put(skb, 2));
+               u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+               put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
        }
 
-       if (lmp_no_flush_capable(conn->hcon->hdev))
-               flags = ACL_START_NO_FLUSH;
-       else
-               flags = ACL_START;
-
-       bt_cb(skb)->force_active = chan->force_active;
-
-       hci_send_acl(chan->conn->hcon, skb, flags);
+       skb->priority = HCI_PRIO_MAX;
+       l2cap_do_send(chan, skb);
 }
 
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
 {
        if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
+               control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
                set_bit(CONN_RNR_SENT, &chan->conn_state);
        } else
-               control |= L2CAP_SUPER_RCV_READY;
+               control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
 
-       control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+       control |= __set_reqseq(chan, chan->buffer_seq);
 
        l2cap_send_sframe(chan, control);
 }
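
The control argument becomes a u32 and the direct put_unaligned_le16()/get_unaligned_le16() accesses are replaced by __put_control()/__get_control() style helpers, because channels with FLAG_EXT_CTRL use the 4-byte extended control field while legacy channels keep the 2-byte enhanced one. A hedged sketch of what such an accessor plausibly looks like (the real definitions live in the L2CAP header, not in this hunk):

static inline void __put_control(struct l2cap_chan *chan, __u32 control,
				 void *p)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, p);
	else
		put_unaligned_le16(control, p);
}
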
@@ -635,7 +688,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
                if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
                        return;
 
-               if (l2cap_check_security(chan) &&
+               if (l2cap_chan_check_security(chan) &&
                                __l2cap_no_conn_pending(chan)) {
                        struct l2cap_conn_req req;
                        req.scid = cpu_to_le16(chan->scid);
@@ -725,7 +778,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                if (chan->state == BT_CONNECT) {
                        struct l2cap_conn_req req;
 
-                       if (!l2cap_check_security(chan) ||
+                       if (!l2cap_chan_check_security(chan) ||
                                        !__l2cap_no_conn_pending(chan)) {
                                bh_unlock_sock(sk);
                                continue;
@@ -758,7 +811,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                        rsp.scid = cpu_to_le16(chan->dcid);
                        rsp.dcid = cpu_to_le16(chan->scid);
 
-                       if (l2cap_check_security(chan)) {
+                       if (l2cap_chan_check_security(chan)) {
                                if (bt_sk(sk)->defer_setup) {
                                        struct sock *parent = bt_sk(sk)->parent;
                                        rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -947,7 +1000,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
        list_for_each_entry(chan, &conn->chan_l, list) {
                struct sock *sk = chan->sk;
 
-               if (chan->force_reliable)
+               if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
                        sk->sk_err = err;
        }
 
@@ -986,6 +1039,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
                chan->ops->close(chan->data);
        }
 
+       hci_chan_del(conn->hchan);
+
        if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
                del_timer_sync(&conn->info_timer);
 
@@ -1008,18 +1063,26 @@ static void security_timeout(unsigned long arg)
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 {
        struct l2cap_conn *conn = hcon->l2cap_data;
+       struct hci_chan *hchan;
 
        if (conn || status)
                return conn;
 
+       hchan = hci_chan_create(hcon);
+       if (!hchan)
+               return NULL;
+
        conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
-       if (!conn)
+       if (!conn) {
+               hci_chan_del(hchan);
                return NULL;
+       }
 
        hcon->l2cap_data = conn;
        conn->hcon = hcon;
+       conn->hchan = hchan;
 
-       BT_DBG("hcon %p conn %p", hcon, conn);
+       BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
 
        if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
                conn->mtu = hcon->hdev->le_mtu;
@@ -1043,7 +1106,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
                setup_timer(&conn->info_timer, l2cap_info_timeout,
                                                (unsigned long) conn);
 
-       conn->disc_reason = 0x13;
+       conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
 
        return conn;
 }
@@ -1142,7 +1205,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
        if (hcon->state == BT_CONNECTED) {
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
                        __clear_chan_timer(chan);
-                       if (l2cap_check_security(chan))
+                       if (l2cap_chan_check_security(chan))
                                l2cap_state_change(chan, BT_CONNECTED);
                } else
                        l2cap_do_start(chan);
@@ -1245,60 +1308,46 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
                __clear_retrans_timer(chan);
 }
 
-static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
-{
-       struct hci_conn *hcon = chan->conn->hcon;
-       u16 flags;
-
-       BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
-
-       if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
-               flags = ACL_START_NO_FLUSH;
-       else
-               flags = ACL_START;
-
-       bt_cb(skb)->force_active = chan->force_active;
-       hci_send_acl(hcon, skb, flags);
-}
-
 static void l2cap_streaming_send(struct l2cap_chan *chan)
 {
        struct sk_buff *skb;
-       u16 control, fcs;
+       u32 control;
+       u16 fcs;
 
        while ((skb = skb_dequeue(&chan->tx_q))) {
-               control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
-               control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-               put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
+               control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
+               control |= __set_txseq(chan, chan->next_tx_seq);
+               __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
 
                if (chan->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
-                       put_unaligned_le16(fcs, skb->data + skb->len - 2);
+                       fcs = crc16(0, (u8 *)skb->data,
+                                               skb->len - L2CAP_FCS_SIZE);
+                       put_unaligned_le16(fcs,
+                                       skb->data + skb->len - L2CAP_FCS_SIZE);
                }
 
                l2cap_do_send(chan, skb);
 
-               chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+               chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
        }
 }
 
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
 {
        struct sk_buff *skb, *tx_skb;
-       u16 control, fcs;
+       u16 fcs;
+       u32 control;
 
        skb = skb_peek(&chan->tx_q);
        if (!skb)
                return;
 
-       do {
-               if (bt_cb(skb)->tx_seq == tx_seq)
-                       break;
-
+       while (bt_cb(skb)->tx_seq != tx_seq) {
                if (skb_queue_is_last(&chan->tx_q, skb))
                        return;
 
-       } while ((skb = skb_queue_next(&chan->tx_q, skb)));
+               skb = skb_queue_next(&chan->tx_q, skb);
+       }
 
        if (chan->remote_max_tx &&
                        bt_cb(skb)->retries == chan->remote_max_tx) {
@@ -1308,20 +1357,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
 
        tx_skb = skb_clone(skb, GFP_ATOMIC);
        bt_cb(skb)->retries++;
-       control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-       control &= L2CAP_CTRL_SAR;
+
+       control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+       control &= __get_sar_mask(chan);
 
        if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-               control |= L2CAP_CTRL_FINAL;
+               control |= __set_ctrl_final(chan);
 
-       control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-                       | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+       control |= __set_reqseq(chan, chan->buffer_seq);
+       control |= __set_txseq(chan, tx_seq);
 
-       put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+       __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
 
        if (chan->fcs == L2CAP_FCS_CRC16) {
-               fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-               put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+               fcs = crc16(0, (u8 *)tx_skb->data,
+                                               tx_skb->len - L2CAP_FCS_SIZE);
+               put_unaligned_le16(fcs,
+                               tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
        }
 
        l2cap_do_send(chan, tx_skb);
@@ -1330,7 +1382,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
 static int l2cap_ertm_send(struct l2cap_chan *chan)
 {
        struct sk_buff *skb, *tx_skb;
-       u16 control, fcs;
+       u16 fcs;
+       u32 control;
        int nsent = 0;
 
        if (chan->state != BT_CONNECTED)
@@ -1348,20 +1401,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
 
                bt_cb(skb)->retries++;
 
-               control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
-               control &= L2CAP_CTRL_SAR;
+               control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
+               control &= __get_sar_mask(chan);
 
                if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
-                       control |= L2CAP_CTRL_FINAL;
+                       control |= __set_ctrl_final(chan);
 
-               control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-                               | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
-               put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+               control |= __set_reqseq(chan, chan->buffer_seq);
+               control |= __set_txseq(chan, chan->next_tx_seq);
 
+               __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
 
                if (chan->fcs == L2CAP_FCS_CRC16) {
-                       fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
-                       put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+                       fcs = crc16(0, (u8 *)skb->data,
+                                               tx_skb->len - L2CAP_FCS_SIZE);
+                       put_unaligned_le16(fcs, skb->data +
+                                               tx_skb->len - L2CAP_FCS_SIZE);
                }
 
                l2cap_do_send(chan, tx_skb);
@@ -1369,7 +1424,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
                __set_retrans_timer(chan);
 
                bt_cb(skb)->tx_seq = chan->next_tx_seq;
-               chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
+
+               chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
 
                if (bt_cb(skb)->retries == 1)
                        chan->unacked_frames++;
@@ -1401,12 +1457,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan)
 
 static void l2cap_send_ack(struct l2cap_chan *chan)
 {
-       u16 control = 0;
+       u32 control = 0;
 
-       control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+       control |= __set_reqseq(chan, chan->buffer_seq);
 
        if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
+               control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
                set_bit(CONN_RNR_SENT, &chan->conn_state);
                l2cap_send_sframe(chan, control);
                return;
@@ -1415,20 +1471,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
        if (l2cap_ertm_send(chan) > 0)
                return;
 
-       control |= L2CAP_SUPER_RCV_READY;
+       control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
        l2cap_send_sframe(chan, control);
 }
 
 static void l2cap_send_srejtail(struct l2cap_chan *chan)
 {
        struct srej_list *tail;
-       u16 control;
+       u32 control;
 
-       control = L2CAP_SUPER_SELECT_REJECT;
-       control |= L2CAP_CTRL_FINAL;
+       control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+       control |= __set_ctrl_final(chan);
 
        tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
-       control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+       control |= __set_reqseq(chan, tail->tx_seq);
 
        l2cap_send_sframe(chan, control);
 }
@@ -1456,6 +1512,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
                if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
                        return -EFAULT;
 
+               (*frag)->priority = skb->priority;
+
                sent += count;
                len  -= count;
 
@@ -1465,15 +1523,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
        return sent;
 }
 
-static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
+                                               struct msghdr *msg, size_t len,
+                                               u32 priority)
 {
        struct sock *sk = chan->sk;
        struct l2cap_conn *conn = chan->conn;
        struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE + 2;
+       int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
        struct l2cap_hdr *lh;
 
-       BT_DBG("sk %p len %d", sk, (int)len);
+       BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
 
        count = min_t(unsigned int, (conn->mtu - hlen), len);
        skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1481,6 +1541,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
        if (!skb)
                return ERR_PTR(err);
 
+       skb->priority = priority;
+
        /* Create L2CAP header */
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
@@ -1495,7 +1557,9 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct
        return skb;
 }
 
-static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
+                                               struct msghdr *msg, size_t len,
+                                               u32 priority)
 {
        struct sock *sk = chan->sk;
        struct l2cap_conn *conn = chan->conn;
@@ -1511,6 +1575,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
        if (!skb)
                return ERR_PTR(err);
 
+       skb->priority = priority;
+
        /* Create L2CAP header */
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
@@ -1526,12 +1592,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms
 
 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
                                                struct msghdr *msg, size_t len,
-                                               u16 control, u16 sdulen)
+                                               u32 control, u16 sdulen)
 {
        struct sock *sk = chan->sk;
        struct l2cap_conn *conn = chan->conn;
        struct sk_buff *skb;
-       int err, count, hlen = L2CAP_HDR_SIZE + 2;
+       int err, count, hlen;
        struct l2cap_hdr *lh;
 
        BT_DBG("sk %p len %d", sk, (int)len);
@@ -1539,11 +1605,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
        if (!conn)
                return ERR_PTR(-ENOTCONN);
 
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               hlen = L2CAP_EXT_HDR_SIZE;
+       else
+               hlen = L2CAP_ENH_HDR_SIZE;
+
        if (sdulen)
-               hlen += 2;
+               hlen += L2CAP_SDULEN_SIZE;
 
        if (chan->fcs == L2CAP_FCS_CRC16)
-               hlen += 2;
+               hlen += L2CAP_FCS_SIZE;
 
        count = min_t(unsigned int, (conn->mtu - hlen), len);
        skb = bt_skb_send_alloc(sk, count + hlen,
@@ -1555,9 +1626,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
        lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-       put_unaligned_le16(control, skb_put(skb, 2));
+
+       __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+
        if (sdulen)
-               put_unaligned_le16(sdulen, skb_put(skb, 2));
+               put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
 
        err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
        if (unlikely(err < 0)) {
@@ -1566,7 +1639,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
        }
 
        if (chan->fcs == L2CAP_FCS_CRC16)
-               put_unaligned_le16(0, skb_put(skb, 2));
+               put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
 
        bt_cb(skb)->retries = 0;
        return skb;
@@ -1576,11 +1649,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
 {
        struct sk_buff *skb;
        struct sk_buff_head sar_queue;
-       u16 control;
+       u32 control;
        size_t size = 0;
 
        skb_queue_head_init(&sar_queue);
-       control = L2CAP_SDU_START;
+       control = __set_ctrl_sar(chan, L2CAP_SAR_START);
        skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
@@ -1593,10 +1666,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
                size_t buflen;
 
                if (len > chan->remote_mps) {
-                       control = L2CAP_SDU_CONTINUE;
+                       control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
                        buflen = chan->remote_mps;
                } else {
-                       control = L2CAP_SDU_END;
+                       control = __set_ctrl_sar(chan, L2CAP_SAR_END);
                        buflen = len;
                }
 
@@ -1617,15 +1690,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si
        return size;
 }
 
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+                                                               u32 priority)
 {
        struct sk_buff *skb;
-       u16 control;
+       u32 control;
        int err;
 
        /* Connectionless channel */
        if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
-               skb = l2cap_create_connless_pdu(chan, msg, len);
+               skb = l2cap_create_connless_pdu(chan, msg, len, priority);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);
 
@@ -1640,7 +1714,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
                        return -EMSGSIZE;
 
                /* Create a basic PDU */
-               skb = l2cap_create_basic_pdu(chan, msg, len);
+               skb = l2cap_create_basic_pdu(chan, msg, len, priority);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);
 
@@ -1652,7 +1726,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
        case L2CAP_MODE_STREAMING:
                /* Entire SDU fits into one PDU */
                if (len <= chan->remote_mps) {
-                       control = L2CAP_SDU_UNSEGMENTED;
+                       control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
                        skb = l2cap_create_iframe_pdu(chan, msg, len, control,
                                                                        0);
                        if (IS_ERR(skb))
@@ -1850,6 +1924,37 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
        *ptr += L2CAP_CONF_OPT_SIZE + len;
 }
 
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+{
+       struct l2cap_conf_efs efs;
+
+       switch (chan->mode) {
+       case L2CAP_MODE_ERTM:
+               efs.id          = chan->local_id;
+               efs.stype       = chan->local_stype;
+               efs.msdu        = cpu_to_le16(chan->local_msdu);
+               efs.sdu_itime   = cpu_to_le32(chan->local_sdu_itime);
+               efs.acc_lat     = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+               efs.flush_to    = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+               break;
+
+       case L2CAP_MODE_STREAMING:
+               efs.id          = 1;
+               efs.stype       = L2CAP_SERV_BESTEFFORT;
+               efs.msdu        = cpu_to_le16(chan->local_msdu);
+               efs.sdu_itime   = cpu_to_le32(chan->local_sdu_itime);
+               efs.acc_lat     = 0;
+               efs.flush_to    = 0;
+               break;
+
+       default:
+               return;
+       }
+
+       l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+                                                       (unsigned long) &efs);
+}
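
l2cap_add_opt_efs() serializes the channel's extended flow specification into the configuration request: for ERTM the locally negotiated parameters are sent, for streaming mode a best-effort spec with zero access latency and flush timeout. The option payload it copies corresponds to a structure of roughly this shape, inferred from the field assignments above (the actual definition sits in the L2CAP header, outside this diff):

struct l2cap_conf_efs {
	__u8	id;
	__u8	stype;		/* e.g. L2CAP_SERV_BESTEFFORT */
	__le16	msdu;		/* maximum SDU size */
	__le32	sdu_itime;	/* SDU inter-arrival time */
	__le32	acc_lat;	/* access latency */
	__le32	flush_to;	/* flush timeout */
} __packed;
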
+
 static void l2cap_ack_timeout(unsigned long arg)
 {
        struct l2cap_chan *chan = (void *) arg;
@@ -1896,11 +2001,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
        }
 }
 
+static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+{
+       return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+{
+       return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+}
+
+static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+{
+       if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
+                                               __l2cap_ews_supported(chan)) {
+               /* use extended control field */
+               set_bit(FLAG_EXT_CTRL, &chan->flags);
+               chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+       } else {
+               chan->tx_win = min_t(u16, chan->tx_win,
+                                               L2CAP_DEFAULT_TX_WINDOW);
+               chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+       }
+}
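
l2cap_txwin_setup() decides between the legacy and extended sequence-number space: a transmit window larger than the basic maximum is only kept when extended window size is available (enable_hs plus L2CAP_FEAT_EXT_WINDOW in the connection's feature mask), which also switches the channel to the extended control field; otherwise the window is clamped. An illustration, under the assumption that L2CAP_DEFAULT_TX_WINDOW is the 63-frame basic limit:

chan->tx_win = 100;
l2cap_txwin_setup(chan);
/* EWS supported:   tx_win == 100, FLAG_EXT_CTRL set,
 *                  tx_win_max == L2CAP_DEFAULT_EXT_WINDOW
 * EWS unsupported: tx_win == 63, tx_win_max == 63 */
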
+
 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
 {
        struct l2cap_conf_req *req = data;
        struct l2cap_conf_rfc rfc = { .mode = chan->mode };
        void *ptr = req->data;
+       u16 size;
 
        BT_DBG("chan %p", chan);
 
@@ -1913,6 +2043,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
                if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
                        break;
 
+               if (__l2cap_efs_supported(chan))
+                       set_bit(FLAG_EFS_ENABLE, &chan->flags);
+
                /* fall through */
        default:
                chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
@@ -1942,17 +2075,27 @@ done:
 
        case L2CAP_MODE_ERTM:
                rfc.mode            = L2CAP_MODE_ERTM;
-               rfc.txwin_size      = chan->tx_win;
                rfc.max_transmit    = chan->max_tx;
                rfc.retrans_timeout = 0;
                rfc.monitor_timeout = 0;
-               rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-               if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
-                       rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+               size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+                                               L2CAP_EXT_HDR_SIZE -
+                                               L2CAP_SDULEN_SIZE -
+                                               L2CAP_FCS_SIZE);
+               rfc.max_pdu_size = cpu_to_le16(size);
+
+               l2cap_txwin_setup(chan);
+
+               rfc.txwin_size = min_t(u16, chan->tx_win,
+                                               L2CAP_DEFAULT_TX_WINDOW);
 
                l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
                                                        (unsigned long) &rfc);
 
+               if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+                       l2cap_add_opt_efs(&ptr, chan);
+
                if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
                        break;
 
@@ -1961,6 +2104,10 @@ done:
                        chan->fcs = L2CAP_FCS_NONE;
                        l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
                }
+
+               if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+                                                               chan->tx_win);
                break;
 
        case L2CAP_MODE_STREAMING:
@@ -1969,13 +2116,19 @@ done:
                rfc.max_transmit    = 0;
                rfc.retrans_timeout = 0;
                rfc.monitor_timeout = 0;
-               rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-               if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
-                       rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+
+               size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
+                                               L2CAP_EXT_HDR_SIZE -
+                                               L2CAP_SDULEN_SIZE -
+                                               L2CAP_FCS_SIZE);
+               rfc.max_pdu_size = cpu_to_le16(size);
 
                l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
                                                        (unsigned long) &rfc);
 
+               if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+                       l2cap_add_opt_efs(&ptr, chan);
+
                if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
                        break;
 
@@ -2002,8 +2155,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
        int type, hint, olen;
        unsigned long val;
        struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+       struct l2cap_conf_efs efs;
+       u8 remote_efs = 0;
        u16 mtu = L2CAP_DEFAULT_MTU;
        u16 result = L2CAP_CONF_SUCCESS;
+       u16 size;
 
        BT_DBG("chan %p", chan);
 
@@ -2033,7 +2189,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
                case L2CAP_CONF_FCS:
                        if (val == L2CAP_FCS_NONE)
                                set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
+                       break;
 
+               case L2CAP_CONF_EFS:
+                       remote_efs = 1;
+                       if (olen == sizeof(efs))
+                               memcpy(&efs, (void *) val, olen);
+                       break;
+
+               case L2CAP_CONF_EWS:
+                       if (!enable_hs)
+                               return -ECONNREFUSED;
+
+                       set_bit(FLAG_EXT_CTRL, &chan->flags);
+                       set_bit(CONF_EWS_RECV, &chan->conf_state);
+                       chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
+                       chan->remote_tx_win = val;
                        break;
 
                default:
@@ -2058,6 +2229,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
                        break;
                }
 
+               if (remote_efs) {
+                       if (__l2cap_efs_supported(chan))
+                               set_bit(FLAG_EFS_ENABLE, &chan->flags);
+                       else
+                               return -ECONNREFUSED;
+               }
+
                if (chan->mode != rfc.mode)
                        return -ECONNREFUSED;
 
@@ -2076,7 +2254,6 @@ done:
                                        sizeof(rfc), (unsigned long) &rfc);
        }
 
-
        if (result == L2CAP_CONF_SUCCESS) {
                /* Configure output options and let the other side know
                 * which ones we don't like. */
@@ -2089,6 +2266,26 @@ done:
                }
                l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
 
+               if (remote_efs) {
+                       if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+                                       efs.stype != L2CAP_SERV_NOTRAFIC &&
+                                       efs.stype != chan->local_stype) {
+
+                               result = L2CAP_CONF_UNACCEPT;
+
+                               if (chan->num_conf_req >= 1)
+                                       return -ECONNREFUSED;
+
+                               l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+                                                       sizeof(efs),
+                                                       (unsigned long) &efs);
+                       } else {
+                               /* Send PENDING Conf Rsp */
+                               result = L2CAP_CONF_PENDING;
+                               set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+                       }
+               }
+
                switch (rfc.mode) {
                case L2CAP_MODE_BASIC:
                        chan->fcs = L2CAP_FCS_NONE;
@@ -2096,13 +2293,20 @@ done:
                        break;
 
                case L2CAP_MODE_ERTM:
-                       chan->remote_tx_win = rfc.txwin_size;
-                       chan->remote_max_tx = rfc.max_transmit;
+                       if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
+                               chan->remote_tx_win = rfc.txwin_size;
+                       else
+                               rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
 
-                       if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
-                               rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
+                       chan->remote_max_tx = rfc.max_transmit;
 
-                       chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+                       size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+                                               chan->conn->mtu -
+                                               L2CAP_EXT_HDR_SIZE -
+                                               L2CAP_SDULEN_SIZE -
+                                               L2CAP_FCS_SIZE);
+                       rfc.max_pdu_size = cpu_to_le16(size);
+                       chan->remote_mps = size;
 
                        rfc.retrans_timeout =
                                le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
@@ -2114,13 +2318,29 @@ done:
                        l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
                                        sizeof(rfc), (unsigned long) &rfc);
 
+                       if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+                               chan->remote_id = efs.id;
+                               chan->remote_stype = efs.stype;
+                               chan->remote_msdu = le16_to_cpu(efs.msdu);
+                               chan->remote_flush_to =
+                                               le32_to_cpu(efs.flush_to);
+                               chan->remote_acc_lat =
+                                               le32_to_cpu(efs.acc_lat);
+                               chan->remote_sdu_itime =
+                                       le32_to_cpu(efs.sdu_itime);
+                               l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+                                       sizeof(efs), (unsigned long) &efs);
+                       }
                        break;
 
                case L2CAP_MODE_STREAMING:
-                       if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
-                               rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
-
-                       chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+                       size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
+                                               chan->conn->mtu -
+                                               L2CAP_EXT_HDR_SIZE -
+                                               L2CAP_SDULEN_SIZE -
+                                               L2CAP_FCS_SIZE);
+                       rfc.max_pdu_size = cpu_to_le16(size);
+                       chan->remote_mps = size;
 
                        set_bit(CONF_MODE_DONE, &chan->conf_state);
 
@@ -2153,6 +2373,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
        int type, olen;
        unsigned long val;
        struct l2cap_conf_rfc rfc;
+       struct l2cap_conf_efs efs;
 
        BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2188,6 +2409,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
                        l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
                                        sizeof(rfc), (unsigned long) &rfc);
                        break;
+
+               case L2CAP_CONF_EWS:
+                       chan->tx_win = min_t(u16, val,
+                                               L2CAP_DEFAULT_EXT_WINDOW);
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+                                                       chan->tx_win);
+                       break;
+
+               case L2CAP_CONF_EFS:
+                       if (olen == sizeof(efs))
+                               memcpy(&efs, (void *)val, olen);
+
+                       if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+                                       efs.stype != L2CAP_SERV_NOTRAFIC &&
+                                       efs.stype != chan->local_stype)
+                               return -ECONNREFUSED;
+
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+                                       sizeof(efs), (unsigned long) &efs);
+                       break;
                }
        }
 
@@ -2196,13 +2437,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
 
        chan->mode = rfc.mode;
 
-       if (*result == L2CAP_CONF_SUCCESS) {
+       if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
                switch (rfc.mode) {
                case L2CAP_MODE_ERTM:
                        chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
                        chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
                        chan->mps    = le16_to_cpu(rfc.max_pdu_size);
+
+                       if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+                               chan->local_msdu = le16_to_cpu(efs.msdu);
+                               chan->local_sdu_itime =
+                                               le32_to_cpu(efs.sdu_itime);
+                               chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
+                               chan->local_flush_to =
+                                               le32_to_cpu(efs.flush_to);
+                       }
                        break;
+
                case L2CAP_MODE_STREAMING:
                        chan->mps    = le16_to_cpu(rfc.max_pdu_size);
                }
@@ -2330,7 +2581,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
        /* Check if the ACL is secure enough (if not SDP) */
        if (psm != cpu_to_le16(0x0001) &&
                                !hci_conn_check_link_mode(conn->hcon)) {
-               conn->disc_reason = 0x05;
+               conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
                result = L2CAP_CR_SEC_BLOCK;
                goto response;
        }
@@ -2377,7 +2628,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
        chan->ident = cmd->ident;
 
        if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-               if (l2cap_check_security(chan)) {
+               if (l2cap_chan_check_security(chan)) {
                        if (bt_sk(sk)->defer_setup) {
                                l2cap_state_change(chan, BT_CONNECT2);
                                result = L2CAP_CR_PEND;
@@ -2602,6 +2853,21 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
                chan->num_conf_req++;
        }
 
+       /* Got Conf Rsp PENDING from remote side and assume we sent
+          Conf Rsp PENDING in the code above */
+       if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
+                       test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+
+               /* check compatibility */
+
+               clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+               set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+                                       l2cap_build_conf_rsp(chan, rsp,
+                                       L2CAP_CONF_SUCCESS, 0x0000), rsp);
+       }
+
 unlock:
        bh_unlock_sock(sk);
        return 0;
@@ -2631,8 +2897,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
        switch (result) {
        case L2CAP_CONF_SUCCESS:
                l2cap_conf_rfc_get(chan, rsp->data, len);
+               clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
                break;
 
+       case L2CAP_CONF_PENDING:
+               set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
+
+               if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
+                       char buf[64];
+
+                       len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+                                                               buf, &result);
+                       if (len < 0) {
+                               l2cap_send_disconn_req(conn, chan, ECONNRESET);
+                               goto done;
+                       }
+
+                       /* check compatibility */
+
+                       clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
+                       set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
+
+                       l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+                                               l2cap_build_conf_rsp(chan, buf,
+                                               L2CAP_CONF_SUCCESS, 0x0000), buf);
+               }
+               goto done;
+
        case L2CAP_CONF_UNACCEPT:
                if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
                        char req[64];
@@ -2750,7 +3041,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 
        /* don't delete l2cap channel if sk is owned by user */
        if (sock_owned_by_user(sk)) {
-               l2cap_state_change(chan,BT_DISCONN);
+               l2cap_state_change(chan, BT_DISCONN);
                __clear_chan_timer(chan);
                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
                bh_unlock_sock(sk);
@@ -2782,15 +3073,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
                if (!disable_ertm)
                        feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
                                                         | L2CAP_FEAT_FCS;
+               if (enable_hs)
+                       feat_mask |= L2CAP_FEAT_EXT_FLOW
+                                               | L2CAP_FEAT_EXT_WINDOW;
+
                put_unaligned_le32(feat_mask, rsp->data);
                l2cap_send_cmd(conn, cmd->ident,
                                        L2CAP_INFO_RSP, sizeof(buf), buf);
        } else if (type == L2CAP_IT_FIXED_CHAN) {
                u8 buf[12];
                struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
+
+               if (enable_hs)
+                       l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
+               else
+                       l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
+
                rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
                rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
-               memcpy(buf + 4, l2cap_fixed_chan, 8);
+               memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
                l2cap_send_cmd(conn, cmd->ident,
                                        L2CAP_INFO_RSP, sizeof(buf), buf);
        } else {
@@ -2857,6 +3158,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
        return 0;
 }
 
+static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
+                                       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+                                       void *data)
+{
+       struct l2cap_create_chan_req *req = data;
+       struct l2cap_create_chan_rsp rsp;
+       u16 psm, scid;
+
+       if (cmd_len != sizeof(*req))
+               return -EPROTO;
+
+       if (!enable_hs)
+               return -EINVAL;
+
+       psm = le16_to_cpu(req->psm);
+       scid = le16_to_cpu(req->scid);
+
+       BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+
+       /* Placeholder: Always reject */
+       rsp.dcid = 0;
+       rsp.scid = cpu_to_le16(scid);
+       rsp.result = L2CAP_CR_NO_MEM;
+       rsp.status = L2CAP_CS_NO_INFO;
+
+       l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
+                      sizeof(rsp), &rsp);
+
+       return 0;
+}
+
+static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
+                                       struct l2cap_cmd_hdr *cmd, void *data)
+{
+       BT_DBG("conn %p", conn);
+
+       return l2cap_connect_rsp(conn, cmd, data);
+}
+
+static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
+                                                       u16 icid, u16 result)
+{
+       struct l2cap_move_chan_rsp rsp;
+
+       BT_DBG("icid %d, result %d", icid, result);
+
+       rsp.icid = cpu_to_le16(icid);
+       rsp.result = cpu_to_le16(result);
+
+       l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
+}
+
+static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
+                               struct l2cap_chan *chan, u16 icid, u16 result)
+{
+       struct l2cap_move_chan_cfm cfm;
+       u8 ident;
+
+       BT_DBG("icid %d, result %d", icid, result);
+
+       ident = l2cap_get_ident(conn);
+       if (chan)
+               chan->ident = ident;
+
+       cfm.icid = cpu_to_le16(icid);
+       cfm.result = cpu_to_le16(result);
+
+       l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
+}
+
+static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
+                                                               u16 icid)
+{
+       struct l2cap_move_chan_cfm_rsp rsp;
+
+       BT_DBG("icid %d", icid);
+
+       rsp.icid = cpu_to_le16(icid);
+       l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
+}
+
+static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
+                       struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+       struct l2cap_move_chan_req *req = data;
+       u16 icid = 0;
+       u16 result = L2CAP_MR_NOT_ALLOWED;
+
+       if (cmd_len != sizeof(*req))
+               return -EPROTO;
+
+       icid = le16_to_cpu(req->icid);
+
+       BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+
+       if (!enable_hs)
+               return -EINVAL;
+
+       /* Placeholder: Always refuse */
+       l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
+
+       return 0;
+}
+
+static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
+                       struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+       struct l2cap_move_chan_rsp *rsp = data;
+       u16 icid, result;
+
+       if (cmd_len != sizeof(*rsp))
+               return -EPROTO;
+
+       icid = le16_to_cpu(rsp->icid);
+       result = le16_to_cpu(rsp->result);
+
+       BT_DBG("icid %d, result %d", icid, result);
+
+       /* Placeholder: Always unconfirmed */
+       l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
+
+       return 0;
+}
+
+static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
+                       struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+       struct l2cap_move_chan_cfm *cfm = data;
+       u16 icid, result;
+
+       if (cmd_len != sizeof(*cfm))
+               return -EPROTO;
+
+       icid = le16_to_cpu(cfm->icid);
+       result = le16_to_cpu(cfm->result);
+
+       BT_DBG("icid %d, result %d", icid, result);
+
+       l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
+
+       return 0;
+}
+
+static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
+                       struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+{
+       struct l2cap_move_chan_cfm_rsp *rsp = data;
+       u16 icid;
+
+       if (cmd_len != sizeof(*rsp))
+               return -EPROTO;
+
+       icid = le16_to_cpu(rsp->icid);
+
+       BT_DBG("icid %d", icid);
+
+       return 0;
+}
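
These Create Channel and Move Channel handlers are placeholders until AMP controllers are actually wired up: each request is length-checked, decoded, refused (L2CAP_CR_NO_MEM, L2CAP_MR_NOT_ALLOWED, L2CAP_MC_UNCONFIRMED) and acknowledged, so the remote state machine never stalls waiting for a reply. For orientation, a hypothetical initiator-side helper (not part of this patch; structure and field names taken from the handlers above) might look like:

/* Hypothetical sketch only: ask the peer to move an ERTM channel to the
 * AMP controller identified by dest_amp_id.
 */
static void send_move_chan_req_sketch(struct l2cap_chan *chan, u8 dest_amp_id)
{
        struct l2cap_move_chan_req req;
        u8 ident = l2cap_get_ident(chan->conn);

        chan->ident = ident;
        req.icid = cpu_to_le16(chan->scid);
        req.dest_amp_id = dest_amp_id;

        l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ,
                                                sizeof(req), &req);
}
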
+
 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
                                                        u16 to_multiplier)
 {
@@ -2969,6 +3429,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
                err = l2cap_information_rsp(conn, cmd, data);
                break;
 
+       case L2CAP_CREATE_CHAN_REQ:
+               err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
+               break;
+
+       case L2CAP_CREATE_CHAN_RSP:
+               err = l2cap_create_channel_rsp(conn, cmd, data);
+               break;
+
+       case L2CAP_MOVE_CHAN_REQ:
+               err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
+               break;
+
+       case L2CAP_MOVE_CHAN_RSP:
+               err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+               break;
+
+       case L2CAP_MOVE_CHAN_CFM:
+               err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
+               break;
+
+       case L2CAP_MOVE_CHAN_CFM_RSP:
+               err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+               break;
+
        default:
                BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
                err = -EINVAL;
@@ -3047,10 +3531,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
 {
        u16 our_fcs, rcv_fcs;
-       int hdr_size = L2CAP_HDR_SIZE + 2;
+       int hdr_size;
+
+       if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+               hdr_size = L2CAP_EXT_HDR_SIZE;
+       else
+               hdr_size = L2CAP_ENH_HDR_SIZE;
 
        if (chan->fcs == L2CAP_FCS_CRC16) {
-               skb_trim(skb, skb->len - 2);
+               skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
                rcv_fcs = get_unaligned_le16(skb->data + skb->len);
                our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
 
@@ -3062,14 +3551,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
 
 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
 {
-       u16 control = 0;
+       u32 control = 0;
 
        chan->frames_sent = 0;
 
-       control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+       control |= __set_reqseq(chan, chan->buffer_seq);
 
        if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
-               control |= L2CAP_SUPER_RCV_NOT_READY;
+               control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
                l2cap_send_sframe(chan, control);
                set_bit(CONN_RNR_SENT, &chan->conn_state);
        }
@@ -3081,12 +3570,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
 
        if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
                        chan->frames_sent == 0) {
-               control |= L2CAP_SUPER_RCV_READY;
+               control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
                l2cap_send_sframe(chan, control);
        }
 }
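
Control words widen from u16 to u32 here because the extended control field occupies four octets on the wire. The __set_*/__get_* helpers hide which encoding the channel negotiated; a sketch of the reqseq setter, assuming FLAG_EXT_CTRL selects the extended layout and that the shift/mask constants exist under these names:

/* Illustrative only; the real helpers live in the L2CAP header and the
 * L2CAP_EXT_CTRL_* names here are assumptions made for this sketch.
 */
static inline u32 set_reqseq_sketch(struct l2cap_chan *chan, u32 reqseq)
{
        if (test_bit(FLAG_EXT_CTRL, &chan->flags))
                return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
                                                L2CAP_EXT_CTRL_REQSEQ;

        return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
}
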
 
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
 {
        struct sk_buff *next_skb;
        int tx_seq_offset, next_tx_seq_offset;
@@ -3095,23 +3584,15 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
        bt_cb(skb)->sar = sar;
 
        next_skb = skb_peek(&chan->srej_q);
-       if (!next_skb) {
-               __skb_queue_tail(&chan->srej_q, skb);
-               return 0;
-       }
 
-       tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
-       if (tx_seq_offset < 0)
-               tx_seq_offset += 64;
+       tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
 
-       do {
+       while (next_skb) {
                if (bt_cb(next_skb)->tx_seq == tx_seq)
                        return -EINVAL;
 
-               next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
-                                               chan->buffer_seq) % 64;
-               if (next_tx_seq_offset < 0)
-                       next_tx_seq_offset += 64;
+               next_tx_seq_offset = __seq_offset(chan,
+                               bt_cb(next_skb)->tx_seq, chan->buffer_seq);
 
                if (next_tx_seq_offset > tx_seq_offset) {
                        __skb_queue_before(&chan->srej_q, next_skb, skb);
@@ -3119,9 +3600,10 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
                }
 
                if (skb_queue_is_last(&chan->srej_q, next_skb))
-                       break;
-
-       } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
+                       next_skb = NULL;
+               else
+                       next_skb = skb_queue_next(&chan->srej_q, next_skb);
+       }
 
        __skb_queue_tail(&chan->srej_q, skb);
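
The open-coded "% 64" arithmetic is gone because the sequence space is no longer fixed at 64 frames; __seq_offset has to work for both the enhanced and the extended window. A minimal sketch, assuming chan->tx_win_max holds the highest valid sequence number for the negotiated mode:

/* Wrap-aware distance from seq2 forward to seq1. */
static inline u16 seq_offset_sketch(struct l2cap_chan *chan, u16 seq1, u16 seq2)
{
        if (seq1 >= seq2)
                return seq1 - seq2;

        return chan->tx_win_max + 1 - seq2 + seq1;
}
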
 
@@ -3147,24 +3629,24 @@ static void append_skb_frag(struct sk_buff *skb,
        skb->truesize += new_frag->truesize;
 }
 
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
 {
        int err = -EINVAL;
 
-       switch (control & L2CAP_CTRL_SAR) {
-       case L2CAP_SDU_UNSEGMENTED:
+       switch (__get_ctrl_sar(chan, control)) {
+       case L2CAP_SAR_UNSEGMENTED:
                if (chan->sdu)
                        break;
 
                err = chan->ops->recv(chan->data, skb);
                break;
 
-       case L2CAP_SDU_START:
+       case L2CAP_SAR_START:
                if (chan->sdu)
                        break;
 
                chan->sdu_len = get_unaligned_le16(skb->data);
-               skb_pull(skb, 2);
+               skb_pull(skb, L2CAP_SDULEN_SIZE);
 
                if (chan->sdu_len > chan->imtu) {
                        err = -EMSGSIZE;
@@ -3181,7 +3663,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
                err = 0;
                break;
 
-       case L2CAP_SDU_CONTINUE:
+       case L2CAP_SAR_CONTINUE:
                if (!chan->sdu)
                        break;
 
@@ -3195,7 +3677,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
                err = 0;
                break;
 
-       case L2CAP_SDU_END:
+       case L2CAP_SAR_END:
                if (!chan->sdu)
                        break;
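
The SAR bits drive reassembly: UNSEGMENTED frames are delivered directly, START carries a 16-bit SDU length (stripped above via L2CAP_SDULEN_SIZE), CONTINUE and END append fragments, and END checks the accumulated length against sdu_len. The sender-side counterpart, sketched under the assumption that fragments are produced in offset order:

/* Sketch, not the in-tree segmentation code: choose the SAR value for a
 * fragment given its offset and length within the SDU.
 */
static u8 sar_for_fragment(size_t offset, size_t frag_len, size_t sdu_len)
{
        if (offset == 0 && frag_len >= sdu_len)
                return L2CAP_SAR_UNSEGMENTED;
        if (offset == 0)
                return L2CAP_SAR_START;
        if (offset + frag_len >= sdu_len)
                return L2CAP_SAR_END;

        return L2CAP_SAR_CONTINUE;
}
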
 
@@ -3230,14 +3712,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1
 
 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
 {
-       u16 control;
+       u32 control;
 
        BT_DBG("chan %p, Enter local busy", chan);
 
        set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
 
-       control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-       control |= L2CAP_SUPER_RCV_NOT_READY;
+       control = __set_reqseq(chan, chan->buffer_seq);
+       control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
        l2cap_send_sframe(chan, control);
 
        set_bit(CONN_RNR_SENT, &chan->conn_state);
@@ -3247,13 +3729,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
 
 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
 {
-       u16 control;
+       u32 control;
 
        if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
                goto done;
 
-       control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-       control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
+       control = __set_reqseq(chan, chan->buffer_seq);
+       control |= __set_ctrl_poll(chan);
+       control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
        l2cap_send_sframe(chan, control);
        chan->retry_count = 1;
 
@@ -3279,10 +3762,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
        }
 }
 
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
 {
        struct sk_buff *skb;
-       u16 control;
+       u32 control;
 
        while ((skb = skb_peek(&chan->srej_q)) &&
                        !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
@@ -3292,7 +3775,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
                        break;
 
                skb = skb_dequeue(&chan->srej_q);
-               control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+               control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
                err = l2cap_reassemble_sdu(chan, skb, control);
 
                if (err < 0) {
@@ -3300,16 +3783,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
                        break;
                }
 
-               chan->buffer_seq_srej =
-                       (chan->buffer_seq_srej + 1) % 64;
-               tx_seq = (tx_seq + 1) % 64;
+               chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
+               tx_seq = __next_seq(chan, tx_seq);
        }
 }
 
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
 {
        struct srej_list *l, *tmp;
-       u16 control;
+       u32 control;
 
        list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
                if (l->tx_seq == tx_seq) {
@@ -3317,45 +3799,53 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
                        kfree(l);
                        return;
                }
-               control = L2CAP_SUPER_SELECT_REJECT;
-               control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+               control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+               control |= __set_reqseq(chan, l->tx_seq);
                l2cap_send_sframe(chan, control);
                list_del(&l->list);
                list_add_tail(&l->list, &chan->srej_l);
        }
 }
 
-static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
+static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
 {
        struct srej_list *new;
-       u16 control;
+       u32 control;
 
        while (tx_seq != chan->expected_tx_seq) {
-               control = L2CAP_SUPER_SELECT_REJECT;
-               control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+               control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
+               control |= __set_reqseq(chan, chan->expected_tx_seq);
                l2cap_send_sframe(chan, control);
 
                new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+               if (!new)
+                       return -ENOMEM;
+
                new->tx_seq = chan->expected_tx_seq;
-               chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+               chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
                list_add_tail(&new->list, &chan->srej_l);
        }
-       chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+
+       chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+
+       return 0;
 }
 
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
 {
-       u8 tx_seq = __get_txseq(rx_control);
-       u8 req_seq = __get_reqseq(rx_control);
-       u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+       u16 tx_seq = __get_txseq(chan, rx_control);
+       u16 req_seq = __get_reqseq(chan, rx_control);
+       u8 sar = __get_ctrl_sar(chan, rx_control);
        int tx_seq_offset, expected_tx_seq_offset;
        int num_to_ack = (chan->tx_win/6) + 1;
        int err = 0;
 
-       BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+       BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
                                                        tx_seq, rx_control);
 
-       if (L2CAP_CTRL_FINAL & rx_control &&
+       if (__is_ctrl_final(chan, rx_control) &&
                        test_bit(CONN_WAIT_F, &chan->conn_state)) {
                __clear_monitor_timer(chan);
                if (chan->unacked_frames > 0)
@@ -3366,9 +3856,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
        chan->expected_ack_seq = req_seq;
        l2cap_drop_acked_frames(chan);
 
-       tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
-       if (tx_seq_offset < 0)
-               tx_seq_offset += 64;
+       tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
 
        /* invalid tx_seq */
        if (tx_seq_offset >= chan->tx_win) {
@@ -3413,13 +3901,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
                                        return 0;
                                }
                        }
-                       l2cap_send_srejframe(chan, tx_seq);
+
+                       err = l2cap_send_srejframe(chan, tx_seq);
+                       if (err < 0) {
+                               l2cap_send_disconn_req(chan->conn, chan, -err);
+                               return err;
+                       }
                }
        } else {
-               expected_tx_seq_offset =
-                       (chan->expected_tx_seq - chan->buffer_seq) % 64;
-               if (expected_tx_seq_offset < 0)
-                       expected_tx_seq_offset += 64;
+               expected_tx_seq_offset = __seq_offset(chan,
+                               chan->expected_tx_seq, chan->buffer_seq);
 
                /* duplicated tx_seq */
                if (tx_seq_offset < expected_tx_seq_offset)
@@ -3437,14 +3928,18 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
 
                set_bit(CONN_SEND_PBIT, &chan->conn_state);
 
-               l2cap_send_srejframe(chan, tx_seq);
+               err = l2cap_send_srejframe(chan, tx_seq);
+               if (err < 0) {
+                       l2cap_send_disconn_req(chan->conn, chan, -err);
+                       return err;
+               }
 
                __clear_ack_timer(chan);
        }
        return 0;
 
 expected:
-       chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+       chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
 
        if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
                bt_cb(skb)->tx_seq = tx_seq;
@@ -3454,22 +3949,24 @@ expected:
        }
 
        err = l2cap_reassemble_sdu(chan, skb, rx_control);
-       chan->buffer_seq = (chan->buffer_seq + 1) % 64;
+       chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+
        if (err < 0) {
                l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                return err;
        }
 
-       if (rx_control & L2CAP_CTRL_FINAL) {
+       if (__is_ctrl_final(chan, rx_control)) {
                if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
                        l2cap_retransmit_frames(chan);
        }
 
-       __set_ack_timer(chan);
 
        chan->num_acked = (chan->num_acked + 1) % num_to_ack;
        if (chan->num_acked == num_to_ack - 1)
                l2cap_send_ack(chan);
+       else
+               __set_ack_timer(chan);
 
        return 0;
 
@@ -3478,15 +3975,15 @@ drop:
        return 0;
 }
 
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
 {
-       BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
-                                               rx_control);
+       BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
+                               __get_reqseq(chan, rx_control), rx_control);
 
-       chan->expected_ack_seq = __get_reqseq(rx_control);
+       chan->expected_ack_seq = __get_reqseq(chan, rx_control);
        l2cap_drop_acked_frames(chan);
 
-       if (rx_control & L2CAP_CTRL_POLL) {
+       if (__is_ctrl_poll(chan, rx_control)) {
                set_bit(CONN_SEND_FBIT, &chan->conn_state);
                if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
                        if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
@@ -3499,7 +3996,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
                        l2cap_send_i_or_rr_or_rnr(chan);
                }
 
-       } else if (rx_control & L2CAP_CTRL_FINAL) {
+       } else if (__is_ctrl_final(chan, rx_control)) {
                clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
                if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
@@ -3518,18 +4015,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
        }
 }
 
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
 {
-       u8 tx_seq = __get_reqseq(rx_control);
+       u16 tx_seq = __get_reqseq(chan, rx_control);
 
-       BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+       BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
 
        clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
        chan->expected_ack_seq = tx_seq;
        l2cap_drop_acked_frames(chan);
 
-       if (rx_control & L2CAP_CTRL_FINAL) {
+       if (__is_ctrl_final(chan, rx_control)) {
                if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
                        l2cap_retransmit_frames(chan);
        } else {
@@ -3539,15 +4036,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
                        set_bit(CONN_REJ_ACT, &chan->conn_state);
        }
 }
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
 {
-       u8 tx_seq = __get_reqseq(rx_control);
+       u16 tx_seq = __get_reqseq(chan, rx_control);
 
-       BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+       BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
 
        clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
 
-       if (rx_control & L2CAP_CTRL_POLL) {
+       if (__is_ctrl_poll(chan, rx_control)) {
                chan->expected_ack_seq = tx_seq;
                l2cap_drop_acked_frames(chan);
 
@@ -3560,7 +4057,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
                        chan->srej_save_reqseq = tx_seq;
                        set_bit(CONN_SREJ_ACT, &chan->conn_state);
                }
-       } else if (rx_control & L2CAP_CTRL_FINAL) {
+       } else if (__is_ctrl_final(chan, rx_control)) {
                if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
                                chan->srej_save_reqseq == tx_seq)
                        clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -3575,37 +4072,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
        }
 }
 
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
+static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
 {
-       u8 tx_seq = __get_reqseq(rx_control);
+       u16 tx_seq = __get_reqseq(chan, rx_control);
 
-       BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
+       BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
 
        set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
        chan->expected_ack_seq = tx_seq;
        l2cap_drop_acked_frames(chan);
 
-       if (rx_control & L2CAP_CTRL_POLL)
+       if (__is_ctrl_poll(chan, rx_control))
                set_bit(CONN_SEND_FBIT, &chan->conn_state);
 
        if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
                __clear_retrans_timer(chan);
-               if (rx_control & L2CAP_CTRL_POLL)
+               if (__is_ctrl_poll(chan, rx_control))
                        l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
                return;
        }
 
-       if (rx_control & L2CAP_CTRL_POLL)
+       if (__is_ctrl_poll(chan, rx_control)) {
                l2cap_send_srejtail(chan);
-       else
-               l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
+       } else {
+               rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
+               l2cap_send_sframe(chan, rx_control);
+       }
 }
 
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
 {
-       BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
+       BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
 
-       if (L2CAP_CTRL_FINAL & rx_control &&
+       if (__is_ctrl_final(chan, rx_control) &&
                        test_bit(CONN_WAIT_F, &chan->conn_state)) {
                __clear_monitor_timer(chan);
                if (chan->unacked_frames > 0)
@@ -3613,20 +4112,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
                clear_bit(CONN_WAIT_F, &chan->conn_state);
        }
 
-       switch (rx_control & L2CAP_CTRL_SUPERVISE) {
-       case L2CAP_SUPER_RCV_READY:
+       switch (__get_ctrl_super(chan, rx_control)) {
+       case L2CAP_SUPER_RR:
                l2cap_data_channel_rrframe(chan, rx_control);
                break;
 
-       case L2CAP_SUPER_REJECT:
+       case L2CAP_SUPER_REJ:
                l2cap_data_channel_rejframe(chan, rx_control);
                break;
 
-       case L2CAP_SUPER_SELECT_REJECT:
+       case L2CAP_SUPER_SREJ:
                l2cap_data_channel_srejframe(chan, rx_control);
                break;
 
-       case L2CAP_SUPER_RCV_NOT_READY:
+       case L2CAP_SUPER_RNR:
                l2cap_data_channel_rnrframe(chan, rx_control);
                break;
        }
@@ -3638,12 +4137,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-       u16 control;
-       u8 req_seq;
+       u32 control;
+       u16 req_seq;
        int len, next_tx_seq_offset, req_seq_offset;
 
-       control = get_unaligned_le16(skb->data);
-       skb_pull(skb, 2);
+       control = __get_control(chan, skb->data);
+       skb_pull(skb, __ctrl_size(chan));
        len = skb->len;
 
        /*
@@ -3654,26 +4153,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
        if (l2cap_check_fcs(chan, skb))
                goto drop;
 
-       if (__is_sar_start(control) && __is_iframe(control))
-               len -= 2;
+       if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+               len -= L2CAP_SDULEN_SIZE;
 
        if (chan->fcs == L2CAP_FCS_CRC16)
-               len -= 2;
+               len -= L2CAP_FCS_SIZE;
 
        if (len > chan->mps) {
                l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                goto drop;
        }
 
-       req_seq = __get_reqseq(control);
-       req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
-       if (req_seq_offset < 0)
-               req_seq_offset += 64;
+       req_seq = __get_reqseq(chan, control);
 
-       next_tx_seq_offset =
-               (chan->next_tx_seq - chan->expected_ack_seq) % 64;
-       if (next_tx_seq_offset < 0)
-               next_tx_seq_offset += 64;
+       req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
+
+       next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
+                                               chan->expected_ack_seq);
 
        /* check for invalid req-seq */
        if (req_seq_offset > next_tx_seq_offset) {
@@ -3681,7 +4177,7 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
                goto drop;
        }
 
-       if (__is_iframe(control)) {
+       if (!__is_sframe(chan, control)) {
                if (len < 0) {
                        l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
                        goto drop;
@@ -3709,8 +4205,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
 {
        struct l2cap_chan *chan;
        struct sock *sk = NULL;
-       u16 control;
-       u8 tx_seq;
+       u32 control;
+       u16 tx_seq;
        int len;
 
        chan = l2cap_get_chan_by_scid(conn, cid);
@@ -3751,23 +4247,23 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
                goto done;
 
        case L2CAP_MODE_STREAMING:
-               control = get_unaligned_le16(skb->data);
-               skb_pull(skb, 2);
+               control = __get_control(chan, skb->data);
+               skb_pull(skb, __ctrl_size(chan));
                len = skb->len;
 
                if (l2cap_check_fcs(chan, skb))
                        goto drop;
 
-               if (__is_sar_start(control))
-                       len -= 2;
+               if (__is_sar_start(chan, control))
+                       len -= L2CAP_SDULEN_SIZE;
 
                if (chan->fcs == L2CAP_FCS_CRC16)
-                       len -= 2;
+                       len -= L2CAP_FCS_SIZE;
 
-               if (len > chan->mps || len < 0 || __is_sframe(control))
+               if (len > chan->mps || len < 0 || __is_sframe(chan, control))
                        goto drop;
 
-               tx_seq = __get_txseq(control);
+               tx_seq = __get_txseq(chan, control);
 
                if (chan->expected_tx_seq != tx_seq) {
                        /* Frame(s) missing - must discard partial SDU */
@@ -3779,7 +4275,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
                        /* TODO: Notify userland of missing data */
                }
 
-               chan->expected_tx_seq = (tx_seq + 1) % 64;
+               chan->expected_tx_seq = __next_seq(chan, tx_seq);
 
                if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
                        l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3933,12 +4429,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 
                if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
                        lm1 |= HCI_LM_ACCEPT;
-                       if (c->role_switch)
+                       if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
                                lm1 |= HCI_LM_MASTER;
                        exact++;
                } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
                        lm2 |= HCI_LM_ACCEPT;
-                       if (c->role_switch)
+                       if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
                                lm2 |= HCI_LM_MASTER;
                }
        }
@@ -3973,7 +4469,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
        BT_DBG("hcon %p", hcon);
 
        if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
-               return 0x13;
+               return HCI_ERROR_REMOTE_USER_TERM;
 
        return conn->disc_reason;
 }
index 5c406d3..f737043 100644 (file)
@@ -334,7 +334,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
                opts.mode     = chan->mode;
                opts.fcs      = chan->fcs;
                opts.max_tx   = chan->max_tx;
-               opts.txwin_size = (__u16)chan->tx_win;
+               opts.txwin_size = chan->tx_win;
 
                len = min_t(unsigned int, len, sizeof(opts));
                if (copy_to_user(optval, (char *) &opts, len))
@@ -359,10 +359,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
                        break;
                }
 
-               if (chan->role_switch)
+               if (test_bit(FLAG_ROLE_SWITCH, &chan->flags))
                        opt |= L2CAP_LM_MASTER;
 
-               if (chan->force_reliable)
+               if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
                        opt |= L2CAP_LM_RELIABLE;
 
                if (put_user(opt, (u32 __user *) optval))
@@ -449,7 +449,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
                break;
 
        case BT_FLUSHABLE:
-               if (put_user(chan->flushable, (u32 __user *) optval))
+               if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags),
+                                               (u32 __user *) optval))
                        err = -EFAULT;
 
                break;
@@ -461,7 +462,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
                        break;
                }
 
-               pwr.force_active = chan->force_active;
+               pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
 
                len = min_t(unsigned int, len, sizeof(pwr));
                if (copy_to_user(optval, (char *) &pwr, len))
@@ -469,6 +470,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 
                break;
 
+       case BT_CHANNEL_POLICY:
+               if (!enable_hs) {
+                       err = -ENOPROTOOPT;
+                       break;
+               }
+
+               if (put_user(chan->chan_policy, (u32 __user *) optval))
+                       err = -EFAULT;
+               break;
+
        default:
                err = -ENOPROTOOPT;
                break;
@@ -503,7 +514,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
                opts.mode     = chan->mode;
                opts.fcs      = chan->fcs;
                opts.max_tx   = chan->max_tx;
-               opts.txwin_size = (__u16)chan->tx_win;
+               opts.txwin_size = chan->tx_win;
 
                len = min_t(unsigned int, sizeof(opts), optlen);
                if (copy_from_user((char *) &opts, optval, len)) {
@@ -511,7 +522,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
                        break;
                }
 
-               if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
+               if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
                        err = -EINVAL;
                        break;
                }
@@ -535,7 +546,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
                chan->omtu = opts.omtu;
                chan->fcs  = opts.fcs;
                chan->max_tx = opts.max_tx;
-               chan->tx_win = (__u8)opts.txwin_size;
+               chan->tx_win = opts.txwin_size;
                break;
 
        case L2CAP_LM:
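
The legacy L2CAP_OPTIONS interface now accepts transmit windows up to L2CAP_DEFAULT_EXT_WINDOW and stores tx_win without truncating it to a u8. A hypothetical userspace sketch (bluez-style headers assumed; a window above 63 only takes effect once extended control fields are negotiated):

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

/* Hypothetical usage; struct l2cap_options field names follow the
 * kernel code above.
 */
static int request_ext_window(int sk)
{
        struct l2cap_options opts;
        socklen_t len = sizeof(opts);

        if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &len) < 0)
                return -1;

        opts.mode = L2CAP_MODE_ERTM;
        opts.txwin_size = 64;   /* beyond the enhanced-mode 63-frame limit */

        return setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
}
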
@@ -551,8 +562,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
                if (opt & L2CAP_LM_SECURE)
                        chan->sec_level = BT_SECURITY_HIGH;
 
-               chan->role_switch    = (opt & L2CAP_LM_MASTER);
-               chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
+               if (opt & L2CAP_LM_MASTER)
+                       set_bit(FLAG_ROLE_SWITCH, &chan->flags);
+               else
+                       clear_bit(FLAG_ROLE_SWITCH, &chan->flags);
+
+               if (opt & L2CAP_LM_RELIABLE)
+                       set_bit(FLAG_FORCE_RELIABLE, &chan->flags);
+               else
+                       clear_bit(FLAG_FORCE_RELIABLE, &chan->flags);
                break;
 
        default:
@@ -608,8 +626,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 
                chan->sec_level = sec.level;
 
+               if (!chan->conn)
+                       break;
+
                conn = chan->conn;
-               if (conn && chan->scid == L2CAP_CID_LE_DATA) {
+
+               /* change security for LE channels */
+               if (chan->scid == L2CAP_CID_LE_DATA) {
                        if (!conn->hcon->out) {
                                err = -EINVAL;
                                break;
@@ -617,9 +640,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 
                        if (smp_conn_security(conn, sec.level))
                                break;
-
-                       err = 0;
                        sk->sk_state = BT_CONFIG;
+
+               /* or for ACL link, under defer_setup time */
+               } else if (sk->sk_state == BT_CONNECT2 &&
+                                       bt_sk(sk)->defer_setup) {
+                       err = l2cap_chan_check_security(chan);
+               } else {
+                       err = -EINVAL;
                }
                break;
 
@@ -658,7 +686,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
                        }
                }
 
-               chan->flushable = opt;
+               if (opt)
+                       set_bit(FLAG_FLUSHABLE, &chan->flags);
+               else
+                       clear_bit(FLAG_FLUSHABLE, &chan->flags);
                break;
 
        case BT_POWER:
@@ -675,7 +706,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
                        err = -EFAULT;
                        break;
                }
-               chan->force_active = pwr.force_active;
+
+               if (pwr.force_active)
+                       set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+               else
+                       clear_bit(FLAG_FORCE_ACTIVE, &chan->flags);
+               break;
+
+       case BT_CHANNEL_POLICY:
+               if (!enable_hs) {
+                       err = -ENOPROTOOPT;
+                       break;
+               }
+
+               if (get_user(opt, (u32 __user *) optval)) {
+                       err = -EFAULT;
+                       break;
+               }
+
+               if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (chan->mode != L2CAP_MODE_ERTM &&
+                               chan->mode != L2CAP_MODE_STREAMING) {
+                       err = -EOPNOTSUPP;
+                       break;
+               }
+
+               chan->chan_policy = (u8) opt;
                break;
 
        default:
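
BT_CHANNEL_POLICY is only honored when high-speed support is enabled and the channel runs ERTM or streaming mode; values above BT_CHANNEL_POLICY_AMP_PREFERRED are rejected. A hypothetical userspace call, assuming the new constants are exported through the bluetooth headers:

#include <stdint.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

/* Hypothetical usage; failures surface as the -ENOPROTOOPT, -EINVAL and
 * -EOPNOTSUPP cases handled above.
 */
static int prefer_amp(int sk)
{
        uint32_t policy = BT_CHANNEL_POLICY_AMP_PREFERRED;

        return setsockopt(sk, SOL_BLUETOOTH, BT_CHANNEL_POLICY,
                                                &policy, sizeof(policy));
}
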
@@ -709,7 +769,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
                return -ENOTCONN;
        }
 
-       err = l2cap_chan_send(chan, msg, len);
+       err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
 
        release_sock(sk);
        return err;
@@ -931,11 +991,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
                chan->fcs  = pchan->fcs;
                chan->max_tx = pchan->max_tx;
                chan->tx_win = pchan->tx_win;
+               chan->tx_win_max = pchan->tx_win_max;
                chan->sec_level = pchan->sec_level;
-               chan->role_switch = pchan->role_switch;
-               chan->force_reliable = pchan->force_reliable;
-               chan->flushable = pchan->flushable;
-               chan->force_active = pchan->force_active;
+               chan->flags = pchan->flags;
 
                security_sk_clone(parent, sk);
        } else {
@@ -964,12 +1022,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
                chan->max_tx = L2CAP_DEFAULT_MAX_TX;
                chan->fcs  = L2CAP_FCS_CRC16;
                chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+               chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
                chan->sec_level = BT_SECURITY_LOW;
-               chan->role_switch = 0;
-               chan->force_reliable = 0;
-               chan->flushable = BT_FLUSHABLE_OFF;
-               chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
-
+               chan->flags = 0;
+               set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
        }
 
        /* Default config options */
index 2c76342..1ce549b 100644 (file)
@@ -22,6 +22,7 @@
 
 /* Bluetooth HCI Management interface */
 
+#include <linux/kernel.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <asm/unaligned.h>
 #define MGMT_VERSION   0
 #define MGMT_REVISION  1
 
+#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */
+
 struct pending_cmd {
        struct list_head list;
-       __u16 opcode;
+       u16 opcode;
        int index;
        void *param;
        struct sock *sk;
        void *user_data;
 };
 
-static LIST_HEAD(cmd_list);
+/* HCI to MGMT error code conversion table */
+static u8 mgmt_status_table[] = {
+       MGMT_STATUS_SUCCESS,
+       MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
+       MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
+       MGMT_STATUS_FAILED,             /* Hardware Failure */
+       MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
+       MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
+       MGMT_STATUS_NOT_PAIRED,         /* PIN or Key Missing */
+       MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
+       MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
+       MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
+       MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
+       MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
+       MGMT_STATUS_BUSY,               /* Command Disallowed */
+       MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
+       MGMT_STATUS_REJECTED,           /* Rejected Security */
+       MGMT_STATUS_REJECTED,           /* Rejected Personal */
+       MGMT_STATUS_TIMEOUT,            /* Host Timeout */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
+       MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
+       MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
+       MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
+       MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
+       MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
+       MGMT_STATUS_BUSY,               /* Repeated Attempts */
+       MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
+       MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
+       MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
+       MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
+       MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
+       MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
+       MGMT_STATUS_FAILED,             /* Unspecified Error */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
+       MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
+       MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
+       MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
+       MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
+       MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
+       MGMT_STATUS_FAILED,             /* Unit Link Key Used */
+       MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
+       MGMT_STATUS_TIMEOUT,            /* Instant Passed */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
+       MGMT_STATUS_FAILED,             /* Transaction Collision */
+       MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
+       MGMT_STATUS_REJECTED,           /* QoS Rejected */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
+       MGMT_STATUS_REJECTED,           /* Insufficient Security */
+       MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
+       MGMT_STATUS_BUSY,               /* Role Switch Pending */
+       MGMT_STATUS_FAILED,             /* Slot Violation */
+       MGMT_STATUS_FAILED,             /* Role Switch Failed */
+       MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
+       MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
+       MGMT_STATUS_BUSY,               /* Host Busy Pairing */
+       MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
+       MGMT_STATUS_BUSY,               /* Controller Busy */
+       MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
+       MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
+       MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
+       MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
+       MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
+};
+
+static u8 mgmt_status(u8 hci_status)
+{
+       if (hci_status < ARRAY_SIZE(mgmt_status_table))
+               return mgmt_status_table[hci_status];
+
+       return MGMT_STATUS_FAILED;
+}
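
With the table in place, management replies carry MGMT_STATUS_* codes instead of raw errno or HCI values, and any HCI status beyond the table collapses to MGMT_STATUS_FAILED. A hypothetical call site:

/* Hypothetical helper, not part of the patch: translate an HCI status
 * before reporting a failed power-on to a management client.
 */
static int report_power_status(struct sock *sk, u16 index, u8 hci_status)
{
        return cmd_status(sk, index, MGMT_OP_SET_POWERED,
                                                mgmt_status(hci_status));
}
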
 
 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
        struct mgmt_ev_cmd_status *ev;
+       int err;
 
        BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
 
@@ -66,10 +141,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
        ev->status = status;
        put_unaligned_le16(cmd, &ev->opcode);
 
-       if (sock_queue_rcv_skb(sk, skb) < 0)
+       err = sock_queue_rcv_skb(sk, skb);
+       if (err < 0)
                kfree_skb(skb);
 
-       return 0;
+       return err;
 }
 
 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
@@ -78,6 +154,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
        struct mgmt_ev_cmd_complete *ev;
+       int err;
 
        BT_DBG("sock %p", sk);
 
@@ -97,10 +174,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp,
        if (rp)
                memcpy(ev->data, rp, rp_len);
 
-       if (sock_queue_rcv_skb(sk, skb) < 0)
+       err = sock_queue_rcv_skb(sk, skb);
+       if (err < 0)
                kfree_skb(skb);
 
-       return 0;
+       return err;
 }
 
 static int read_version(struct sock *sk)
@@ -120,6 +198,7 @@ static int read_index_list(struct sock *sk)
 {
        struct mgmt_rp_read_index_list *rp;
        struct list_head *p;
+       struct hci_dev *d;
        size_t rp_len;
        u16 count;
        int i, err;
@@ -143,10 +222,9 @@ static int read_index_list(struct sock *sk)
        put_unaligned_le16(count, &rp->num_controllers);
 
        i = 0;
-       list_for_each(p, &hci_dev_list) {
-               struct hci_dev *d = list_entry(p, struct hci_dev, list);
-
-               hci_del_off_timer(d);
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags))
+                       cancel_delayed_work(&d->power_off);
 
                if (test_bit(HCI_SETUP, &d->flags))
                        continue;
@@ -174,9 +252,11 @@ static int read_controller_info(struct sock *sk, u16 index)
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_READ_INFO,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
-       hci_del_off_timer(hdev);
+       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
+               cancel_delayed_work_sync(&hdev->power_off);
 
        hci_dev_lock_bh(hdev);
 
@@ -221,7 +301,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd)
 }
 
 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
-                                               u16 index, void *data, u16 len)
+                                                       struct hci_dev *hdev,
+                                                       void *data, u16 len)
 {
        struct pending_cmd *cmd;
 
@@ -230,7 +311,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
                return NULL;
 
        cmd->opcode = opcode;
-       cmd->index = index;
+       cmd->index = hdev->id;
 
        cmd->param = kmalloc(len, GFP_ATOMIC);
        if (!cmd->param) {
@@ -244,48 +325,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
        cmd->sk = sk;
        sock_hold(sk);
 
-       list_add(&cmd->list, &cmd_list);
+       list_add(&cmd->list, &hdev->mgmt_pending);
 
        return cmd;
 }
 
-static void mgmt_pending_foreach(u16 opcode, int index,
+static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
                                void (*cb)(struct pending_cmd *cmd, void *data),
                                void *data)
 {
        struct list_head *p, *n;
 
-       list_for_each_safe(p, n, &cmd_list) {
+       list_for_each_safe(p, n, &hdev->mgmt_pending) {
                struct pending_cmd *cmd;
 
                cmd = list_entry(p, struct pending_cmd, list);
 
-               if (cmd->opcode != opcode)
-                       continue;
-
-               if (index >= 0 && cmd->index != index)
+               if (opcode > 0 && cmd->opcode != opcode)
                        continue;
 
                cb(cmd, data);
        }
 }
 
-static struct pending_cmd *mgmt_pending_find(u16 opcode, int index)
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
 {
-       struct list_head *p;
-
-       list_for_each(p, &cmd_list) {
-               struct pending_cmd *cmd;
-
-               cmd = list_entry(p, struct pending_cmd, list);
-
-               if (cmd->opcode != opcode)
-                       continue;
-
-               if (index >= 0 && cmd->index != index)
-                       continue;
+       struct pending_cmd *cmd;
 
-               return cmd;
+       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+               if (cmd->opcode == opcode)
+                       return cmd;
        }
 
        return NULL;
@@ -297,6 +366,15 @@ static void mgmt_pending_remove(struct pending_cmd *cmd)
        mgmt_pending_free(cmd);
 }
 
+static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
+{
+       struct mgmt_mode rp;
+
+       rp.val = val;
+
+       return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
+}
+
 static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
 {
        struct mgmt_mode *cp;
@@ -309,26 +387,29 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_POWERED,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        up = test_bit(HCI_UP, &hdev->flags);
        if ((cp->val && up) || (!cp->val && !up)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY);
+               err = send_mode_rsp(sk, index, MGMT_OP_SET_POWERED, cp->val);
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY);
+       if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_POWERED,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -337,7 +418,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
        if (cp->val)
                queue_work(hdev->workqueue, &hdev->power_on);
        else
-               queue_work(hdev->workqueue, &hdev->power_off);
+               queue_work(hdev->workqueue, &hdev->power_off.work);
 
        err = 0;
 
@@ -350,7 +431,7 @@ failed:
 static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
                                                                        u16 len)
 {
-       struct mgmt_mode *cp;
+       struct mgmt_cp_set_discoverable *cp;
        struct hci_dev *hdev;
        struct pending_cmd *cmd;
        u8 scan;
@@ -361,32 +442,37 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
-                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY);
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
        if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) &&
                                        test_bit(HCI_PSCAN, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY);
+               err = send_mode_rsp(sk, index, MGMT_OP_SET_DISCOVERABLE,
+                                                               cp->val);
                goto failed;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -396,11 +482,16 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
 
        if (cp->val)
                scan |= SCAN_INQUIRY;
+       else
+               cancel_delayed_work(&hdev->discov_off);
 
        err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
+       if (cp->val)
+               hdev->discov_timeout = get_unaligned_le16(&cp->timeout);
+
 failed:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
@@ -422,31 +513,36 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) ||
-                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY);
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
+                       mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
        if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY);
+               err = send_mode_rsp(sk, index, MGMT_OP_SET_CONNECTABLE,
+                                                               cp->val);
                goto failed;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -468,8 +564,8 @@ failed:
        return err;
 }
 
-static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
-                                                       struct sock *skip_sk)
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data,
+                                       u16 data_len, struct sock *skip_sk)
 {
        struct sk_buff *skb;
        struct mgmt_hdr *hdr;
@@ -482,7 +578,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
 
        hdr = (void *) skb_put(skb, sizeof(*hdr));
        hdr->opcode = cpu_to_le16(event);
-       hdr->index = cpu_to_le16(index);
+       if (hdev)
+               hdr->index = cpu_to_le16(hdev->id);
+       else
+               hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
        hdr->len = cpu_to_le16(data_len);
 
        if (data)
@@ -494,15 +593,6 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len,
        return 0;
 }
 
-static int send_mode_rsp(struct sock *sk, u16 opcode, u16 index, u8 val)
-{
-       struct mgmt_mode rp;
-
-       rp.val = val;
-
-       return cmd_complete(sk, index, opcode, &rp, sizeof(rp));
-}
-
 static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
                                                                        u16 len)
 {
@@ -515,11 +605,13 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -534,7 +626,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
 
        ev.val = cp->val;
 
-       err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
+       err = mgmt_event(MGMT_EV_PAIRABLE, hdev, &ev, sizeof(ev), sk);
 
 failed:
        hci_dev_unlock_bh(hdev);
@@ -587,7 +679,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
        u16 eir_len = 0;
        u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
        int i, truncated = 0;
-       struct list_head *p;
+       struct bt_uuid *uuid;
        size_t name_len;
 
        name_len = strlen(hdev->dev_name);
@@ -612,8 +704,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data)
        memset(uuid16_list, 0, sizeof(uuid16_list));
 
        /* Group all UUID16 types */
-       list_for_each(p, &hdev->uuids) {
-               struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+       list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;
 
                uuid16 = get_uuid16(uuid->uuid);
@@ -689,14 +780,11 @@ static int update_eir(struct hci_dev *hdev)
 
 static u8 get_service_classes(struct hci_dev *hdev)
 {
-       struct list_head *p;
+       struct bt_uuid *uuid;
        u8 val = 0;
 
-       list_for_each(p, &hdev->uuids) {
-               struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
-
+       list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;
-       }
 
        return val;
 }
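
The two hunks above replace open-coded list_for_each()/list_entry() walks with list_for_each_entry(), which hands back the containing structure directly. The following user-space sketch re-creates that idiom with simplified copies of list_head, container_of and the two macros (not the kernel's own headers); it needs GCC/Clang for typeof.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

#define list_for_each_entry(pos, head, member) \
        for (pos = container_of((head)->next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

struct bt_uuid { struct list_head list; unsigned char svc_hint; };

int main(void)
{
        struct list_head uuids = LIST_HEAD_INIT(uuids);
        struct bt_uuid a = { .svc_hint = 0x01 }, b = { .svc_hint = 0x40 };
        struct bt_uuid *uuid;
        struct list_head *p;
        unsigned char val = 0;

        list_add_tail(&a.list, &uuids);
        list_add_tail(&b.list, &uuids);

        /* Old style: walk the list_head and convert each node by hand. */
        list_for_each(p, &uuids)
                val |= container_of(p, struct bt_uuid, list)->svc_hint;
        printf("old style: 0x%02x\n", val);

        /* New style: the macro yields the containing struct directly. */
        val = 0;
        list_for_each_entry(uuid, &uuids, list)
                val |= uuid->svc_hint;
        printf("new style: 0x%02x\n", val);

        return 0;
}
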
@@ -732,11 +820,13 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_ADD_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -781,11 +871,13 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -807,7 +899,8 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
        }
 
        if (found == 0) {
-               err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT);
+               err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID,
+                                               MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }
 
@@ -840,11 +933,13 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("request for hci%u", index);
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -872,11 +967,13 @@ static int set_service_cache(struct sock *sk, u16 index,  unsigned char *data,
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -895,6 +992,9 @@ static int set_service_cache(struct sock *sk, u16 index,  unsigned char *data,
        if (err == 0)
                err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
                                                                        0);
+       else
+               cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, -err);
+
 
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
@@ -902,30 +1002,35 @@ static int set_service_cache(struct sock *sk, u16 index,  unsigned char *data,
        return err;
 }
 
-static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int load_link_keys(struct sock *sk, u16 index, unsigned char *data,
+                                                               u16 len)
 {
        struct hci_dev *hdev;
-       struct mgmt_cp_load_keys *cp;
+       struct mgmt_cp_load_link_keys *cp;
        u16 key_count, expected_len;
        int i;
 
        cp = (void *) data;
 
        if (len < sizeof(*cp))
-               return -EINVAL;
+               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        key_count = get_unaligned_le16(&cp->key_count);
 
-       expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
+       expected_len = sizeof(*cp) + key_count *
+                                       sizeof(struct mgmt_link_key_info);
        if (expected_len != len) {
-               BT_ERR("load_keys: expected %u bytes, got %u bytes",
+               BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
                                                        len, expected_len);
-               return -EINVAL;
+               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
        }
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
                                                                key_count);
@@ -942,57 +1047,83 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
                clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
 
        for (i = 0; i < key_count; i++) {
-               struct mgmt_key_info *key = &cp->keys[i];
+               struct mgmt_link_key_info *key = &cp->keys[i];
 
                hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
                                                                key->pin_len);
        }
 
+       cmd_complete(sk, index, MGMT_OP_LOAD_LINK_KEYS, NULL, 0);
+
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
        return 0;
 }
 
-static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
+static int remove_keys(struct sock *sk, u16 index, unsigned char *data,
+                                                               u16 len)
 {
        struct hci_dev *hdev;
-       struct mgmt_cp_remove_key *cp;
+       struct mgmt_cp_remove_keys *cp;
+       struct mgmt_rp_remove_keys rp;
+       struct hci_cp_disconnect dc;
+       struct pending_cmd *cmd;
        struct hci_conn *conn;
        int err;
 
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.bdaddr, &cp->bdaddr);
+       rp.status = MGMT_STATUS_FAILED;
+
        err = hci_remove_link_key(hdev, &cp->bdaddr);
        if (err < 0) {
-               err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err);
+               rp.status = MGMT_STATUS_NOT_PAIRED;
                goto unlock;
        }
 
-       err = 0;
-
-       if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect)
+       if (!test_bit(HCI_UP, &hdev->flags) || !cp->disconnect) {
+               err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
                goto unlock;
+       }
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
-       if (conn) {
-               struct hci_cp_disconnect dc;
+       if (!conn) {
+               err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
+               goto unlock;
+       }
 
-               put_unaligned_le16(conn->handle, &dc.handle);
-               dc.reason = 0x13; /* Remote User Terminated Connection */
-               err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_KEYS, hdev, cp, sizeof(*cp));
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
        }
 
+       put_unaligned_le16(conn->handle, &dc.handle);
+       dc.reason = 0x13; /* Remote User Terminated Connection */
+       err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
 unlock:
+       if (err < 0)
+               err = cmd_complete(sk, index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
@@ -1013,21 +1144,25 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) {
-               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY);
+       if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                                       MGMT_STATUS_BUSY);
                goto failed;
        }
 
@@ -1036,11 +1171,12 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
 
        if (!conn) {
-               err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
+               err = cmd_status(sk, index, MGMT_OP_DISCONNECT,
+                                               MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -1060,10 +1196,30 @@ failed:
        return err;
 }
 
+static u8 link_to_mgmt(u8 link_type, u8 addr_type)
+{
+       switch (link_type) {
+       case LE_LINK:
+               switch (addr_type) {
+               case ADDR_LE_DEV_PUBLIC:
+                       return MGMT_ADDR_LE_PUBLIC;
+               case ADDR_LE_DEV_RANDOM:
+                       return MGMT_ADDR_LE_RANDOM;
+               default:
+                       return MGMT_ADDR_INVALID;
+               }
+       case ACL_LINK:
+               return MGMT_ADDR_BREDR;
+       default:
+               return MGMT_ADDR_INVALID;
+       }
+}
+
 static int get_connections(struct sock *sk, u16 index)
 {
        struct mgmt_rp_get_connections *rp;
        struct hci_dev *hdev;
+       struct hci_conn *c;
        struct list_head *p;
        size_t rp_len;
        u16 count;
@@ -1073,7 +1229,8 @@ static int get_connections(struct sock *sk, u16 index)
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1082,7 +1239,7 @@ static int get_connections(struct sock *sk, u16 index)
                count++;
        }
 
-       rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t));
+       rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info));
        rp = kmalloc(rp_len, GFP_ATOMIC);
        if (!rp) {
                err = -ENOMEM;
@@ -1092,12 +1249,17 @@ static int get_connections(struct sock *sk, u16 index)
        put_unaligned_le16(count, &rp->conn_count);
 
        i = 0;
-       list_for_each(p, &hdev->conn_hash.list) {
-               struct hci_conn *c = list_entry(p, struct hci_conn, list);
-
-               bacpy(&rp->conn[i++], &c->dst);
+       list_for_each_entry(c, &hdev->conn_hash.list, list) {
+               bacpy(&rp->addr[i].bdaddr, &c->dst);
+               rp->addr[i].type = link_to_mgmt(c->type, c->dst_type);
+               if (rp->addr[i].type == MGMT_ADDR_INVALID)
+                       continue;
+               i++;
        }
 
+       /* Recalculate length in case of filtered SCO connections, etc */
+       rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
+
        err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
 
 unlock:
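
get_connections() above sizes the reply for every connection, skips entries whose address type maps to MGMT_ADDR_INVALID, and then recomputes rp_len from the number actually copied. A minimal sketch of that over-allocate-then-shrink pattern, with made-up data standing in for the connection hash:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addr_info { unsigned char type; unsigned char bdaddr[6]; };

#define TYPE_INVALID 0xff       /* stand-in for MGMT_ADDR_INVALID */

int main(void)
{
        unsigned char types[] = { 0x00, TYPE_INVALID, 0x01, TYPE_INVALID, 0x02 };
        size_t count = sizeof(types), i, n = 0;
        struct addr_info *rp = calloc(count, sizeof(*rp)); /* worst case */

        if (!rp)
                return 1;

        for (i = 0; i < count; i++) {
                if (types[i] == TYPE_INVALID)
                        continue;               /* filtered out */
                rp[n].type = types[i];
                memset(rp[n].bdaddr, (int)i, sizeof(rp[n].bdaddr));
                n++;
        }

        /* Length is recomputed from the entries actually filled in. */
        printf("reply carries %zu of %zu entries (%zu bytes)\n",
               n, count, n * sizeof(*rp));

        free(rp);
        return 0;
}
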
@@ -1113,7 +1275,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index,
        struct pending_cmd *cmd;
        int err;
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
+       cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
                                                                sizeof(*cp));
        if (!cmd)
                return -ENOMEM;
@@ -1142,22 +1304,26 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
+               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (!conn) {
-               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
+               err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
+                                               MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }
 
@@ -1169,12 +1335,12 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
                err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
                if (err >= 0)
                        err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
-                                                               EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
                goto failed;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -1208,18 +1374,18 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-                                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
-                                                               ENETDOWN);
+                                               MGMT_STATUS_NOT_POWERED);
                goto failed;
        }
 
@@ -1243,11 +1409,13 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1265,19 +1433,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
 static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
-       struct list_head *p;
-
-       list_for_each(p, &cmd_list) {
-               struct pending_cmd *cmd;
-
-               cmd = list_entry(p, struct pending_cmd, list);
+       struct pending_cmd *cmd;
 
+       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
                if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
                        continue;
 
-               if (cmd->index != hdev->id)
-                       continue;
-
                if (cmd->user_data != conn)
                        continue;
 
@@ -1292,7 +1453,8 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
        struct mgmt_rp_pair_device rp;
        struct hci_conn *conn = cmd->user_data;
 
-       bacpy(&rp.bdaddr, &conn->dst);
+       bacpy(&rp.addr.bdaddr, &conn->dst);
+       rp.addr.type = link_to_mgmt(conn->type, conn->dst_type);
        rp.status = status;
 
        cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp));
@@ -1314,20 +1476,18 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
        BT_DBG("status %u", status);
 
        cmd = find_pairing(conn);
-       if (!cmd) {
+       if (!cmd)
                BT_DBG("Unable to find a pending command");
-               return;
-       }
-
-       pairing_complete(cmd, status);
+       else
+               pairing_complete(cmd, status);
 }
 
 static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 {
        struct hci_dev *hdev;
        struct mgmt_cp_pair_device *cp;
+       struct mgmt_rp_pair_device rp;
        struct pending_cmd *cmd;
-       struct adv_entry *entry;
        u8 sec_level, auth_type;
        struct hci_conn *conn;
        int err;
@@ -1337,11 +1497,13 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
        cp = (void *) data;
 
        if (len != sizeof(*cp))
-               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
@@ -1351,26 +1513,33 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
        else
                auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 
-       entry = hci_find_adv_entry(hdev, &cp->bdaddr);
-       if (entry)
-               conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+       if (cp->addr.type == MGMT_ADDR_BREDR)
+               conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr, sec_level,
                                                                auth_type);
        else
-               conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+               conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr, sec_level,
                                                                auth_type);
 
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+       rp.addr.type = cp->addr.type;
+
        if (IS_ERR(conn)) {
-               err = PTR_ERR(conn);
+               rp.status = -PTR_ERR(conn);
+               err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+                                                       &rp, sizeof(rp));
                goto unlock;
        }
 
        if (conn->connect_cfm_cb) {
                hci_conn_put(conn);
-               err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY);
+               rp.status = EBUSY;
+               err = cmd_complete(sk, index, MGMT_OP_PAIR_DEVICE,
+                                                       &rp, sizeof(rp));
                goto unlock;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                hci_conn_put(conn);
@@ -1378,7 +1547,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
        }
 
        /* For LE, just connecting isn't a proof that the pairing finished */
-       if (!entry)
+       if (cp->addr.type == MGMT_ADDR_BREDR)
                conn->connect_cfm_cb = pairing_complete_cb;
 
        conn->security_cfm_cb = pairing_complete_cb;
@@ -1399,56 +1568,138 @@ unlock:
        return err;
 }
 
-static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
-                                                       u16 len, int success)
+static int user_pairing_resp(struct sock *sk, u16 index, bdaddr_t *bdaddr,
+                                       u16 mgmt_op, u16 hci_op, __le32 passkey)
 {
-       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
-       u16 mgmt_op, hci_op;
        struct pending_cmd *cmd;
        struct hci_dev *hdev;
+       struct hci_conn *conn;
        int err;
 
-       BT_DBG("");
-
-       if (success) {
-               mgmt_op = MGMT_OP_USER_CONFIRM_REPLY;
-               hci_op = HCI_OP_USER_CONFIRM_REPLY;
-       } else {
-               mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY;
-               hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY;
-       }
-
-       if (len != sizeof(*cp))
-               return cmd_status(sk, index, mgmt_op, EINVAL);
-
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, mgmt_op, ENODEV);
+               return cmd_status(sk, index, mgmt_op,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
-               err = cmd_status(sk, index, mgmt_op, ENETDOWN);
-               goto failed;
+               err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_NOT_POWERED);
+               goto done;
+       }
+
+       /*
+        * Check for an existing ACL link; if one is present, pair via
+        * HCI commands.
+        *
+        * If no ACL link is present, check for an LE link and, if
+        * present, pair via the SMP engine.
+        *
+        * If neither an ACL nor an LE link is present, fail with an error.
+        */
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
+       if (!conn) {
+               conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
+               if (!conn) {
+                       err = cmd_status(sk, index, mgmt_op,
+                                               MGMT_STATUS_NOT_CONNECTED);
+                       goto done;
+               }
+
+               /* Continue with pairing via SMP */
+
+               err = cmd_status(sk, index, mgmt_op, MGMT_STATUS_SUCCESS);
+               goto done;
        }
 
-       cmd = mgmt_pending_add(sk, mgmt_op, index, data, len);
+       cmd = mgmt_pending_add(sk, mgmt_op, hdev, bdaddr, sizeof(*bdaddr));
        if (!cmd) {
                err = -ENOMEM;
-               goto failed;
+               goto done;
        }
 
-       err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr);
+       /* Continue with pairing via HCI */
+       if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
+               struct hci_cp_user_passkey_reply cp;
+
+               bacpy(&cp.bdaddr, bdaddr);
+               cp.passkey = passkey;
+               err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
+       } else
+               err = hci_send_cmd(hdev, hci_op, sizeof(*bdaddr), bdaddr);
+
        if (err < 0)
                mgmt_pending_remove(cmd);
 
-failed:
+done:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
        return err;
 }
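
The comment in user_pairing_resp() above spells out the lookup order: prefer an existing BR/EDR (ACL) link and pair over HCI, fall back to an LE link and the SMP engine, and fail when neither exists. A toy sketch of just that decision, with lookup() standing in for hci_conn_hash_lookup_ba():

#include <stdio.h>
#include <stdbool.h>

struct conn { bool up; };

/* Stand-in for hci_conn_hash_lookup_ba(): NULL means no such link. */
static struct conn *lookup(struct conn *c)
{
        return c->up ? c : NULL;
}

static const char *pick_pairing_path(struct conn *acl, struct conn *le)
{
        if (lookup(acl))
                return "pair over HCI";         /* existing ACL link */
        if (lookup(le))
                return "pair via SMP";          /* LE link only */
        return "not connected";
}

int main(void)
{
        struct conn acl = { .up = false };
        struct conn le  = { .up = true };

        printf("%s\n", pick_pairing_path(&acl, &le));
        return 0;
}
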
 
+static int user_confirm_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_CONFIRM_REPLY,
+                       HCI_OP_USER_CONFIRM_REPLY, 0);
+}
+
+static int user_confirm_neg_reply(struct sock *sk, u16 index, void *data,
+                                                                       u16 len)
+{
+       struct mgmt_cp_user_confirm_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_CONFIRM_NEG_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_CONFIRM_NEG_REPLY,
+                       HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
+}
+
+static int user_passkey_reply(struct sock *sk, u16 index, void *data, u16 len)
+{
+       struct mgmt_cp_user_passkey_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_PASSKEY_REPLY,
+                       HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
+}
+
+static int user_passkey_neg_reply(struct sock *sk, u16 index, void *data,
+                                                                       u16 len)
+{
+       struct mgmt_cp_user_passkey_neg_reply *cp = (void *) data;
+
+       BT_DBG("");
+
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_USER_PASSKEY_NEG_REPLY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
+       return user_pairing_resp(sk, index, &cp->bdaddr,
+                       MGMT_OP_USER_PASSKEY_NEG_REPLY,
+                       HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
+}
+
 static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
                                                                u16 len)
 {
@@ -1461,15 +1712,17 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
        BT_DBG("");
 
        if (len != sizeof(*mgmt_cp))
-               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
@@ -1499,28 +1752,29 @@ static int read_local_oob_data(struct sock *sk, u16 index)
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                               ENETDOWN);
+                                               MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }
 
        if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
                err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                               EOPNOTSUPP);
+                                               MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
        }
 
-       if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
-               err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+       if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
+               err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+                                                       MGMT_STATUS_BUSY);
                goto unlock;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+       cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
@@ -1548,19 +1802,20 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
                                                                cp->randomizer);
        if (err < 0)
-               err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+               err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                                       MGMT_STATUS_FAILED);
        else
                err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
                                                                        0);
@@ -1582,19 +1837,19 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
        err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
        if (err < 0)
                err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
-                                                                       -err);
+                                               MGMT_STATUS_INVALID_PARAMS);
        else
                err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
                                                                NULL, 0);
@@ -1605,34 +1860,40 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
        return err;
 }
 
-static int start_discovery(struct sock *sk, u16 index)
+static int start_discovery(struct sock *sk, u16 index,
+                                               unsigned char *data, u16 len)
 {
-       u8 lap[3] = { 0x33, 0x8b, 0x9e };
-       struct hci_cp_inquiry cp;
+       struct mgmt_cp_start_discovery *cp = (void *) data;
        struct pending_cmd *cmd;
        struct hci_dev *hdev;
        int err;
 
        BT_DBG("hci%u", index);
 
+       if (len != sizeof(*cp))
+               return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+                                               MGMT_STATUS_INVALID_PARAMS);
+
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY,
+                                               MGMT_STATUS_NOT_POWERED);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }
 
-       memset(&cp, 0, sizeof(cp));
-       memcpy(&cp.lap, lap, 3);
-       cp.length  = 0x08;
-       cp.num_rsp = 0x00;
-
-       err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+       err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -1653,17 +1914,18 @@ static int stop_discovery(struct sock *sk, u16 index)
 
        hdev = hci_dev_get(index);
        if (!hdev)
-               return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+               return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY,
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+       cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }
 
-       err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+       err = hci_cancel_inquiry(hdev);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -1678,7 +1940,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
                                                                u16 len)
 {
        struct hci_dev *hdev;
-       struct pending_cmd *cmd;
        struct mgmt_cp_block_device *cp = (void *) data;
        int err;
 
@@ -1686,32 +1947,23 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
-                                                       EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
-                                                       ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
-       if (!cmd) {
-               err = -ENOMEM;
-               goto failed;
-       }
-
        err = hci_blacklist_add(hdev, &cp->bdaddr);
-
        if (err < 0)
-               err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
+               err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
+                                                       MGMT_STATUS_FAILED);
        else
                err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
                                                        NULL, 0);
 
-       mgmt_pending_remove(cmd);
-
-failed:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
@@ -1722,7 +1974,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
                                                                u16 len)
 {
        struct hci_dev *hdev;
-       struct pending_cmd *cmd;
        struct mgmt_cp_unblock_device *cp = (void *) data;
        int err;
 
@@ -1730,32 +1981,24 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
-                                                               EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
-                                                               ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock_bh(hdev);
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
-       if (!cmd) {
-               err = -ENOMEM;
-               goto failed;
-       }
-
        err = hci_blacklist_del(hdev, &cp->bdaddr);
 
        if (err < 0)
-               err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
+               err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
+                                               MGMT_STATUS_INVALID_PARAMS);
        else
                err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
                                                                NULL, 0);
 
-       mgmt_pending_remove(cmd);
-
-failed:
        hci_dev_unlock_bh(hdev);
        hci_dev_put(hdev);
 
@@ -1775,12 +2018,12 @@ static int set_fast_connectable(struct sock *sk, u16 index,
 
        if (len != sizeof(*cp))
                return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               EINVAL);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hdev = hci_dev_get(index);
        if (!hdev)
                return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               ENODEV);
+                                               MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
@@ -1798,14 +2041,14 @@ static int set_fast_connectable(struct sock *sk, u16 index,
                                                sizeof(acp), &acp);
        if (err < 0) {
                err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               -err);
+                                                       MGMT_STATUS_FAILED);
                goto done;
        }
 
        err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
        if (err < 0) {
                err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
-                                                               -err);
+                                                       MGMT_STATUS_FAILED);
                goto done;
        }
 
@@ -1883,11 +2126,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
        case MGMT_OP_SET_SERVICE_CACHE:
                err = set_service_cache(sk, index, buf + sizeof(*hdr), len);
                break;
-       case MGMT_OP_LOAD_KEYS:
-               err = load_keys(sk, index, buf + sizeof(*hdr), len);
+       case MGMT_OP_LOAD_LINK_KEYS:
+               err = load_link_keys(sk, index, buf + sizeof(*hdr), len);
                break;
-       case MGMT_OP_REMOVE_KEY:
-               err = remove_key(sk, index, buf + sizeof(*hdr), len);
+       case MGMT_OP_REMOVE_KEYS:
+               err = remove_keys(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_DISCONNECT:
                err = disconnect(sk, index, buf + sizeof(*hdr), len);
@@ -1908,10 +2151,18 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                err = pair_device(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_USER_CONFIRM_REPLY:
-               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1);
+               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_USER_CONFIRM_NEG_REPLY:
-               err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
+               err = user_confirm_neg_reply(sk, index, buf + sizeof(*hdr),
+                                                                       len);
+               break;
+       case MGMT_OP_USER_PASSKEY_REPLY:
+               err = user_passkey_reply(sk, index, buf + sizeof(*hdr), len);
+               break;
+       case MGMT_OP_USER_PASSKEY_NEG_REPLY:
+               err = user_passkey_neg_reply(sk, index, buf + sizeof(*hdr),
+                                                                       len);
                break;
        case MGMT_OP_SET_LOCAL_NAME:
                err = set_local_name(sk, index, buf + sizeof(*hdr), len);
@@ -1927,7 +2178,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                                                                        len);
                break;
        case MGMT_OP_START_DISCOVERY:
-               err = start_discovery(sk, index);
+               err = start_discovery(sk, index, buf + sizeof(*hdr), len);
                break;
        case MGMT_OP_STOP_DISCOVERY:
                err = stop_discovery(sk, index);
@@ -1944,7 +2195,8 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                break;
        default:
                BT_DBG("Unknown op %u", opcode);
-               err = cmd_status(sk, index, opcode, 0x01);
+               err = cmd_status(sk, index, opcode,
+                                               MGMT_STATUS_UNKNOWN_COMMAND);
                break;
        }
 
@@ -1958,14 +2210,26 @@ done:
        return err;
 }
 
-int mgmt_index_added(u16 index)
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+{
+       u8 *status = data;
+
+       cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+       mgmt_pending_remove(cmd);
+}
+
+int mgmt_index_added(struct hci_dev *hdev)
 {
-       return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL);
+       return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
-int mgmt_index_removed(u16 index)
+int mgmt_index_removed(struct hci_dev *hdev)
 {
-       return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL);
+       u8 status = ENODEV;
+
+       mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+
+       return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
 }
 
 struct cmd_lookup {
@@ -1993,17 +2257,22 @@ static void mode_rsp(struct pending_cmd *cmd, void *data)
        mgmt_pending_free(cmd);
 }
 
-int mgmt_powered(u16 index, u8 powered)
+int mgmt_powered(struct hci_dev *hdev, u8 powered)
 {
        struct mgmt_mode ev;
        struct cmd_lookup match = { powered, NULL };
        int ret;
 
-       mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match);
+       mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, mode_rsp, &match);
+
+       if (!powered) {
+               u8 status = ENETDOWN;
+               mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
+       }
 
        ev.val = powered;
 
-       ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk);
+       ret = mgmt_event(MGMT_EV_POWERED, hdev, &ev, sizeof(ev), match.sk);
 
        if (match.sk)
                sock_put(match.sk);
@@ -2011,17 +2280,17 @@ int mgmt_powered(u16 index, u8 powered)
        return ret;
 }
 
-int mgmt_discoverable(u16 index, u8 discoverable)
+int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
 {
        struct mgmt_mode ev;
        struct cmd_lookup match = { discoverable, NULL };
        int ret;
 
-       mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match);
+       mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, mode_rsp, &match);
 
        ev.val = discoverable;
 
-       ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev),
+       ret = mgmt_event(MGMT_EV_DISCOVERABLE, hdev, &ev, sizeof(ev),
                                                                match.sk);
 
        if (match.sk)
@@ -2030,17 +2299,17 @@ int mgmt_discoverable(u16 index, u8 discoverable)
        return ret;
 }
 
-int mgmt_connectable(u16 index, u8 connectable)
+int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
 {
        struct mgmt_mode ev;
        struct cmd_lookup match = { connectable, NULL };
        int ret;
 
-       mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match);
+       mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, mode_rsp, &match);
 
        ev.val = connectable;
 
-       ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk);
+       ret = mgmt_event(MGMT_EV_CONNECTABLE, hdev, &ev, sizeof(ev), match.sk);
 
        if (match.sk)
                sock_put(match.sk);
@@ -2048,9 +2317,25 @@ int mgmt_connectable(u16 index, u8 connectable)
        return ret;
 }
 
-int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
+int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
-       struct mgmt_ev_new_key ev;
+       u8 mgmt_err = mgmt_status(status);
+
+       if (scan & SCAN_PAGE)
+               mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
+                                               cmd_status_rsp, &mgmt_err);
+
+       if (scan & SCAN_INQUIRY)
+               mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
+                                               cmd_status_rsp, &mgmt_err);
+
+       return 0;
+}
+
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+                                                               u8 persistent)
+{
+       struct mgmt_ev_new_link_key ev;
 
        memset(&ev, 0, sizeof(ev));
 
@@ -2060,17 +2345,18 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
        memcpy(ev.key.val, key->val, 16);
        ev.key.pin_len = key->pin_len;
 
-       return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
+       return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
+int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type)
 {
-       struct mgmt_ev_connected ev;
+       struct mgmt_addr_info ev;
 
        bacpy(&ev.bdaddr, bdaddr);
-       ev.link_type = link_type;
+       ev.type = link_to_mgmt(link_type, addr_type);
 
-       return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
+       return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL);
 }
 
 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -2080,6 +2366,7 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
        struct mgmt_rp_disconnect rp;
 
        bacpy(&rp.bdaddr, &cp->bdaddr);
+       rp.status = 0;
 
        cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp));
 
@@ -2089,75 +2376,110 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data)
        mgmt_pending_remove(cmd);
 }
 
-int mgmt_disconnected(u16 index, bdaddr_t *bdaddr)
+static void remove_keys_rsp(struct pending_cmd *cmd, void *data)
 {
-       struct mgmt_ev_disconnected ev;
+       u8 *status = data;
+       struct mgmt_cp_remove_keys *cp = cmd->param;
+       struct mgmt_rp_remove_keys rp;
+
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.bdaddr, &cp->bdaddr);
+       if (status != NULL)
+               rp.status = *status;
+
+       cmd_complete(cmd->sk, cmd->index, MGMT_OP_REMOVE_KEYS, &rp,
+                                                               sizeof(rp));
+
+       mgmt_pending_remove(cmd);
+}
+
+int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                                               u8 addr_type)
+{
+       struct mgmt_addr_info ev;
        struct sock *sk = NULL;
        int err;
 
-       mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk);
+       mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
        bacpy(&ev.bdaddr, bdaddr);
+       ev.type = link_to_mgmt(link_type, addr_type);
 
-       err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk);
+       err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
 
        if (sk)
                sock_put(sk);
 
+       mgmt_pending_foreach(MGMT_OP_REMOVE_KEYS, hdev, remove_keys_rsp, NULL);
+
        return err;
 }
 
-int mgmt_disconnect_failed(u16 index)
+int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
 {
        struct pending_cmd *cmd;
+       u8 mgmt_err = mgmt_status(status);
        int err;
 
-       cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index);
+       cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
        if (!cmd)
                return -ENOENT;
 
-       err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO);
+       if (bdaddr) {
+               struct mgmt_rp_disconnect rp;
+
+               bacpy(&rp.bdaddr, bdaddr);
+               rp.status = status;
+
+               err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+                                                       &rp, sizeof(rp));
+       } else
+               err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT,
+                                                               mgmt_err);
 
        mgmt_pending_remove(cmd);
 
        return err;
 }
 
-int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                                               u8 addr_type, u8 status)
 {
        struct mgmt_ev_connect_failed ev;
 
-       bacpy(&ev.bdaddr, bdaddr);
-       ev.status = status;
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_mgmt(link_type, addr_type);
+       ev.status = mgmt_status(status);
 
-       return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
+       return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
+int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 {
        struct mgmt_ev_pin_code_request ev;
 
        bacpy(&ev.bdaddr, bdaddr);
        ev.secure = secure;
 
-       return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
+       return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
                                                                        NULL);
 }
 
-int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status)
 {
        struct pending_cmd *cmd;
        struct mgmt_rp_pin_code_reply rp;
        int err;
 
-       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index);
+       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
        if (!cmd)
                return -ENOENT;
 
        bacpy(&rp.bdaddr, bdaddr);
-       rp.status = status;
+       rp.status = mgmt_status(status);
 
-       err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp,
+       err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp,
                                                                sizeof(rp));
 
        mgmt_pending_remove(cmd);
@@ -2165,20 +2487,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
        return err;
 }
 
-int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status)
 {
        struct pending_cmd *cmd;
        struct mgmt_rp_pin_code_reply rp;
        int err;
 
-       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index);
+       cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
        if (!cmd)
                return -ENOENT;
 
        bacpy(&rp.bdaddr, bdaddr);
-       rp.status = status;
+       rp.status = mgmt_status(status);
 
-       err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
+       err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp,
                                                                sizeof(rp));
 
        mgmt_pending_remove(cmd);
@@ -2186,97 +2509,119 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
        return err;
 }
 
-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
-                                                       u8 confirm_hint)
+int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                               __le32 value, u8 confirm_hint)
 {
        struct mgmt_ev_user_confirm_request ev;
 
-       BT_DBG("hci%u", index);
+       BT_DBG("%s", hdev->name);
 
        bacpy(&ev.bdaddr, bdaddr);
        ev.confirm_hint = confirm_hint;
        put_unaligned_le32(value, &ev.value);
 
-       return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
+       return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
                                                                        NULL);
 }
 
-static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status,
-                                                               u8 opcode)
+int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct mgmt_ev_user_passkey_request ev;
+
+       BT_DBG("%s", hdev->name);
+
+       bacpy(&ev.bdaddr, bdaddr);
+
+       return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
+                                                                       NULL);
+}
+
+static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                       u8 status, u8 opcode)
 {
        struct pending_cmd *cmd;
        struct mgmt_rp_user_confirm_reply rp;
        int err;
 
-       cmd = mgmt_pending_find(opcode, index);
+       cmd = mgmt_pending_find(opcode, hdev);
        if (!cmd)
                return -ENOENT;
 
        bacpy(&rp.bdaddr, bdaddr);
-       rp.status = status;
-       err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp));
+       rp.status = mgmt_status(status);
+       err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp));
 
        mgmt_pending_remove(cmd);
 
        return err;
 }
 
-int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status)
 {
-       return confirm_reply_complete(index, bdaddr, status,
+       return user_pairing_resp_complete(hdev, bdaddr, status,
                                                MGMT_OP_USER_CONFIRM_REPLY);
 }
 
-int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev,
+                                               bdaddr_t *bdaddr, u8 status)
 {
-       return confirm_reply_complete(index, bdaddr, status,
+       return user_pairing_resp_complete(hdev, bdaddr, status,
                                        MGMT_OP_USER_CONFIRM_NEG_REPLY);
 }
 
-int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
+int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                                               u8 status)
+{
+       return user_pairing_resp_complete(hdev, bdaddr, status,
+                                               MGMT_OP_USER_PASSKEY_REPLY);
+}
+
+int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev,
+                                               bdaddr_t *bdaddr, u8 status)
+{
+       return user_pairing_resp_complete(hdev, bdaddr, status,
+                                       MGMT_OP_USER_PASSKEY_NEG_REPLY);
+}
+
+int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status)
 {
        struct mgmt_ev_auth_failed ev;
 
        bacpy(&ev.bdaddr, bdaddr);
-       ev.status = status;
+       ev.status = mgmt_status(status);
 
-       return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
+       return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 {
        struct pending_cmd *cmd;
-       struct hci_dev *hdev;
        struct mgmt_cp_set_local_name ev;
        int err;
 
        memset(&ev, 0, sizeof(ev));
        memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
+       cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
        if (!cmd)
                goto send_event;
 
        if (status) {
-               err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
+               err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+                                                       mgmt_status(status));
                goto failed;
        }
 
-       hdev = hci_dev_get(index);
-       if (hdev) {
-               hci_dev_lock_bh(hdev);
-               update_eir(hdev);
-               hci_dev_unlock_bh(hdev);
-               hci_dev_put(hdev);
-       }
+       update_eir(hdev);
 
-       err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+       err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev,
                                                                sizeof(ev));
        if (err < 0)
                goto failed;
 
 send_event:
-       err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+       err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
                                                        cmd ? cmd->sk : NULL);
 
 failed:
@@ -2285,29 +2630,31 @@ failed:
        return err;
 }
 
-int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
-                                                               u8 status)
+int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+                                               u8 *randomizer, u8 status)
 {
        struct pending_cmd *cmd;
        int err;
 
-       BT_DBG("hci%u status %u", index, status);
+       BT_DBG("%s status %u", hdev->name, status);
 
-       cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+       cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
        if (!cmd)
                return -ENOENT;
 
        if (status) {
-               err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                                       EIO);
+               err = cmd_status(cmd->sk, hdev->id,
+                                               MGMT_OP_READ_LOCAL_OOB_DATA,
+                                               mgmt_status(status));
        } else {
                struct mgmt_rp_read_local_oob_data rp;
 
                memcpy(rp.hash, hash, sizeof(rp.hash));
                memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
 
-               err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                                       &rp, sizeof(rp));
+               err = cmd_complete(cmd->sk, hdev->id,
+                                               MGMT_OP_READ_LOCAL_OOB_DATA,
+                                               &rp, sizeof(rp));
        }
 
        mgmt_pending_remove(cmd);
@@ -2315,14 +2662,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
        return err;
 }
 
-int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
-                                                               u8 *eir)
+int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                               u8 addr_type, u8 *dev_class, s8 rssi, u8 *eir)
 {
        struct mgmt_ev_device_found ev;
 
        memset(&ev, 0, sizeof(ev));
 
-       bacpy(&ev.bdaddr, bdaddr);
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_mgmt(link_type, addr_type);
        ev.rssi = rssi;
 
        if (eir)
@@ -2331,10 +2679,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
        if (dev_class)
                memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
 
-       return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+       return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
+int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name)
 {
        struct mgmt_ev_remote_name ev;
 
@@ -2343,37 +2691,79 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
        bacpy(&ev.bdaddr, bdaddr);
        memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
 
-       return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+       return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL);
+}
+
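+/* Fail a pending Start Discovery command with the given HCI status. */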
+int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+       if (!cmd)
+               return -ENOENT;
+
+       err = cmd_status(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status));
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+       if (!cmd)
+               return -ENOENT;
+
+       err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
+       mgmt_pending_remove(cmd);
+
+       return err;
 }
 
-int mgmt_discovering(u16 index, u8 discovering)
+int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
 {
-       return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
+       struct pending_cmd *cmd;
+
+       if (discovering)
+               cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+       else
+               cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+
+       if (cmd != NULL) {
+               cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0);
+               mgmt_pending_remove(cmd);
+       }
+
+       return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering,
                                                sizeof(discovering), NULL);
 }
 
-int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
        struct pending_cmd *cmd;
        struct mgmt_ev_device_blocked ev;
 
-       cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+       cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
 
        bacpy(&ev.bdaddr, bdaddr);
 
-       return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
-                                               cmd ? cmd->sk : NULL);
+       return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
+                                                       cmd ? cmd->sk : NULL);
 }
 
-int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
        struct pending_cmd *cmd;
        struct mgmt_ev_device_unblocked ev;
 
-       cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+       cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
 
        bacpy(&ev.bdaddr, bdaddr);
 
-       return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
-                                               cmd ? cmd->sk : NULL);
+       return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
+                                                       cmd ? cmd->sk : NULL);
 }
index 4e32e18..8743f36 100644 (file)
@@ -65,7 +65,8 @@ static DEFINE_MUTEX(rfcomm_mutex);
 
 static LIST_HEAD(session_list);
 
-static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len);
+static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
+                                                       u32 priority);
 static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci);
 static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci);
 static int rfcomm_queue_disc(struct rfcomm_dlc *d);
@@ -377,13 +378,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
 static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
 {
        struct rfcomm_dlc *d;
-       struct list_head *p;
 
-       list_for_each(p, &s->dlcs) {
-               d = list_entry(p, struct rfcomm_dlc, list);
+       list_for_each_entry(d, &s->dlcs, list)
                if (d->dlci == dlci)
                        return d;
-       }
+
        return NULL;
 }
 
@@ -749,19 +748,34 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d
 }
 
 /* ---- RFCOMM frame sending ---- */
-static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
+static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len,
+                                                       u32 priority)
 {
        struct socket *sock = s->sock;
+       struct sock *sk = sock->sk;
        struct kvec iv = { data, len };
        struct msghdr msg;
 
-       BT_DBG("session %p len %d", s, len);
+       BT_DBG("session %p len %d priority %u", s, len, priority);
+
+       if (sk->sk_priority != priority) {
+               lock_sock(sk);
+               sk->sk_priority = priority;
+               release_sock(sk);
+       }
 
        memset(&msg, 0, sizeof(msg));
 
        return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
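+/* Send a fixed-size RFCOMM control frame (SABM/UA/DISC/DM) at maximum priority. */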
+static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
+{
+       BT_DBG("%p cmd %u", s, cmd->ctrl);
+
+       return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd), HCI_PRIO_MAX);
+}
+
 static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
 {
        struct rfcomm_cmd cmd;
@@ -773,7 +787,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
        cmd.len  = __len8(0);
        cmd.fcs  = __fcs2((u8 *) &cmd);
 
-       return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+       return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
@@ -787,7 +801,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
        cmd.len  = __len8(0);
        cmd.fcs  = __fcs2((u8 *) &cmd);
 
-       return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+       return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
@@ -801,7 +815,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
        cmd.len  = __len8(0);
        cmd.fcs  = __fcs2((u8 *) &cmd);
 
-       return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+       return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_queue_disc(struct rfcomm_dlc *d)
@@ -815,6 +829,8 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
        if (!skb)
                return -ENOMEM;
 
+       skb->priority = HCI_PRIO_MAX;
+
        cmd = (void *) __skb_put(skb, sizeof(*cmd));
        cmd->addr = d->addr;
        cmd->ctrl = __ctrl(RFCOMM_DISC, 1);
@@ -837,7 +853,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
        cmd.len  = __len8(0);
        cmd.fcs  = __fcs2((u8 *) &cmd);
 
-       return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd));
+       return rfcomm_send_cmd(s, &cmd);
 }
 
 static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
@@ -862,7 +878,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d)
@@ -904,7 +920,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
@@ -942,7 +958,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
@@ -969,7 +985,7 @@ static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig)
@@ -996,7 +1012,7 @@ static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
@@ -1018,7 +1034,7 @@ static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
@@ -1040,7 +1056,7 @@ static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len)
@@ -1091,7 +1107,7 @@ static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
 
        *ptr = __fcs(buf); ptr++;
 
-       return rfcomm_send_frame(s, buf, ptr - buf);
+       return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX);
 }
 
 static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
@@ -1769,7 +1785,8 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
                return skb_queue_len(&d->tx_queue);
 
        while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) {
-               err = rfcomm_send_frame(d->session, skb->data, skb->len);
+               err = rfcomm_send_frame(d->session, skb->data, skb->len,
+                                                       skb->priority);
                if (err < 0) {
                        skb_queue_head(&d->tx_queue, skb);
                        break;
@@ -2120,15 +2137,13 @@ static struct hci_cb rfcomm_cb = {
 static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
 {
        struct rfcomm_session *s;
-       struct list_head *pp, *p;
 
        rfcomm_lock();
 
-       list_for_each(p, &session_list) {
-               s = list_entry(p, struct rfcomm_session, list);
-               list_for_each(pp, &s->dlcs) {
+       list_for_each_entry(s, &session_list, list) {
+               struct rfcomm_dlc *d;
+               list_for_each_entry(d, &s->dlcs, list) {
                        struct sock *sk = s->sock->sk;
-                       struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list);
 
                        seq_printf(f, "%s %s %ld %d %d %d %d\n",
                                                batostr(&bt_sk(sk)->src),
index 5417f61..aea2bdd 100644 (file)
@@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        break;
                }
 
+               skb->priority = sk->sk_priority;
+
                err = rfcomm_dlc_send(d, skb);
                if (err < 0) {
                        kfree_skb(skb);
index c258796..fa8f4de 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/capability.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -65,7 +66,7 @@ struct rfcomm_dev {
        struct rfcomm_dlc       *dlc;
        struct tty_struct       *tty;
        wait_queue_head_t       wait;
-       struct tasklet_struct   wakeup_task;
+       struct work_struct      wakeup_task;
 
        struct device           *tty_dev;
 
@@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb);
 static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err);
 static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
 
-static void rfcomm_tty_wakeup(unsigned long arg);
+static void rfcomm_tty_wakeup(struct work_struct *work);
 
 /* ---- Device functions ---- */
 static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
@@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev)
 static struct rfcomm_dev *__rfcomm_dev_get(int id)
 {
        struct rfcomm_dev *dev;
-       struct list_head  *p;
 
-       list_for_each(p, &rfcomm_dev_list) {
-               dev = list_entry(p, struct rfcomm_dev, list);
+       list_for_each_entry(dev, &rfcomm_dev_list, list)
                if (dev->id == id)
                        return dev;
-       }
 
        return NULL;
 }
@@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
 
 static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
 {
-       struct rfcomm_dev *dev;
+       struct rfcomm_dev *dev, *entry;
        struct list_head *head = &rfcomm_dev_list, *p;
        int err = 0;
 
@@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
        if (req->dev_id < 0) {
                dev->id = 0;
 
-               list_for_each(p, &rfcomm_dev_list) {
-                       if (list_entry(p, struct rfcomm_dev, list)->id != dev->id)
+               list_for_each_entry(entry, &rfcomm_dev_list, list) {
+                       if (entry->id != dev->id)
                                break;
 
                        dev->id++;
@@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
        } else {
                dev->id = req->dev_id;
 
-               list_for_each(p, &rfcomm_dev_list) {
-                       struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list);
-
+               list_for_each_entry(entry, &rfcomm_dev_list, list) {
                        if (entry->id == dev->id) {
                                err = -EADDRINUSE;
                                goto out;
@@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
        atomic_set(&dev->opened, 0);
 
        init_waitqueue_head(&dev->wait);
-       tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev);
+       INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup);
 
        skb_queue_head_init(&dev->pending);
 
@@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
        struct rfcomm_dev *dev = (void *) skb->sk;
        atomic_sub(skb->truesize, &dev->wmem_alloc);
        if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
-               tasklet_schedule(&dev->wakeup_task);
+               queue_work(system_nrt_wq, &dev->wakeup_task);
        rfcomm_dev_put(dev);
 }
 
@@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg)
 
 static int rfcomm_get_dev_list(void __user *arg)
 {
+       struct rfcomm_dev *dev;
        struct rfcomm_dev_list_req *dl;
        struct rfcomm_dev_info *di;
-       struct list_head *p;
        int n = 0, size, err;
        u16 dev_num;
 
@@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg)
 
        read_lock_bh(&rfcomm_dev_lock);
 
-       list_for_each(p, &rfcomm_dev_list) {
-               struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
+       list_for_each_entry(dev, &rfcomm_dev_list, list) {
                if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
                        continue;
                (di + n)->id      = dev->id;
@@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
 }
 
 /* ---- TTY functions ---- */
-static void rfcomm_tty_wakeup(unsigned long arg)
+static void rfcomm_tty_wakeup(struct work_struct *work)
 {
-       struct rfcomm_dev *dev = (void *) arg;
+       struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev,
+                                                               wakeup_task);
        struct tty_struct *tty = dev->tty;
        if (!tty)
                return;
@@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
                rfcomm_dlc_close(dev->dlc, 0);
 
                clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
-               tasklet_kill(&dev->wakeup_task);
+               cancel_work_sync(&dev->wakeup_task);
 
                rfcomm_dlc_lock(dev->dlc);
                tty->driver_data = NULL;
@@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = {
 
 int __init rfcomm_init_ttys(void)
 {
+       int error;
+
        rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
        if (!rfcomm_tty_driver)
-               return -1;
+               return -ENOMEM;
 
        rfcomm_tty_driver->owner        = THIS_MODULE;
        rfcomm_tty_driver->driver_name  = "rfcomm";
@@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void)
        rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
        tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
 
-       if (tty_register_driver(rfcomm_tty_driver)) {
+       error = tty_register_driver(rfcomm_tty_driver);
+       if (error) {
                BT_ERR("Can't register RFCOMM TTY driver");
                put_tty_driver(rfcomm_tty_driver);
-               return -1;
+               return error;
        }
 
        BT_INFO("RFCOMM TTY layer initialized");
index 759b635..0b96737 100644 (file)
@@ -181,7 +181,8 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
        if (!skb)
                return;
 
-       hci_send_acl(conn->hcon, skb, 0);
+       skb->priority = HCI_PRIO_MAX;
+       hci_send_acl(conn->hchan, skb, 0);
 
        mod_timer(&conn->security_timer, jiffies +
                                        msecs_to_jiffies(SMP_TIMEOUT));
@@ -231,6 +232,18 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
        return 0;
 }
 
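+/*
+ * Abort an ongoing pairing: optionally notify the peer with a Pairing
+ * Failed PDU, report the failure to the management interface, stop the
+ * security timer and destroy the SMP channel.
+ */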
+static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
+{
+       if (send)
+               smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
+                                                               &reason);
+
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->pend);
+       mgmt_auth_failed(conn->hcon->hdev, conn->dst, reason);
+       del_timer(&conn->security_timer);
+       smp_chan_destroy(conn);
+}
+
 static void confirm_work(struct work_struct *work)
 {
        struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
@@ -269,8 +282,7 @@ static void confirm_work(struct work_struct *work)
        return;
 
 error:
-       smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-       smp_chan_destroy(conn);
+       smp_failure(conn, reason, 1);
 }
 
 static void random_work(struct work_struct *work)
@@ -353,8 +365,7 @@ static void random_work(struct work_struct *work)
        return;
 
 error:
-       smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
-       smp_chan_destroy(conn);
+       smp_failure(conn, reason, 1);
 }
 
 static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
@@ -378,7 +389,15 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
 
 void smp_chan_destroy(struct l2cap_conn *conn)
 {
-       kfree(conn->smp_chan);
+       struct smp_chan *smp = conn->smp_chan;
+
+       clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+
+       if (smp->tfm)
+               crypto_free_blkcipher(smp->tfm);
+
+       kfree(smp);
+       conn->smp_chan = NULL;
        hci_conn_put(conn->hcon);
 }
 
@@ -646,6 +665,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
                break;
 
        case SMP_CMD_PAIRING_FAIL:
+               smp_failure(conn, skb->data[0], 0);
                reason = 0;
                err = -EPERM;
                break;
@@ -691,8 +711,7 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
 done:
        if (reason)
-               smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
-                                                               &reason);
+               smp_failure(conn, reason, 1);
 
        kfree_skb(skb);
        return err;
index feb77ea..71773b0 100644 (file)
@@ -170,8 +170,11 @@ static int br_set_mac_address(struct net_device *dev, void *p)
                return -EINVAL;
 
        spin_lock_bh(&br->lock);
-       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-       br_stp_change_bridge_id(br, addr->sa_data);
+       if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
+               memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+               br_fdb_change_mac_address(br, addr->sa_data);
+               br_stp_change_bridge_id(br, addr->sa_data);
+       }
        br->flags |= BR_SET_MAC_ADDR;
        spin_unlock_bh(&br->lock);
 
@@ -186,7 +189,8 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        strcpy(info->bus_info, "N/A");
 }
 
-static u32 br_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t br_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        struct net_bridge *br = netdev_priv(dev);
 
@@ -341,10 +345,10 @@ void br_dev_setup(struct net_device *dev)
        dev->priv_flags = IFF_EBRIDGE;
 
        dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-                       NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
+                       NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
                        NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
        dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
-                          NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
                           NETIF_F_HW_VLAN_TX;
 
        br->dev = dev;
index c8e7861..a1429af 100644 (file)
@@ -28,7 +28,8 @@
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                      const unsigned char *addr);
-static void fdb_notify(const struct net_bridge_fdb_entry *, int);
+static void fdb_notify(struct net_bridge *br,
+                      const struct net_bridge_fdb_entry *, int);
 
 static u32 fdb_salt __read_mostly;
 
@@ -80,10 +81,10 @@ static void fdb_rcu_free(struct rcu_head *head)
        kmem_cache_free(br_fdb_cache, ent);
 }
 
-static inline void fdb_delete(struct net_bridge_fdb_entry *f)
+static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 {
-       fdb_notify(f, RTM_DELNEIGH);
        hlist_del_rcu(&f->hlist);
+       fdb_notify(br, f, RTM_DELNEIGH);
        call_rcu(&f->rcu, fdb_rcu_free);
 }
 
@@ -114,7 +115,7 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
                                }
 
                                /* delete old one */
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                                goto insert;
                        }
                }
@@ -126,6 +127,18 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
        spin_unlock_bh(&br->hash_lock);
 }
 
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+{
+       struct net_bridge_fdb_entry *f;
+
+       /* If old entry was unassociated with any port, then delete it. */
+       f = __br_fdb_get(br, br->dev->dev_addr);
+       if (f && f->is_local && !f->dst)
+               fdb_delete(br, f);
+
+       fdb_insert(br, NULL, newaddr);
+}
+
 void br_fdb_cleanup(unsigned long _data)
 {
        struct net_bridge *br = (struct net_bridge *)_data;
@@ -144,7 +157,7 @@ void br_fdb_cleanup(unsigned long _data)
                                continue;
                        this_timer = f->updated + delay;
                        if (time_before_eq(this_timer, jiffies))
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                        else if (time_before(this_timer, next_timer))
                                next_timer = this_timer;
                }
@@ -165,7 +178,7 @@ void br_fdb_flush(struct net_bridge *br)
                struct hlist_node *h, *n;
                hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
                        if (!f->is_static)
-                               fdb_delete(f);
+                               fdb_delete(br, f);
                }
        }
        spin_unlock_bh(&br->hash_lock);
@@ -209,7 +222,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
                                }
                        }
 
-                       fdb_delete(f);
+                       fdb_delete(br, f);
                skip_delete: ;
                }
        }
@@ -249,7 +262,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
                ret = 0;
        else {
                fdb = __br_fdb_get(port->br, addr);
-               ret = fdb && fdb->dst->dev != dev &&
+               ret = fdb && fdb->dst && fdb->dst->dev != dev &&
                        fdb->dst->state == BR_STATE_FORWARDING;
        }
        rcu_read_unlock();
@@ -281,6 +294,10 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
                        if (has_expired(br, f))
                                continue;
 
+                       /* ignore pseudo entry for local MAC address */
+                       if (!f->dst)
+                               continue;
+
                        if (skip) {
                                --skip;
                                continue;
@@ -347,7 +364,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                fdb->is_static = 0;
                fdb->updated = fdb->used = jiffies;
                hlist_add_head_rcu(&fdb->hlist, head);
-               fdb_notify(fdb, RTM_NEWNEIGH);
        }
        return fdb;
 }
@@ -371,7 +387,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                br_warn(br, "adding interface %s with same address "
                       "as a received packet\n",
                       source->dev->name);
-               fdb_delete(fdb);
+               fdb_delete(br, fdb);
        }
 
        fdb = fdb_create(head, source, addr);
@@ -379,6 +395,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                return -ENOMEM;
 
        fdb->is_local = fdb->is_static = 1;
+       fdb_notify(br, fdb, RTM_NEWNEIGH);
        return 0;
 }
 
@@ -424,9 +441,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                }
        } else {
                spin_lock(&br->hash_lock);
-               if (likely(!fdb_find(head, addr)))
-                       fdb_create(head, source, addr);
-
+               if (likely(!fdb_find(head, addr))) {
+                       fdb = fdb_create(head, source, addr);
+                       if (fdb)
+                               fdb_notify(br, fdb, RTM_NEWNEIGH);
+               }
                /* else  we lose race and someone else inserts
                 * it first, don't bother updating
                 */
@@ -446,7 +465,7 @@ static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
                return NUD_REACHABLE;
 }
 
-static int fdb_fill_info(struct sk_buff *skb,
+static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
                         const struct net_bridge_fdb_entry *fdb,
                         u32 pid, u32 seq, int type, unsigned int flags)
 {
@@ -459,14 +478,13 @@ static int fdb_fill_info(struct sk_buff *skb,
        if (nlh == NULL)
                return -EMSGSIZE;
 
-
        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = 0;
        ndm->ndm_type    = 0;
-       ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+       ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
        ndm->ndm_state   = fdb_to_nud(fdb);
 
        NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
@@ -491,9 +509,10 @@ static inline size_t fdb_nlmsg_size(void)
                + nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+static void fdb_notify(struct net_bridge *br,
+                      const struct net_bridge_fdb_entry *fdb, int type)
 {
-       struct net *net = dev_net(fdb->dst->dev);
+       struct net *net = dev_net(br->dev);
        struct sk_buff *skb;
        int err = -ENOBUFS;
 
@@ -501,7 +520,7 @@ static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
        if (skb == NULL)
                goto errout;
 
-       err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+       err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -538,7 +557,7 @@ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                if (idx < cb->args[0])
                                        goto skip;
 
-                               if (fdb_fill_info(skb, f,
+                               if (fdb_fill_info(skb, br, f,
                                                  NETLINK_CB(cb->skb).pid,
                                                  cb->nlh->nlmsg_seq,
                                                  RTM_NEWNEIGH,
@@ -556,7 +575,7 @@ skip:
        return skb->len;
 }
 
-/* Create new static fdb entry */
+/* Update (create or replace) forwarding database entry */
 static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                         __u16 state, __u16 flags)
 {
@@ -572,19 +591,25 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
                fdb = fdb_create(head, source, addr);
                if (!fdb)
                        return -ENOMEM;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
        } else {
                if (flags & NLM_F_EXCL)
                        return -EEXIST;
+       }
+
+       if (fdb_to_nud(fdb) != state) {
+               if (state & NUD_PERMANENT)
+                       fdb->is_local = fdb->is_static = 1;
+               else if (state & NUD_NOARP) {
+                       fdb->is_local = 0;
+                       fdb->is_static = 1;
+               } else
+                       fdb->is_local = fdb->is_static = 0;
 
-               if (flags & NLM_F_REPLACE)
-                       fdb->updated = fdb->used = jiffies;
-               fdb->is_local = fdb->is_static = 0;
+               fdb->updated = fdb->used = jiffies;
+               fdb_notify(br, fdb, RTM_NEWNEIGH);
        }
 
-       if (state & NUD_PERMANENT)
-               fdb->is_local = fdb->is_static = 1;
-       else if (state & NUD_NOARP)
-               fdb->is_static = 1;
        return 0;
 }
 
@@ -627,6 +652,11 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                return -EINVAL;
        }
 
+       if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
+               pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
+               return -EINVAL;
+       }
+
        p = br_port_get_rtnl(dev);
        if (p == NULL) {
                pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
@@ -634,9 +664,15 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                return -EINVAL;
        }
 
-       spin_lock_bh(&p->br->hash_lock);
-       err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
-       spin_unlock_bh(&p->br->hash_lock);
+       if (ndm->ndm_flags & NTF_USE) {
+               rcu_read_lock();
+               br_fdb_update(p->br, p, addr);
+               rcu_read_unlock();
+       } else {
+               spin_lock_bh(&p->br->hash_lock);
+               err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
+               spin_unlock_bh(&p->br->hash_lock);
+       }
 
        return err;
 }
@@ -651,7 +687,7 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
        if (!fdb)
                return -ENOENT;
 
-       fdb_delete(fdb);
+       fdb_delete(p->br, fdb);
        return 0;
 }
 
index ee64287..61f6534 100644 (file)
@@ -98,7 +98,7 @@ static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 /* called with rcu_read_lock */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-       if (should_deliver(to, skb)) {
+       if (to && should_deliver(to, skb)) {
                __br_deliver(to, skb);
                return;
        }
index f603e5b..0a942fb 100644 (file)
@@ -296,10 +296,11 @@ int br_min_mtu(const struct net_bridge *br)
 /*
  * Recomputes features using slave's features
  */
-u32 br_features_recompute(struct net_bridge *br, u32 features)
+netdev_features_t br_features_recompute(struct net_bridge *br,
+       netdev_features_t features)
 {
        struct net_bridge_port *p;
-       u32 mask;
+       netdev_features_t mask;
 
        if (list_empty(&br->port_list))
                return features;
index a5f4e57..568d5bf 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <net/ip.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/mld.h>
 #include <net/addrconf.h>
@@ -36,7 +36,7 @@
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
        if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
@@ -52,7 +52,7 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
        switch (a->proto) {
        case htons(ETH_P_IP):
                return a->u.ip4 == b->u.ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
 #endif
@@ -65,7 +65,7 @@ static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
        return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
                                const struct in6_addr *ip)
 {
@@ -79,7 +79,7 @@ static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
        switch (ip->proto) {
        case htons(ETH_P_IP):
                return __br_ip4_hash(mdb, ip->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return __br_ip6_hash(mdb, &ip->u.ip6);
 #endif
@@ -121,13 +121,13 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
        return br_mdb_ip_get(mdb, &br_dst);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct net_bridge_mdb_entry *br_mdb_ip6_get(
        struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
 {
        struct br_ip br_dst;
 
-       ipv6_addr_copy(&br_dst.u.ip6, dst);
+       br_dst.u.ip6 = *dst;
        br_dst.proto = htons(ETH_P_IPV6);
 
        return br_mdb_ip_get(mdb, &br_dst);
@@ -152,9 +152,9 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
        case htons(ETH_P_IP):
                ip.u.ip4 = ip_hdr(skb)->daddr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
-               ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+               ip.u.ip6 = ipv6_hdr(skb)->daddr;
                break;
 #endif
        default:
@@ -411,7 +411,7 @@ out:
        return skb;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                                                    const struct in6_addr *group)
 {
@@ -474,7 +474,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        mldq->mld_cksum = 0;
        mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
        mldq->mld_reserved = 0;
-       ipv6_addr_copy(&mldq->mld_mca, group);
+       mldq->mld_mca = *group;
 
        /* checksum */
        mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -496,7 +496,7 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
        switch (addr->proto) {
        case htons(ETH_P_IP):
                return br_ip4_multicast_alloc_query(br, addr->u.ip4);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
 #endif
@@ -773,7 +773,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        return br_multicast_add_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      const struct in6_addr *group)
@@ -783,7 +783,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
        if (!ipv6_is_transient_multicast(group))
                return 0;
 
-       ipv6_addr_copy(&br_group.u.ip6, group);
+       br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
 
        return br_multicast_add_group(br, port, &br_group);
@@ -845,7 +845,7 @@ static void br_multicast_send_query(struct net_bridge *br,
        br_group.proto = htons(ETH_P_IP);
        __br_multicast_send_query(br, port, &br_group);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        br_group.proto = htons(ETH_P_IPV6);
        __br_multicast_send_query(br, port, &br_group);
 #endif
@@ -989,7 +989,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                                        struct net_bridge_port *port,
                                        struct sk_buff *skb)
@@ -1185,7 +1185,7 @@ out:
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_query(struct net_bridge *br,
                                  struct net_bridge_port *port,
                                  struct sk_buff *skb)
@@ -1334,7 +1334,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
        br_multicast_leave_group(br, port, &br_group);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         const struct in6_addr *group)
@@ -1344,7 +1344,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
        if (!ipv6_is_transient_multicast(group))
                return;
 
-       ipv6_addr_copy(&br_group.u.ip6, group);
+       br_group.u.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
 
        br_multicast_leave_group(br, port, &br_group);
@@ -1449,7 +1449,7 @@ err_out:
        return err;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int br_multicast_ipv6_rcv(struct net_bridge *br,
                                 struct net_bridge_port *port,
                                 struct sk_buff *skb)
@@ -1458,6 +1458,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
        const struct ipv6hdr *ip6h;
        u8 icmp6_type;
        u8 nexthdr;
+       __be16 frag_off;
        unsigned len;
        int offset;
        int err;
@@ -1483,7 +1484,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                return -EINVAL;
 
        nexthdr = ip6h->nexthdr;
-       offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+       offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
 
        if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
                return 0;
@@ -1595,7 +1596,7 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return br_multicast_ipv4_rcv(br, port, skb);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                return br_multicast_ipv6_rcv(br, port, skb);
 #endif
index d6ec372..834dfab 100644 (file)
@@ -356,7 +356,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
        if (!skb->dev)
                goto free_skb;
        dst = skb_dst(skb);
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh->hh.hh_len) {
                neigh_hh_bridge(&neigh->hh, skb);
                skb->dev = nf_bridge->physindev;
index d7d6fb0..57dcd14 100644 (file)
@@ -56,7 +56,7 @@ struct br_ip
 {
        union {
                __be32  ip4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct in6_addr ip6;
 #endif
        } u;
@@ -348,6 +348,7 @@ extern void br_fdb_fini(void);
 extern void br_fdb_flush(struct net_bridge *br);
 extern void br_fdb_changeaddr(struct net_bridge_port *p,
                              const unsigned char *newaddr);
+extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
 extern void br_fdb_cleanup(unsigned long arg);
 extern void br_fdb_delete_by_port(struct net_bridge *br,
                                  const struct net_bridge_port *p, int do_all);
@@ -387,7 +388,8 @@ extern int br_add_if(struct net_bridge *br,
 extern int br_del_if(struct net_bridge *br,
              struct net_device *dev);
 extern int br_min_mtu(const struct net_bridge *br);
-extern u32 br_features_recompute(struct net_bridge *br, u32 features);
+extern netdev_features_t br_features_recompute(struct net_bridge *br,
+       netdev_features_t features);
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
index 2ed0056..99c8566 100644 (file)
@@ -55,9 +55,10 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
                return false;
        if (info->bitmask & EBT_IP6_PROTO) {
                uint8_t nexthdr = ih6->nexthdr;
+               __be16 frag_off;
                int offset_ph;
 
-               offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr);
+               offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
                if (offset_ph == -1)
                        return false;
                if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
index 6e5a8bb..88d7d1d 100644 (file)
@@ -113,6 +113,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                const struct ipv6hdr *ih;
                struct ipv6hdr _iph;
                uint8_t nexthdr;
+               __be16 frag_off;
                int offset_ph;
 
                ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
@@ -123,7 +124,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
                printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
                       &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
                nexthdr = ih->nexthdr;
-               offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
+               offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
                if (offset_ph == -1)
                        goto out;
                print_ports(skb, nexthdr, offset_ph);
index 529750d..936361e 100644 (file)
@@ -40,3 +40,14 @@ config CAIF_NETDEV
        If you select to build it as a built-in then the main CAIF device must
        also be a built-in.
        If unsure say Y.
+
+config CAIF_USB
+       tristate "CAIF USB support"
+       depends on CAIF
+       default n
+       ---help---
+       Say Y if you are using CAIF over USB CDC NCM.
+       This can be either built-in or a loadable module.
+       If you select to build it as a built-in then the main CAIF device must
+       also be a built-in.
+       If unsure say N.
index ebcd4e7..cc2b511 100644 (file)
@@ -10,5 +10,6 @@ caif-y := caif_dev.o \
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
 obj-$(CONFIG_CAIF) += caif_socket.o
+obj-$(CONFIG_CAIF_USB) += caif_usb.o
 
 export-y := caif.o
index f1fa1f6..b0ce14f 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
@@ -24,6 +25,7 @@
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
+#include <net/caif/cfserl.h>
 
 MODULE_LICENSE("GPL");
 
@@ -33,6 +35,10 @@ struct caif_device_entry {
        struct list_head list;
        struct net_device *netdev;
        int __percpu *pcpu_refcnt;
+       spinlock_t flow_lock;
+       struct sk_buff *xoff_skb;
+       void (*xoff_skb_dtor)(struct sk_buff *skb);
+       bool xoff;
 };
 
 struct caif_device_entry_list {
@@ -47,13 +53,14 @@ struct caif_net {
 };
 
 static int caif_net_id;
+static int q_high = 50; /* Percent */
 
 struct cfcnfg *get_cfcnfg(struct net *net)
 {
        struct caif_net *caifn;
-       BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (!caifn)
+               return NULL;
        return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -61,9 +68,9 @@ EXPORT_SYMBOL(get_cfcnfg);
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
        struct caif_net *caifn;
-       BUG_ON(!net);
        caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (!caifn)
+               return NULL;
        return &caifn->caifdevs;
 }
 
@@ -92,7 +99,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
        struct caif_device_entry *caifd;
 
        caifdevs = caif_device_list(dev_net(dev));
-       BUG_ON(!caifdevs);
+       if (!caifdevs)
+               return NULL;
 
        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
@@ -112,7 +120,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
-       BUG_ON(!caifdevs);
+       if (!caifdevs)
+               return NULL;
+
        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
                        return caifd;
@@ -120,15 +130,106 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        return NULL;
 }
 
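+/*
+ * skb destructor installed while flow is off: restores the original
+ * destructor, clears the xoff state and signals flow-on to the layer above.
+ */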
+void caif_flow_cb(struct sk_buff *skb)
+{
+       struct caif_device_entry *caifd;
+       void (*dtor)(struct sk_buff *skb) = NULL;
+       bool send_xoff;
+
+       WARN_ON(skb->dev == NULL);
+
+       rcu_read_lock();
+       caifd = caif_get(skb->dev);
+       caifd_hold(caifd);
+       rcu_read_unlock();
+
+       spin_lock_bh(&caifd->flow_lock);
+       send_xoff = caifd->xoff;
+       caifd->xoff = 0;
+       if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
+               WARN_ON(caifd->xoff_skb != skb);
+               dtor = caifd->xoff_skb_dtor;
+               caifd->xoff_skb = NULL;
+               caifd->xoff_skb_dtor = NULL;
+       }
+       spin_unlock_bh(&caifd->flow_lock);
+
+       if (dtor)
+               dtor(skb);
+
+       if (send_xoff)
+               caifd->layer.up->
+                       ctrlcmd(caifd->layer.up,
+                               _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
+                               caifd->layer.id);
+       caifd_put(caifd);
+}
+
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
-       int err;
+       int err, high = 0, qlen = 0;
+       struct caif_dev_common *caifdev;
        struct caif_device_entry *caifd =
            container_of(layer, struct caif_device_entry, layer);
        struct sk_buff *skb;
+       struct netdev_queue *txq;
+
+       rcu_read_lock_bh();
 
        skb = cfpkt_tonative(pkt);
        skb->dev = caifd->netdev;
+       skb_reset_network_header(skb);
+       skb->protocol = htons(ETH_P_CAIF);
+       caifdev = netdev_priv(caifd->netdev);
+
+       /* Check if we need to handle xoff */
+       if (likely(caifd->netdev->tx_queue_len == 0))
+               goto noxoff;
+
+       if (unlikely(caifd->xoff))
+               goto noxoff;
+
+       if (likely(!netif_queue_stopped(caifd->netdev))) {
+               /* If we run with a TX queue, check if the queue is too long */
+               txq = netdev_get_tx_queue(skb->dev, 0);
+               qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));
+
+               if (likely(qlen == 0))
+                       goto noxoff;
+
+               high = (caifd->netdev->tx_queue_len * q_high) / 100;
+               if (likely(qlen < high))
+                       goto noxoff;
+       }
+
+       /* Hold lock while accessing xoff */
+       spin_lock_bh(&caifd->flow_lock);
+       if (caifd->xoff) {
+               spin_unlock_bh(&caifd->flow_lock);
+               goto noxoff;
+       }
+
+       /*
+        * Handle flow-off: we do this by temporarily hijacking this
+        * skb's destructor function and replacing it with our own
+        * flow-on callback. The callback will set flow on again and
+        * call the original destructor.
+        */
+
+       pr_debug("queue has stopped (%d) or is full (%d > %d)\n",
+                       netif_queue_stopped(caifd->netdev),
+                       qlen, high);
+       caifd->xoff = 1;
+       caifd->xoff_skb = skb;
+       caifd->xoff_skb_dtor = skb->destructor;
+       skb->destructor = caif_flow_cb;
+       spin_unlock_bh(&caifd->flow_lock);
+
+       caifd->layer.up->ctrlcmd(caifd->layer.up,
+                                       _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+                                       caifd->layer.id);
+noxoff:
+       rcu_read_unlock_bh();
 
        err = dev_queue_xmit(skb);
        if (err > 0)
@@ -172,7 +273,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
 
        /* Release reference to stack upwards */
        caifd_put(caifd);
-       return 0;
+
+       if (err != 0)
+               err = NET_RX_DROP;
+       return err;
 }
 
 static struct packet_type caif_packet_type __read_mostly = {
@@ -203,6 +307,57 @@ static void dev_flowctrl(struct net_device *dev, int on)
        caifd_put(caifd);
 }
 
+void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+                       struct cflayer *link_support, int head_room,
+                       struct cflayer **layer, int (**rcv_func)(
+                               struct sk_buff *, struct net_device *,
+                               struct packet_type *, struct net_device *))
+{
+       struct caif_device_entry *caifd;
+       enum cfcnfg_phy_preference pref;
+       struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
+       struct caif_device_entry_list *caifdevs;
+
+       caifdevs = caif_device_list(dev_net(dev));
+       if (!cfg || !caifdevs)
+               return;
+       caifd = caif_device_alloc(dev);
+       if (!caifd)
+               return;
+       *layer = &caifd->layer;
+       spin_lock_init(&caifd->flow_lock);
+
+       switch (caifdev->link_select) {
+       case CAIF_LINK_HIGH_BANDW:
+               pref = CFPHYPREF_HIGH_BW;
+               break;
+       case CAIF_LINK_LOW_LATENCY:
+               pref = CFPHYPREF_LOW_LAT;
+               break;
+       default:
+               pref = CFPHYPREF_HIGH_BW;
+               break;
+       }
+       mutex_lock(&caifdevs->lock);
+       list_add_rcu(&caifd->list, &caifdevs->list);
+
+       strncpy(caifd->layer.name, dev->name,
+               sizeof(caifd->layer.name) - 1);
+       caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+       caifd->layer.transmit = transmit;
+       cfcnfg_add_phy_layer(cfg,
+                               dev,
+                               &caifd->layer,
+                               pref,
+                               link_support,
+                               caifdev->use_fcs,
+                               head_room);
+       mutex_unlock(&caifdevs->lock);
+       if (rcv_func)
+               *rcv_func = receive;
+}
+EXPORT_SYMBOL(caif_enroll_dev);
+
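/*
 * Editor's sketch, not part of the patch: intended use of the new
 * caif_enroll_dev() helper from a CAIF link-layer driver's netdevice
 * notifier (cf. cfusbl_device_notify() in caif_usb.c below).  The names
 * my_link_notify and my_common are hypothetical.
 */
static int my_link_notify(struct notifier_block *nb, unsigned long what,
			  void *arg)
{
	struct net_device *dev = arg;
	struct caif_dev_common my_common;
	struct cflayer *layer, *link_support;

	if (what != NETDEV_REGISTER)
		return 0;

	memset(&my_common, 0, sizeof(my_common));
	my_common.link_select = CAIF_LINK_LOW_LATENCY;
	my_common.use_frag = true;
	my_common.use_stx = true;
	my_common.use_fcs = true;

	/* A fragmenting link stacks a serial layer and needs 1 byte head room. */
	link_support = cfserl_create(dev->ifindex, my_common.use_stx);
	if (!link_support)
		return 0;

	caif_enroll_dev(dev, &my_common, link_support, 1, &layer, NULL);
	return 0;
}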
 /* notify Caif of device events */
 static int caif_device_notify(struct notifier_block *me, unsigned long what,
                              void *arg)
@@ -210,62 +365,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        struct net_device *dev = arg;
        struct caif_device_entry *caifd = NULL;
        struct caif_dev_common *caifdev;
-       enum cfcnfg_phy_preference pref;
-       enum cfcnfg_phy_type phy_type;
        struct cfcnfg *cfg;
+       struct cflayer *layer, *link_support;
+       int head_room = 0;
        struct caif_device_entry_list *caifdevs;
 
-       if (dev->type != ARPHRD_CAIF)
-               return 0;
-
        cfg = get_cfcnfg(dev_net(dev));
-       if (cfg == NULL)
+       caifdevs = caif_device_list(dev_net(dev));
+       if (!cfg || !caifdevs)
                return 0;
 
-       caifdevs = caif_device_list(dev_net(dev));
+       caifd = caif_get(dev);
+       if (caifd == NULL && dev->type != ARPHRD_CAIF)
+               return 0;
 
        switch (what) {
        case NETDEV_REGISTER:
-               caifd = caif_device_alloc(dev);
-               if (!caifd)
-                       return 0;
+               if (caifd != NULL)
+                       break;
 
                caifdev = netdev_priv(dev);
-               caifdev->flowctrl = dev_flowctrl;
 
-               caifd->layer.transmit = transmit;
-
-               if (caifdev->use_frag)
-                       phy_type = CFPHYTYPE_FRAG;
-               else
-                       phy_type = CFPHYTYPE_CAIF;
-
-               switch (caifdev->link_select) {
-               case CAIF_LINK_HIGH_BANDW:
-                       pref = CFPHYPREF_HIGH_BW;
-                       break;
-               case CAIF_LINK_LOW_LATENCY:
-                       pref = CFPHYPREF_LOW_LAT;
-                       break;
-               default:
-                       pref = CFPHYPREF_HIGH_BW;
-                       break;
+               link_support = NULL;
+               if (caifdev->use_frag) {
+                       head_room = 1;
+                       link_support = cfserl_create(dev->ifindex,
+                                                       caifdev->use_stx);
+                       if (!link_support) {
+                               pr_warn("Out of memory\n");
+                               break;
+                       }
                }
-               strncpy(caifd->layer.name, dev->name,
-                       sizeof(caifd->layer.name) - 1);
-               caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
-
-               mutex_lock(&caifdevs->lock);
-               list_add_rcu(&caifd->list, &caifdevs->list);
-
-               cfcnfg_add_phy_layer(cfg,
-                                    phy_type,
-                                    dev,
-                                    &caifd->layer,
-                                    pref,
-                                    caifdev->use_fcs,
-                                    caifdev->use_stx);
-               mutex_unlock(&caifdevs->lock);
+               caif_enroll_dev(dev, caifdev, link_support, head_room,
+                               &layer, NULL);
+               caifdev->flowctrl = dev_flowctrl;
                break;
 
        case NETDEV_UP:
@@ -277,6 +410,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                        break;
                }
 
+               caifd->xoff = 0;
                cfcnfg_set_phy_state(cfg, &caifd->layer, true);
                rcu_read_unlock();
 
@@ -298,6 +432,24 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                caifd->layer.up->ctrlcmd(caifd->layer.up,
                                         _CAIF_CTRLCMD_PHYIF_DOWN_IND,
                                         caifd->layer.id);
+
+               spin_lock_bh(&caifd->flow_lock);
+
+               /*
+                * Replace our xoff-destructor with the original destructor.
+                * We trust that skb->destructor is *always* called before
+                * the skb reference becomes invalid. The hijacked SKB
+                * destructor takes the flow_lock, so manipulating
+                * skb->destructor here should be safe.
+                */
+               if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
+                       caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;
+
+               caifd->xoff = 0;
+               caifd->xoff_skb_dtor = NULL;
+               caifd->xoff_skb = NULL;
+
+               spin_unlock_bh(&caifd->flow_lock);
                caifd_put(caifd);
                break;
 
@@ -353,15 +505,15 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
        struct caif_net *caifn = net_generic(net, caif_net_id);
-       BUG_ON(!caifn);
+       if (WARN_ON(!caifn))
+               return -EINVAL;
+
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);
 
        caifn->cfg = cfcnfg_create();
-       if (!caifn->cfg) {
-               pr_warn("can't create cfcnfg\n");
+       if (!caifn->cfg)
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -371,17 +523,14 @@ static void caif_exit_net(struct net *net)
        struct caif_device_entry *caifd, *tmp;
        struct caif_device_entry_list *caifdevs =
            caif_device_list(net);
-       struct cfcnfg *cfg;
+       struct cfcnfg *cfg =  get_cfcnfg(net);
+
+       if (!cfg || !caifdevs)
+               return;
 
        rtnl_lock();
        mutex_lock(&caifdevs->lock);
 
-       cfg = get_cfcnfg(net);
-       if (cfg == NULL) {
-               mutex_unlock(&caifdevs->lock);
-               return;
-       }
-
        list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
                int i = 0;
                list_del_rcu(&caifd->list);
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
new file mode 100644 (file)
index 0000000..f5db57c
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * CAIF USB handler
+ * Copyright (C) ST-Ericsson AB 2011
+ * Author:     Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/usbnet.h>
+#include <net/netns/generic.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+
+#define CFUSB_PAD_DESCR_SZ 1   /* Alignment descriptor length */
+#define CFUSB_ALIGNMENT 4      /* Number of bytes to align. */
+#define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
+#define STE_USB_VID 0x04cc     /* USB Vendor ID for ST-Ericsson */
+#define STE_USB_PID_CAIF 0x2306        /* Product id for CAIF Modems */
+
+struct cfusbl {
+       struct cflayer layer;
+       u8 tx_eth_hdr[ETH_HLEN];
+};
+
+static bool pack_added;
+
+static int cfusbl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+       u8 hpad;
+
+       /* Remove padding. */
+       cfpkt_extr_head(pkt, &hpad, 1);
+       cfpkt_extr_head(pkt, NULL, hpad);
+       return layr->up->receive(layr->up, pkt);
+}
+
+static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+       struct caif_payload_info *info;
+       u8 hpad;
+       u8 zeros[CFUSB_ALIGNMENT];
+       struct sk_buff *skb;
+       struct cfusbl *usbl = container_of(layr, struct cfusbl, layer);
+
+       skb = cfpkt_tonative(pkt);
+
+       skb_reset_network_header(skb);
+       skb->protocol = htons(ETH_P_IP);
+
+       info = cfpkt_info(pkt);
+       hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
+
+       if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
+               pr_warn("Headroom too small\n");
+               kfree_skb(skb);
+               return -EIO;
+       }
+       memset(zeros, 0, hpad);
+
+       cfpkt_add_head(pkt, zeros, hpad);
+       cfpkt_add_head(pkt, &hpad, 1);
+       cfpkt_add_head(pkt, usbl->tx_eth_hdr, sizeof(usbl->tx_eth_hdr));
+       return layr->dn->transmit(layr->dn, pkt);
+}
+
+static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+                                       int phyid)
+{
+       if (layr->up && layr->up->ctrlcmd)
+               layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
+
+struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+                                       u8 braddr[ETH_ALEN])
+{
+       struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);
+
+       if (!this) {
+               pr_warn("Out of memory\n");
+               return NULL;
+       }
+       caif_assert(offsetof(struct cfusbl, layer) == 0);
+
+       memset(this, 0, sizeof(struct cflayer));
+       this->layer.receive = cfusbl_receive;
+       this->layer.transmit = cfusbl_transmit;
+       this->layer.ctrlcmd = cfusbl_ctrlcmd;
+       snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "usb%d", phyid);
+       this->layer.id = phyid;
+
+       /*
+        * Construct TX ethernet header:
+        *      0-5     destination address
+        *      6-11    source address
+        *      12-13   protocol type
+        */
+       memcpy(&this->tx_eth_hdr[0], braddr, ETH_ALEN);
+       memcpy(&this->tx_eth_hdr[ETH_ALEN], ethaddr, ETH_ALEN);
+       this->tx_eth_hdr[12] = cpu_to_be16(ETH_P_802_EX1) & 0xff;
+       this->tx_eth_hdr[13] = (cpu_to_be16(ETH_P_802_EX1) >> 8) & 0xff;
+       pr_debug("caif ethernet TX-header dst:%pM src:%pM type:%02x%02x\n",
+                       this->tx_eth_hdr, this->tx_eth_hdr + ETH_ALEN,
+                       this->tx_eth_hdr[12], this->tx_eth_hdr[13]);
+
+       return (struct cflayer *) this;
+}
+
+static struct packet_type caif_usb_type __read_mostly = {
+       .type = cpu_to_be16(ETH_P_802_EX1),
+};
+
+static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+                             void *arg)
+{
+       struct net_device *dev = arg;
+       struct caif_dev_common common;
+       struct cflayer *layer, *link_support;
+       struct usbnet   *usbnet = netdev_priv(dev);
+       struct usb_device       *usbdev = usbnet->udev;
+       struct ethtool_drvinfo drvinfo;
+
+       /*
+        * Quirks: Hijack ethtool to find out if we have an NCM device,
+        * and find its VID/PID.
+        */
+       if (dev->ethtool_ops == NULL || dev->ethtool_ops->get_drvinfo == NULL)
+               return 0;
+
+       dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+       if (strncmp(drvinfo.driver, "cdc_ncm", 7) != 0)
+               return 0;
+
+       pr_debug("USB CDC NCM device VID:0x%4x PID:0x%4x\n",
+               le16_to_cpu(usbdev->descriptor.idVendor),
+               le16_to_cpu(usbdev->descriptor.idProduct));
+
+       /* Check for VID/PID that supports CAIF */
+       if (!(le16_to_cpu(usbdev->descriptor.idVendor) == STE_USB_VID &&
+               le16_to_cpu(usbdev->descriptor.idProduct) == STE_USB_PID_CAIF))
+               return 0;
+
+       if (what == NETDEV_UNREGISTER)
+               module_put(THIS_MODULE);
+
+       if (what != NETDEV_REGISTER)
+               return 0;
+
+       __module_get(THIS_MODULE);
+
+       memset(&common, 0, sizeof(common));
+       common.use_frag = false;
+       common.use_fcs = false;
+       common.use_stx = false;
+       common.link_select = CAIF_LINK_HIGH_BANDW;
+       common.flowctrl = NULL;
+
+       link_support = cfusbl_create(dev->ifindex, dev->dev_addr,
+                                       dev->broadcast);
+
+       if (!link_support)
+               return -ENOMEM;
+
+       if (dev->num_tx_queues > 1)
+               pr_warn("USB device uses more than one tx queue\n");
+
+       caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+                       &layer, &caif_usb_type.func);
+       if (!pack_added)
+               dev_add_pack(&caif_usb_type);
+       pack_added = 1;
+
+       strncpy(layer->name, dev->name,
+                       sizeof(layer->name) - 1);
+       layer->name[sizeof(layer->name) - 1] = 0;
+
+       return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+       .notifier_call = cfusbl_device_notify,
+       .priority = 0,
+};
+
+static int __init cfusbl_init(void)
+{
+       return register_netdevice_notifier(&caif_device_notifier);
+}
+
+static void __exit cfusbl_exit(void)
+{
+       unregister_netdevice_notifier(&caif_device_notifier);
+       dev_remove_pack(&caif_usb_type);
+}
+
+module_init(cfusbl_init);
+module_exit(cfusbl_exit);
index 00523ec..598aafb 100644 (file)
@@ -45,8 +45,8 @@ struct cfcnfg_phyinfo {
        /* Interface index */
        int ifindex;
 
-       /* Use Start of frame extension */
-       bool use_stx;
+       /* Protocol head room added for CAIF link layer */
+       int head_room;
 
        /* Use Start of frame checksum */
        bool use_fcs;
@@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
        if (channel_id != 0) {
                struct cflayer *servl;
                servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+               cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
                if (servl != NULL)
                        layer_set_up(servl, NULL);
        } else
                pr_debug("nothing to disconnect\n");
-       cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
 
        /* Do RCU sync before initiating cleanup */
        synchronize_rcu();
@@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
 
        *ifindex = phy->ifindex;
        *proto_tail = 2;
-       *proto_head =
-
-       protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+       *proto_head = protohead[param.linktype] + phy->head_room;
 
        rcu_read_unlock();
 
@@ -460,13 +458,13 @@ unlock:
 }
 
 void
-cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
-                    bool fcs, bool stx)
+                    struct cflayer *link_support,
+                    bool fcs, int head_room)
 {
        struct cflayer *frml;
-       struct cflayer *phy_driver = NULL;
        struct cfcnfg_phyinfo *phyinfo = NULL;
        int i;
        u8 phyid;
@@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
                        goto got_phyid;
        }
        pr_warn("Too many CAIF Link Layers (max 6)\n");
-       goto out_err;
+       goto out;
 
 got_phyid:
        phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
        if (!phyinfo)
                goto out_err;
 
-       switch (phy_type) {
-       case CFPHYTYPE_FRAG:
-               phy_driver =
-                   cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
-               if (!phy_driver)
-                       goto out_err;
-               break;
-       case CFPHYTYPE_CAIF:
-               phy_driver = NULL;
-               break;
-       default:
-               goto out_err;
-       }
        phy_layer->id = phyid;
        phyinfo->pref = pref;
        phyinfo->id = phyid;
@@ -509,7 +494,7 @@ got_phyid:
        phyinfo->dev_info.dev = dev;
        phyinfo->phy_layer = phy_layer;
        phyinfo->ifindex = dev->ifindex;
-       phyinfo->use_stx = stx;
+       phyinfo->head_room = head_room;
        phyinfo->use_fcs = fcs;
 
        frml = cffrml_create(phyid, fcs);
@@ -519,23 +504,23 @@ got_phyid:
        phyinfo->frm_layer = frml;
        layer_set_up(frml, cnfg->mux);
 
-       if (phy_driver != NULL) {
-               phy_driver->id = phyid;
-               layer_set_dn(frml, phy_driver);
-               layer_set_up(phy_driver, frml);
-               layer_set_dn(phy_driver, phy_layer);
-               layer_set_up(phy_layer, phy_driver);
+       if (link_support != NULL) {
+               link_support->id = phyid;
+               layer_set_dn(frml, link_support);
+               layer_set_up(link_support, frml);
+               layer_set_dn(link_support, phy_layer);
+               layer_set_up(phy_layer, link_support);
        } else {
                layer_set_dn(frml, phy_layer);
                layer_set_up(phy_layer, frml);
        }
 
        list_add_rcu(&phyinfo->node, &cnfg->phys);
+out:
        mutex_unlock(&cnfg->lock);
        return;
 
 out_err:
-       kfree(phy_driver);
        kfree(phyinfo);
        mutex_unlock(&cnfg->lock);
 }
index df08c47..e335ba8 100644 (file)
@@ -63,7 +63,6 @@ static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
        return (struct cfpkt *) skb;
 }
 
-
 struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
 {
        struct cfpkt *pkt = skb_to_pkt(nativepkt);
@@ -105,14 +104,12 @@ void cfpkt_destroy(struct cfpkt *pkt)
        kfree_skb(skb);
 }
 
-
 inline bool cfpkt_more(struct cfpkt *pkt)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
        return skb->len > 0;
 }
 
-
 int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -144,9 +141,11 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
        }
        from = skb_pull(skb, len);
        from -= len;
-       memcpy(data, from, len);
+       if (data)
+               memcpy(data, from, len);
        return 0;
 }
+EXPORT_SYMBOL(cfpkt_extr_head);
 
 int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
 {
@@ -170,13 +169,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
        return 0;
 }
 
-
 int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
 {
        return cfpkt_add_body(pkt, NULL, len);
 }
 
-
 int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -255,21 +252,19 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
        memcpy(to, data, len);
        return 0;
 }
-
+EXPORT_SYMBOL(cfpkt_add_head);
 
 inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
 {
        return cfpkt_add_body(pkt, data, len);
 }
 
-
 inline u16 cfpkt_getlen(struct cfpkt *pkt)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
        return skb->len;
 }
 
-
 inline u16 cfpkt_iterate(struct cfpkt *pkt,
                            u16 (*iter_func)(u16, void *, u16),
                            u16 data)
@@ -287,7 +282,6 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
        return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
 }
 
-
 int cfpkt_setlen(struct cfpkt *pkt, u16 len)
 {
        struct sk_buff *skb = pkt_to_skb(pkt);
@@ -399,3 +393,4 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
 {
        return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
 }
+EXPORT_SYMBOL(cfpkt_info);
index 81660f8..6dc75d4 100644 (file)
@@ -190,7 +190,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
+       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
 
        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
index 797c8d1..8e68b97 100644 (file)
@@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                                int phyid);
 
-struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+struct cflayer *cfserl_create(int instance, bool use_stx)
 {
        struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
        if (!this)
@@ -40,7 +40,6 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
        this->layer.receive = cfserl_receive;
        this->layer.transmit = cfserl_transmit;
        this->layer.ctrlcmd = cfserl_ctrlcmd;
-       this->layer.type = type;
        this->usestx = use_stx;
        spin_lock_init(&this->sync);
        snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
index 0d357b1..674641b 100644 (file)
@@ -3,12 +3,13 @@
 #
 
 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
-        gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
+        gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
 obj-y               += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
-                       neighbour.o rtnetlink.o utils.o link_watch.o filter.o
+                       neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
+                       sock_diag.o
 
 obj-$(CONFIG_XFRM) += flow.o
 obj-y += net-sysfs.o
@@ -19,3 +20,4 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o
 obj-$(CONFIG_TRACEPOINTS) += net-traces.o
 obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
 obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
+obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o
index 5a13edf..f494675 100644 (file)
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
-#include <linux/if_tunnel.h>
-#include <linux/if_pppox.h>
-#include <linux/ppp_defs.h>
 #include <linux/net_tstamp.h>
+#include <linux/jump_label.h>
+#include <net/flow_keys.h>
 
 #include "net-sysfs.h"
 
@@ -1320,8 +1319,6 @@ EXPORT_SYMBOL(dev_close);
  */
 void dev_disable_lro(struct net_device *dev)
 {
-       u32 flags;
-
        /*
         * If we're trying to disable lro on a vlan device
         * use the underlying physical device instead
@@ -1329,15 +1326,9 @@ void dev_disable_lro(struct net_device *dev)
        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
 
-       if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
-               flags = dev->ethtool_ops->get_flags(dev);
-       else
-               flags = ethtool_op_get_flags(dev);
+       dev->wanted_features &= ~NETIF_F_LRO;
+       netdev_update_features(dev);
 
-       if (!(flags & ETH_FLAG_LRO))
-               return;
-
-       __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
        if (unlikely(dev->features & NETIF_F_LRO))
                netdev_WARN(dev, "failed to disable LRO!\n");
 }
@@ -1450,34 +1441,55 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
+static struct jump_label_key netstamp_needed __read_mostly;
+#ifdef HAVE_JUMP_LABEL
+/* We are not allowed to call jump_label_dec() from irq context;
+ * if net_disable_timestamp() is called from irq context, defer the
+ * jump_label_dec() calls.
+ */
+static atomic_t netstamp_needed_deferred;
+#endif
 
 void net_enable_timestamp(void)
 {
-       atomic_inc(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+       int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+       if (deferred) {
+               while (--deferred)
+                       jump_label_dec(&netstamp_needed);
+               return;
+       }
+#endif
+       WARN_ON(in_interrupt());
+       jump_label_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
-       atomic_dec(&netstamp_needed);
+#ifdef HAVE_JUMP_LABEL
+       if (in_interrupt()) {
+               atomic_inc(&netstamp_needed_deferred);
+               return;
+       }
+#endif
+       jump_label_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
-       if (atomic_read(&netstamp_needed))
+       skb->tstamp.tv64 = 0;
+       if (static_branch(&netstamp_needed))
                __net_timestamp(skb);
-       else
-               skb->tstamp.tv64 = 0;
 }
 
-static inline void net_timestamp_check(struct sk_buff *skb)
-{
-       if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
-               __net_timestamp(skb);
-}
+#define net_timestamp_check(COND, SKB)                 \
+       if (static_branch(&netstamp_needed)) {          \
+               if ((COND) && !(SKB)->tstamp.tv64)      \
+                       __net_timestamp(SKB);           \
+       }                                               \
 
 static int net_hwtstamp_validate(struct ifreq *ifr)
 {
@@ -1924,7 +1936,8 @@ EXPORT_SYMBOL(skb_checksum_help);
  *     It may return NULL if the skb requires no segmentation.  This is
  *     only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
@@ -1954,9 +1967,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);
 
-               WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
-                    info.driver, dev ? dev->features : 0L,
-                    skb->sk ? skb->sk->sk_route_caps : 0L,
+               WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
+                    info.driver, dev ? &dev->features : NULL,
+                    skb->sk ? &skb->sk->sk_route_caps : NULL,
                     skb->len, skb->data_len, skb->ip_summed);
 
                if (skb_header_cloned(skb) &&
@@ -2065,7 +2078,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
  *     This function segments the given skb and stores the list of segments
  *     in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb, int features)
+static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
        struct sk_buff *segs;
 
@@ -2104,7 +2117,7 @@ static inline void skb_orphan_try(struct sk_buff *skb)
        }
 }
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_V4_CSUM) &&
@@ -2115,7 +2128,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
                 protocol == htons(ETH_P_FCOE)));
 }
 
-static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+static netdev_features_t harmonize_features(struct sk_buff *skb,
+       __be16 protocol, netdev_features_t features)
 {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
@@ -2127,10 +2141,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features
        return features;
 }
 
-u32 netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
        __be16 protocol = skb->protocol;
-       u32 features = skb->dev->features;
+       netdev_features_t features = skb->dev->features;
 
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2176,7 +2190,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        unsigned int skb_len;
 
        if (likely(!skb->next)) {
-               u32 features;
+               netdev_features_t features;
 
                /*
                 * If device doesn't need skb->dst, release it right now while
@@ -2257,7 +2271,7 @@ gso:
                        return rc;
                }
                txq_trans_update(txq);
-               if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+               if (unlikely(netif_xmit_stopped(txq) && skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
 
@@ -2457,6 +2471,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        return rc;
 }
 
+#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+static void skb_update_prio(struct sk_buff *skb)
+{
+       struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+
+       if ((!skb->priority) && (skb->sk) && map)
+               skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+}
+#else
+#define skb_update_prio(skb)
+#endif
+
 static DEFINE_PER_CPU(int, xmit_recursion);
 #define RECURSION_LIMIT 10
 
@@ -2497,6 +2523,8 @@ int dev_queue_xmit(struct sk_buff *skb)
         */
        rcu_read_lock_bh();
 
+       skb_update_prio(skb);
+
        txq = dev_pick_tx(dev, skb);
        q = rcu_dereference_bh(txq->qdisc);
 
@@ -2531,7 +2559,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
                        HARD_TX_LOCK(dev, txq, cpu);
 
-                       if (!netif_tx_queue_stopped(txq)) {
+                       if (!netif_xmit_stopped(txq)) {
                                __this_cpu_inc(xmit_recursion);
                                rc = dev_hard_start_xmit(skb, dev, txq);
                                __this_cpu_dec(xmit_recursion);
@@ -2592,123 +2620,28 @@ static inline void ____napi_schedule(struct softnet_data *sd,
  */
 void __skb_get_rxhash(struct sk_buff *skb)
 {
-       int nhoff, hash = 0, poff;
-       const struct ipv6hdr *ip6;
-       const struct iphdr *ip;
-       const struct vlan_hdr *vlan;
-       u8 ip_proto;
-       u32 addr1, addr2;
-       u16 proto;
-       union {
-               u32 v32;
-               u16 v16[2];
-       } ports;
-
-       nhoff = skb_network_offset(skb);
-       proto = skb->protocol;
-
-again:
-       switch (proto) {
-       case __constant_htons(ETH_P_IP):
-ip:
-               if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
-                       goto done;
-
-               ip = (const struct iphdr *) (skb->data + nhoff);
-               if (ip_is_fragment(ip))
-                       ip_proto = 0;
-               else
-                       ip_proto = ip->protocol;
-               addr1 = (__force u32) ip->saddr;
-               addr2 = (__force u32) ip->daddr;
-               nhoff += ip->ihl * 4;
-               break;
-       case __constant_htons(ETH_P_IPV6):
-ipv6:
-               if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
-                       goto done;
-
-               ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
-               ip_proto = ip6->nexthdr;
-               addr1 = (__force u32) ip6->saddr.s6_addr32[3];
-               addr2 = (__force u32) ip6->daddr.s6_addr32[3];
-               nhoff += 40;
-               break;
-       case __constant_htons(ETH_P_8021Q):
-               if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
-                       goto done;
-               vlan = (const struct vlan_hdr *) (skb->data + nhoff);
-               proto = vlan->h_vlan_encapsulated_proto;
-               nhoff += sizeof(*vlan);
-               goto again;
-       case __constant_htons(ETH_P_PPP_SES):
-               if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
-                       goto done;
-               proto = *((__be16 *) (skb->data + nhoff +
-                                     sizeof(struct pppoe_hdr)));
-               nhoff += PPPOE_SES_HLEN;
-               switch (proto) {
-               case __constant_htons(PPP_IP):
-                       goto ip;
-               case __constant_htons(PPP_IPV6):
-                       goto ipv6;
-               default:
-                       goto done;
-               }
-       default:
-               goto done;
-       }
-
-       switch (ip_proto) {
-       case IPPROTO_GRE:
-               if (pskb_may_pull(skb, nhoff + 16)) {
-                       u8 *h = skb->data + nhoff;
-                       __be16 flags = *(__be16 *)h;
+       struct flow_keys keys;
+       u32 hash;
 
-                       /*
-                        * Only look inside GRE if version zero and no
-                        * routing
-                        */
-                       if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
-                               proto = *(__be16 *)(h + 2);
-                               nhoff += 4;
-                               if (flags & GRE_CSUM)
-                                       nhoff += 4;
-                               if (flags & GRE_KEY)
-                                       nhoff += 4;
-                               if (flags & GRE_SEQ)
-                                       nhoff += 4;
-                               goto again;
-                       }
-               }
-               break;
-       case IPPROTO_IPIP:
-               goto again;
-       default:
-               break;
-       }
+       if (!skb_flow_dissect(skb, &keys))
+               return;
 
-       ports.v32 = 0;
-       poff = proto_ports_offset(ip_proto);
-       if (poff >= 0) {
-               nhoff += poff;
-               if (pskb_may_pull(skb, nhoff + 4)) {
-                       ports.v32 = * (__force u32 *) (skb->data + nhoff);
-                       if (ports.v16[1] < ports.v16[0])
-                               swap(ports.v16[0], ports.v16[1]);
-                       skb->l4_rxhash = 1;
-               }
+       if (keys.ports) {
+               if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
+                       swap(keys.port16[0], keys.port16[1]);
+               skb->l4_rxhash = 1;
        }
 
        /* get a consistent hash (same value on both flow directions) */
-       if (addr2 < addr1)
-               swap(addr1, addr2);
+       if ((__force u32)keys.dst < (__force u32)keys.src)
+               swap(keys.dst, keys.src);
 
-       hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+       hash = jhash_3words((__force u32)keys.dst,
+                           (__force u32)keys.src,
+                           (__force u32)keys.ports, hashrnd);
        if (!hash)
                hash = 1;
 
-done:
        skb->rxhash = hash;
 }
 EXPORT_SYMBOL(__skb_get_rxhash);
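/*
 * Editor's sketch, not part of the patch: the swaps above are what make the
 * rxhash direction-consistent -- both directions of a flow feed the same
 * three words into jhash_3words().  A stand-alone version (my_flow_hash is a
 * hypothetical name; jhash_3words() from <linux/jhash.h>, swap() from
 * <linux/kernel.h>):
 */
static u32 my_flow_hash(__be32 saddr, __be32 daddr,
			__be16 sport, __be16 dport, u32 seed)
{
	u32 src = (__force u32)saddr, dst = (__force u32)daddr;
	u16 p0 = (__force u16)sport, p1 = (__force u16)dport;
	u32 hash;

	/* Canonical ordering: same pair in either flow direction. */
	if (dst < src)
		swap(src, dst);
	if (p1 < p0)
		swap(p0, p1);

	hash = jhash_3words(dst, src, ((u32)p0 << 16) | p1, seed);
	return hash ? hash : 1;		/* 0 means "no hash computed" */
}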
@@ -2719,6 +2652,8 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+struct jump_label_key rps_needed __read_mostly;
+
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
@@ -2998,12 +2933,11 @@ int netif_rx(struct sk_buff *skb)
        if (netpoll_rx(skb))
                return NET_RX_DROP;
 
-       if (netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-       {
+       if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
@@ -3018,14 +2952,13 @@ int netif_rx(struct sk_buff *skb)
 
                rcu_read_unlock();
                preempt_enable();
-       }
-#else
+       } else
+#endif
        {
                unsigned int qtail;
                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
                put_cpu();
        }
-#endif
        return ret;
 }
 EXPORT_SYMBOL(netif_rx);
@@ -3231,8 +3164,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
        int ret = NET_RX_DROP;
        __be16 type;
 
-       if (!netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(!netdev_tstamp_prequeue, skb);
 
        trace_netif_receive_skb(skb);
 
@@ -3363,14 +3295,13 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
-       if (netdev_tstamp_prequeue)
-               net_timestamp_check(skb);
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-       {
+       if (static_branch(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu, ret;
 
@@ -3381,16 +3312,12 @@ int netif_receive_skb(struct sk_buff *skb)
                if (cpu >= 0) {
                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        rcu_read_unlock();
-               } else {
-                       rcu_read_unlock();
-                       ret = __netif_receive_skb(skb);
+                       return ret;
                }
-
-               return ret;
+               rcu_read_unlock();
        }
-#else
-       return __netif_receive_skb(skb);
 #endif
+       return __netif_receive_skb(skb);
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -4539,7 +4466,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
 
 static int __dev_set_promiscuity(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        uid_t uid;
        gid_t gid;
 
@@ -4596,7 +4523,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
  */
 int dev_set_promiscuity(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        int err;
 
        err = __dev_set_promiscuity(dev, inc);
@@ -4623,7 +4550,7 @@ EXPORT_SYMBOL(dev_set_promiscuity);
 
 int dev_set_allmulti(struct net_device *dev, int inc)
 {
-       unsigned short old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
 
        ASSERT_RTNL();
 
@@ -4726,7 +4653,7 @@ EXPORT_SYMBOL(dev_get_flags);
 
 int __dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-       int old_flags = dev->flags;
+       unsigned int old_flags = dev->flags;
        int ret;
 
        ASSERT_RTNL();
@@ -4809,10 +4736,10 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
  *     Change settings on device based state flags. The flags are
  *     in the userspace exported format.
  */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
-       int ret, changes;
-       int old_flags = dev->flags;
+       int ret;
+       unsigned int changes, old_flags = dev->flags;
 
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
@@ -5369,7 +5296,8 @@ static void rollback_registered(struct net_device *dev)
        list_del(&single);
 }
 
-static u32 netdev_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netdev_fix_features(struct net_device *dev,
+       netdev_features_t features)
 {
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
@@ -5378,12 +5306,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }
 
-       if ((features & NETIF_F_NO_CSUM) &&
-           (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               netdev_warn(dev, "mixed no checksumming and other settings.\n");
-               features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-       }
-
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
@@ -5431,7 +5353,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features)
 
 int __netdev_update_features(struct net_device *dev)
 {
-       u32 features;
+       netdev_features_t features;
        int err = 0;
 
        ASSERT_RTNL();
@@ -5447,16 +5369,16 @@ int __netdev_update_features(struct net_device *dev)
        if (dev->features == features)
                return 0;
 
-       netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
-               dev->features, features);
+       netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
+               &dev->features, &features);
 
        if (dev->netdev_ops->ndo_set_features)
                err = dev->netdev_ops->ndo_set_features(dev, features);
 
        if (unlikely(err < 0)) {
                netdev_err(dev,
-                       "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
-                       err, features, dev->features);
+                       "set_features() failed (%d); wanted %pNF, left %pNF\n",
+                       err, &features, &dev->features);
                return -1;
        }
 
@@ -5555,6 +5477,9 @@ static void netdev_init_one_queue(struct net_device *dev,
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
+#ifdef CONFIG_BQL
+       dql_init(&queue->dql, HZ);
+#endif
 }
 
 static int netif_alloc_netdev_queues(struct net_device *dev)
@@ -5640,11 +5565,12 @@ int register_netdevice(struct net_device *dev)
        dev->wanted_features = dev->features & dev->hw_features;
 
        /* Turn on no cache copy if HW is doing checksum */
-       dev->hw_features |= NETIF_F_NOCACHE_COPY;
-       if ((dev->features & NETIF_F_ALL_CSUM) &&
-           !(dev->features & NETIF_F_NO_CSUM)) {
-               dev->wanted_features |= NETIF_F_NOCACHE_COPY;
-               dev->features |= NETIF_F_NOCACHE_COPY;
+       if (!(dev->flags & IFF_LOOPBACK)) {
+               dev->hw_features |= NETIF_F_NOCACHE_COPY;
+               if (dev->features & NETIF_F_ALL_CSUM) {
+                       dev->wanted_features |= NETIF_F_NOCACHE_COPY;
+                       dev->features |= NETIF_F_NOCACHE_COPY;
+               }
        }
 
        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
@@ -6380,7 +6306,8 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  *     @one to the master device with current feature set @all.  Will not
  *     enable anything that is off in @mask. Returns the new feature set.
  */
-u32 netdev_increment_features(u32 all, u32 one, u32 mask)
+netdev_features_t netdev_increment_features(netdev_features_t all,
+       netdev_features_t one, netdev_features_t mask)
 {
        if (mask & NETIF_F_GEN_CSUM)
                mask |= NETIF_F_ALL_CSUM;
@@ -6389,10 +6316,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
        all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
        all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-       /* If device needs checksumming, downgrade to it. */
-       if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
-               all &= ~NETIF_F_NO_CSUM;
-
        /* If one device supports hw checksumming, set for all. */
        if (all & NETIF_F_GEN_CSUM)
                all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
index d5e2c4c..43d94ce 100644 (file)
@@ -366,7 +366,7 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                dev_hold(dst->dev);
                dev_put(dev);
                rcu_read_lock();
-               neigh = dst_get_neighbour(dst);
+               neigh = dst_get_neighbour_noref(dst);
                if (neigh && neigh->dev == dev) {
                        neigh->dev = dst->dev;
                        dev_hold(dst->dev);
index f444817..31b0b7f 100644 (file)
@@ -36,235 +36,44 @@ u32 ethtool_op_get_link(struct net_device *dev)
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
-u32 ethtool_op_get_tx_csum(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tx_csum);
-
-int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_IP_CSUM;
-       else
-               dev->features &= ~NETIF_F_IP_CSUM;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-
-int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_HW_CSUM;
-       else
-               dev->features &= ~NETIF_F_HW_CSUM;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
-
-int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-       else
-               dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
-
-u32 ethtool_op_get_sg(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_SG) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_sg);
-
-int ethtool_op_set_sg(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_SG;
-       else
-               dev->features &= ~NETIF_F_SG;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_sg);
-
-u32 ethtool_op_get_tso(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_TSO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_tso);
-
-int ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_TSO;
-       else
-               dev->features &= ~NETIF_F_TSO;
-
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_tso);
-
-u32 ethtool_op_get_ufo(struct net_device *dev)
-{
-       return (dev->features & NETIF_F_UFO) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_ufo);
-
-int ethtool_op_set_ufo(struct net_device *dev, u32 data)
-{
-       if (data)
-               dev->features |= NETIF_F_UFO;
-       else
-               dev->features &= ~NETIF_F_UFO;
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_ufo);
-
-/* the following list of flags are the same as their associated
- * NETIF_F_xxx values in include/linux/netdevice.h
- */
-static const u32 flags_dup_features =
-       (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE |
-        ETH_FLAG_RXHASH);
-
-u32 ethtool_op_get_flags(struct net_device *dev)
-{
-       /* in the future, this function will probably contain additional
-        * handling for flags which are not so easily handled
-        * by a simple masking operation
-        */
-
-       return dev->features & flags_dup_features;
-}
-EXPORT_SYMBOL(ethtool_op_get_flags);
-
-/* Check if device can enable (or disable) particular feature coded in "data"
- * argument. Flags "supported" describe features that can be toggled by device.
- * If feature can not be toggled, it state (enabled or disabled) must match
- * hardcoded device features state, otherwise flags are marked as invalid.
- */
-bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
-{
-       u32 features = dev->features & flags_dup_features;
-       /* "data" can contain only flags_dup_features bits,
-        * see __ethtool_set_flags */
-
-       return (features & ~supported) != (data & ~supported);
-}
-EXPORT_SYMBOL(ethtool_invalid_flags);
-
-int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
-{
-       if (ethtool_invalid_flags(dev, data, supported))
-               return -EINVAL;
-
-       dev->features = ((dev->features & ~flags_dup_features) |
-                        (data & flags_dup_features));
-       return 0;
-}
-EXPORT_SYMBOL(ethtool_op_set_flags);
-
 /* Handlers for each ethtool command */
 
-#define ETHTOOL_DEV_FEATURE_WORDS      1
-
-static void ethtool_get_features_compat(struct net_device *dev,
-       struct ethtool_get_features_block *features)
-{
-       if (!dev->ethtool_ops)
-               return;
-
-       /* getting RX checksum */
-       if (dev->ethtool_ops->get_rx_csum)
-               if (dev->ethtool_ops->get_rx_csum(dev))
-                       features[0].active |= NETIF_F_RXCSUM;
-
-       /* mark legacy-changeable features */
-       if (dev->ethtool_ops->set_sg)
-               features[0].available |= NETIF_F_SG;
-       if (dev->ethtool_ops->set_tx_csum)
-               features[0].available |= NETIF_F_ALL_CSUM;
-       if (dev->ethtool_ops->set_tso)
-               features[0].available |= NETIF_F_ALL_TSO;
-       if (dev->ethtool_ops->set_rx_csum)
-               features[0].available |= NETIF_F_RXCSUM;
-       if (dev->ethtool_ops->set_flags)
-               features[0].available |= flags_dup_features;
-}
-
-static int ethtool_set_feature_compat(struct net_device *dev,
-       int (*legacy_set)(struct net_device *, u32),
-       struct ethtool_set_features_block *features, u32 mask)
-{
-       u32 do_set;
-
-       if (!legacy_set)
-               return 0;
-
-       if (!(features[0].valid & mask))
-               return 0;
-
-       features[0].valid &= ~mask;
-
-       do_set = !!(features[0].requested & mask);
-
-       if (legacy_set(dev, do_set) < 0)
-               netdev_info(dev,
-                       "Legacy feature change (%s) failed for 0x%08x\n",
-                       do_set ? "set" : "clear", mask);
-
-       return 1;
-}
-
-static int ethtool_set_flags_compat(struct net_device *dev,
-       int (*legacy_set)(struct net_device *, u32),
-       struct ethtool_set_features_block *features, u32 mask)
-{
-       u32 value;
-
-       if (!legacy_set)
-               return 0;
-
-       if (!(features[0].valid & mask))
-               return 0;
-
-       value = dev->features & ~features[0].valid;
-       value |= features[0].requested;
-
-       features[0].valid &= ~mask;
-
-       if (legacy_set(dev, value & mask) < 0)
-               netdev_info(dev, "Legacy flags change failed\n");
-
-       return 1;
-}
-
-static int ethtool_set_features_compat(struct net_device *dev,
-       struct ethtool_set_features_block *features)
-{
-       int compat;
-
-       if (!dev->ethtool_ops)
-               return 0;
-
-       compat  = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
-               features, NETIF_F_SG);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
-               features, NETIF_F_ALL_CSUM);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
-               features, NETIF_F_ALL_TSO);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
-               features, NETIF_F_RXCSUM);
-       compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
-               features, flags_dup_features);
-
-       return compat;
-}
+#define ETHTOOL_DEV_FEATURE_WORDS      ((NETDEV_FEATURE_COUNT + 31) / 32)
+
+static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
+       [NETIF_F_SG_BIT] =               "tx-scatter-gather",
+       [NETIF_F_IP_CSUM_BIT] =          "tx-checksum-ipv4",
+       [NETIF_F_HW_CSUM_BIT] =          "tx-checksum-ip-generic",
+       [NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
+       [NETIF_F_HIGHDMA_BIT] =          "highdma",
+       [NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
+       [NETIF_F_HW_VLAN_TX_BIT] =       "tx-vlan-hw-insert",
+
+       [NETIF_F_HW_VLAN_RX_BIT] =       "rx-vlan-hw-parse",
+       [NETIF_F_HW_VLAN_FILTER_BIT] =   "rx-vlan-filter",
+       [NETIF_F_VLAN_CHALLENGED_BIT] =  "vlan-challenged",
+       [NETIF_F_GSO_BIT] =              "tx-generic-segmentation",
+       [NETIF_F_LLTX_BIT] =             "tx-lockless",
+       [NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
+       [NETIF_F_GRO_BIT] =              "rx-gro",
+       [NETIF_F_LRO_BIT] =              "rx-lro",
+
+       [NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
+       [NETIF_F_UFO_BIT] =              "tx-udp-fragmentation",
+       [NETIF_F_GSO_ROBUST_BIT] =       "tx-gso-robust",
+       [NETIF_F_TSO_ECN_BIT] =          "tx-tcp-ecn-segmentation",
+       [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
+       [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
+
+       [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
+       [NETIF_F_SCTP_CSUM_BIT] =        "tx-checksum-sctp",
+       [NETIF_F_FCOE_MTU_BIT] =         "fcoe-mtu",
+       [NETIF_F_NTUPLE_BIT] =           "rx-ntuple-filter",
+       [NETIF_F_RXHASH_BIT] =           "rx-hashing",
+       [NETIF_F_RXCSUM_BIT] =           "rx-checksum",
+       [NETIF_F_NOCACHE_COPY_BIT] =     "tx-nocache-copy",
+       [NETIF_F_LOOPBACK_BIT] =         "loopback",
+};
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
 {
@@ -272,18 +81,21 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
                .cmd = ETHTOOL_GFEATURES,
                .size = ETHTOOL_DEV_FEATURE_WORDS,
        };
-       struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
-               {
-                       .available = dev->hw_features,
-                       .requested = dev->wanted_features,
-                       .active = dev->features,
-                       .never_changed = NETIF_F_NEVER_CHANGE,
-               },
-       };
+       struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
        u32 __user *sizeaddr;
        u32 copy_size;
+       int i;
 
-       ethtool_get_features_compat(dev, features);
+       /* in case feature bits run out again */
+       BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t));
+
+       for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+               features[i].available = (u32)(dev->hw_features >> (32 * i));
+               features[i].requested = (u32)(dev->wanted_features >> (32 * i));
+               features[i].active = (u32)(dev->features >> (32 * i));
+               features[i].never_changed =
+                       (u32)(NETIF_F_NEVER_CHANGE >> (32 * i));
+       }
 
        sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
        if (get_user(copy_size, sizeaddr))
@@ -305,7 +117,8 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
 {
        struct ethtool_sfeatures cmd;
        struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
-       int ret = 0;
+       netdev_features_t wanted = 0, valid = 0;
+       int i, ret = 0;
 
        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;
@@ -317,65 +130,29 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
        if (copy_from_user(features, useraddr, sizeof(features)))
                return -EFAULT;
 
-       if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
-               return -EINVAL;
+       for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) {
+               valid |= (netdev_features_t)features[i].valid << (32 * i);
+               wanted |= (netdev_features_t)features[i].requested << (32 * i);
+       }
 
-       if (ethtool_set_features_compat(dev, features))
-               ret |= ETHTOOL_F_COMPAT;
+       if (valid & ~NETIF_F_ETHTOOL_BITS)
+               return -EINVAL;
 
-       if (features[0].valid & ~dev->hw_features) {
-               features[0].valid &= dev->hw_features;
+       if (valid & ~dev->hw_features) {
+               valid &= dev->hw_features;
                ret |= ETHTOOL_F_UNSUPPORTED;
        }
 
-       dev->wanted_features &= ~features[0].valid;
-       dev->wanted_features |= features[0].valid & features[0].requested;
+       dev->wanted_features &= ~valid;
+       dev->wanted_features |= wanted & valid;
        __netdev_update_features(dev);
 
-       if ((dev->wanted_features ^ dev->features) & features[0].valid)
+       if ((dev->wanted_features ^ dev->features) & valid)
                ret |= ETHTOOL_F_WISH;
 
        return ret;
 }
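
Editor's note: the hunks above replace the single-word feature interface with ETHTOOL_DEV_FEATURE_WORDS 32-bit blocks built from (and folded back into) a netdev_features_t. Below is a minimal standalone sketch of the same word-splitting arithmetic, in userspace C rather than kernel code; "features64_t", FEATURE_WORDS and the sample bit pattern are hypothetical stand-ins, not part of the patch.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t features64_t;          /* stand-in for netdev_features_t */
#define FEATURE_WORDS ((64 + 31) / 32)  /* stand-in for ETHTOOL_DEV_FEATURE_WORDS */

int main(void)
{
	features64_t active = 0x300000001ULL;   /* bits 0, 32 and 33 set */
	uint32_t blocks[FEATURE_WORDS];
	features64_t rebuilt = 0;
	int i;

	/* split into 32-bit blocks, low word first (the "get" path) */
	for (i = 0; i < FEATURE_WORDS; i++)
		blocks[i] = (uint32_t)(active >> (32 * i));

	/* reassemble the wide mask from the blocks (the "set" path) */
	for (i = 0; i < FEATURE_WORDS; i++)
		rebuilt |= (features64_t)blocks[i] << (32 * i);

	printf("blocks: %08x %08x, round-trip ok: %d\n",
	       blocks[0], blocks[1], rebuilt == active);
	return 0;
}
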
 
-static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
-       /* NETIF_F_SG */              "tx-scatter-gather",
-       /* NETIF_F_IP_CSUM */         "tx-checksum-ipv4",
-       /* NETIF_F_NO_CSUM */         "tx-checksum-unneeded",
-       /* NETIF_F_HW_CSUM */         "tx-checksum-ip-generic",
-       /* NETIF_F_IPV6_CSUM */       "tx-checksum-ipv6",
-       /* NETIF_F_HIGHDMA */         "highdma",
-       /* NETIF_F_FRAGLIST */        "tx-scatter-gather-fraglist",
-       /* NETIF_F_HW_VLAN_TX */      "tx-vlan-hw-insert",
-
-       /* NETIF_F_HW_VLAN_RX */      "rx-vlan-hw-parse",
-       /* NETIF_F_HW_VLAN_FILTER */  "rx-vlan-filter",
-       /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
-       /* NETIF_F_GSO */             "tx-generic-segmentation",
-       /* NETIF_F_LLTX */            "tx-lockless",
-       /* NETIF_F_NETNS_LOCAL */     "netns-local",
-       /* NETIF_F_GRO */             "rx-gro",
-       /* NETIF_F_LRO */             "rx-lro",
-
-       /* NETIF_F_TSO */             "tx-tcp-segmentation",
-       /* NETIF_F_UFO */             "tx-udp-fragmentation",
-       /* NETIF_F_GSO_ROBUST */      "tx-gso-robust",
-       /* NETIF_F_TSO_ECN */         "tx-tcp-ecn-segmentation",
-       /* NETIF_F_TSO6 */            "tx-tcp6-segmentation",
-       /* NETIF_F_FSO */             "tx-fcoe-segmentation",
-       "",
-       "",
-
-       /* NETIF_F_FCOE_CRC */        "tx-checksum-fcoe-crc",
-       /* NETIF_F_SCTP_CSUM */       "tx-checksum-sctp",
-       /* NETIF_F_FCOE_MTU */        "fcoe-mtu",
-       /* NETIF_F_NTUPLE */          "rx-ntuple-filter",
-       /* NETIF_F_RXHASH */          "rx-hashing",
-       /* NETIF_F_RXCSUM */          "rx-checksum",
-       /* NETIF_F_NOCACHE_COPY */    "tx-nocache-copy",
-       /* NETIF_F_LOOPBACK */        "loopback",
-};
-
 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
 {
        const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -402,7 +179,7 @@ static void __ethtool_get_strings(struct net_device *dev,
                ops->get_strings(dev, stringset, data);
 }
 
-static u32 ethtool_get_feature_mask(u32 eth_cmd)
+static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd)
 {
        /* feature masks of legacy discrete ethtool ops */
 
@@ -433,136 +210,82 @@ static u32 ethtool_get_feature_mask(u32 eth_cmd)
        }
 }
 
-static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
-{
-       const struct ethtool_ops *ops = dev->ethtool_ops;
-
-       if (!ops)
-               return NULL;
-
-       switch (ethcmd) {
-       case ETHTOOL_GTXCSUM:
-               return ops->get_tx_csum;
-       case ETHTOOL_GRXCSUM:
-               return ops->get_rx_csum;
-       case ETHTOOL_SSG:
-               return ops->get_sg;
-       case ETHTOOL_STSO:
-               return ops->get_tso;
-       case ETHTOOL_SUFO:
-               return ops->get_ufo;
-       default:
-               return NULL;
-       }
-}
-
-static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
-{
-       return !!(dev->features & NETIF_F_ALL_CSUM);
-}
-
 static int ethtool_get_one_feature(struct net_device *dev,
        char __user *useraddr, u32 ethcmd)
 {
-       u32 mask = ethtool_get_feature_mask(ethcmd);
+       netdev_features_t mask = ethtool_get_feature_mask(ethcmd);
        struct ethtool_value edata = {
                .cmd = ethcmd,
                .data = !!(dev->features & mask),
        };
 
-       /* compatibility with discrete get_ ops */
-       if (!(dev->hw_features & mask)) {
-               u32 (*actor)(struct net_device *);
-
-               actor = __ethtool_get_one_feature_actor(dev, ethcmd);
-
-               /* bug compatibility with old get_rx_csum */
-               if (ethcmd == ETHTOOL_GRXCSUM && !actor)
-                       actor = __ethtool_get_rx_csum_oldbug;
-
-               if (actor)
-                       edata.data = actor(dev);
-       }
-
        if (copy_to_user(useraddr, &edata, sizeof(edata)))
                return -EFAULT;
        return 0;
 }
 
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
-static int __ethtool_set_sg(struct net_device *dev, u32 data);
-static int __ethtool_set_tso(struct net_device *dev, u32 data);
-static int __ethtool_set_ufo(struct net_device *dev, u32 data);
-
 static int ethtool_set_one_feature(struct net_device *dev,
        void __user *useraddr, u32 ethcmd)
 {
        struct ethtool_value edata;
-       u32 mask;
+       netdev_features_t mask;
 
        if (copy_from_user(&edata, useraddr, sizeof(edata)))
                return -EFAULT;
 
        mask = ethtool_get_feature_mask(ethcmd);
        mask &= dev->hw_features;
-       if (mask) {
-               if (edata.data)
-                       dev->wanted_features |= mask;
-               else
-                       dev->wanted_features &= ~mask;
+       if (!mask)
+               return -EOPNOTSUPP;
 
-               __netdev_update_features(dev);
-               return 0;
-       }
+       if (edata.data)
+               dev->wanted_features |= mask;
+       else
+               dev->wanted_features &= ~mask;
 
-       /* Driver is not converted to ndo_fix_features or does not
-        * support changing this offload. In the latter case it won't
-        * have corresponding ethtool_ops field set.
-        *
-        * Following part is to be removed after all drivers advertise
-        * their changeable features in netdev->hw_features and stop
-        * using discrete offload setting ops.
-        */
+       __netdev_update_features(dev);
 
-       switch (ethcmd) {
-       case ETHTOOL_STXCSUM:
-               return __ethtool_set_tx_csum(dev, edata.data);
-       case ETHTOOL_SRXCSUM:
-               return __ethtool_set_rx_csum(dev, edata.data);
-       case ETHTOOL_SSG:
-               return __ethtool_set_sg(dev, edata.data);
-       case ETHTOOL_STSO:
-               return __ethtool_set_tso(dev, edata.data);
-       case ETHTOOL_SUFO:
-               return __ethtool_set_ufo(dev, edata.data);
-       default:
-               return -EOPNOTSUPP;
-       }
+       return 0;
+}
+
+#define ETH_ALL_FLAGS    (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \
+                         ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH)
+#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \
+                         NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH)
+
+static u32 __ethtool_get_flags(struct net_device *dev)
+{
+       u32 flags = 0;
+
+       if (dev->features & NETIF_F_LRO)        flags |= ETH_FLAG_LRO;
+       if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN;
+       if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN;
+       if (dev->features & NETIF_F_NTUPLE)     flags |= ETH_FLAG_NTUPLE;
+       if (dev->features & NETIF_F_RXHASH)     flags |= ETH_FLAG_RXHASH;
+
+       return flags;
 }
 
-int __ethtool_set_flags(struct net_device *dev, u32 data)
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
 {
-       u32 changed;
+       netdev_features_t features = 0, changed;
 
-       if (data & ~flags_dup_features)
+       if (data & ~ETH_ALL_FLAGS)
                return -EINVAL;
 
-       /* legacy set_flags() op */
-       if (dev->ethtool_ops->set_flags) {
-               if (unlikely(dev->hw_features & flags_dup_features))
-                       netdev_warn(dev,
-                               "driver BUG: mixed hw_features and set_flags()\n");
-               return dev->ethtool_ops->set_flags(dev, data);
-       }
+       if (data & ETH_FLAG_LRO)        features |= NETIF_F_LRO;
+       if (data & ETH_FLAG_RXVLAN)     features |= NETIF_F_HW_VLAN_RX;
+       if (data & ETH_FLAG_TXVLAN)     features |= NETIF_F_HW_VLAN_TX;
+       if (data & ETH_FLAG_NTUPLE)     features |= NETIF_F_NTUPLE;
+       if (data & ETH_FLAG_RXHASH)     features |= NETIF_F_RXHASH;
 
        /* allow changing only bits set in hw_features */
-       changed = (data ^ dev->features) & flags_dup_features;
+       changed = (features ^ dev->features) & ETH_ALL_FEATURES;
        if (changed & ~dev->hw_features)
                return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
 
        dev->wanted_features =
-               (dev->wanted_features & ~changed) | (data & dev->hw_features);
+               (dev->wanted_features & ~changed) | (features & changed);
 
        __netdev_update_features(dev);
 
@@ -1231,81 +954,6 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
        return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
 }
 
-static int __ethtool_set_sg(struct net_device *dev, u32 data)
-{
-       int err;
-
-       if (!dev->ethtool_ops->set_sg)
-               return -EOPNOTSUPP;
-
-       if (data && !(dev->features & NETIF_F_ALL_CSUM))
-               return -EINVAL;
-
-       if (!data && dev->ethtool_ops->set_tso) {
-               err = dev->ethtool_ops->set_tso(dev, 0);
-               if (err)
-                       return err;
-       }
-
-       if (!data && dev->ethtool_ops->set_ufo) {
-               err = dev->ethtool_ops->set_ufo(dev, 0);
-               if (err)
-                       return err;
-       }
-       return dev->ethtool_ops->set_sg(dev, data);
-}
-
-static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
-{
-       int err;
-
-       if (!dev->ethtool_ops->set_tx_csum)
-               return -EOPNOTSUPP;
-
-       if (!data && dev->ethtool_ops->set_sg) {
-               err = __ethtool_set_sg(dev, 0);
-               if (err)
-                       return err;
-       }
-
-       return dev->ethtool_ops->set_tx_csum(dev, data);
-}
-
-static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
-{
-       if (!dev->ethtool_ops->set_rx_csum)
-               return -EOPNOTSUPP;
-
-       if (!data)
-               dev->features &= ~NETIF_F_GRO;
-
-       return dev->ethtool_ops->set_rx_csum(dev, data);
-}
-
-static int __ethtool_set_tso(struct net_device *dev, u32 data)
-{
-       if (!dev->ethtool_ops->set_tso)
-               return -EOPNOTSUPP;
-
-       if (data && !(dev->features & NETIF_F_SG))
-               return -EINVAL;
-
-       return dev->ethtool_ops->set_tso(dev, data);
-}
-
-static int __ethtool_set_ufo(struct net_device *dev, u32 data)
-{
-       if (!dev->ethtool_ops->set_ufo)
-               return -EOPNOTSUPP;
-       if (data && !(dev->features & NETIF_F_SG))
-               return -EINVAL;
-       if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
-               (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
-                       == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
-               return -EINVAL;
-       return dev->ethtool_ops->set_ufo(dev, data);
-}
-
 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_test test;
@@ -1771,9 +1419,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
                break;
        case ETHTOOL_GFLAGS:
                rc = ethtool_get_value(dev, useraddr, ethcmd,
-                                      (dev->ethtool_ops->get_flags ?
-                                       dev->ethtool_ops->get_flags :
-                                       ethtool_op_get_flags));
+                                       __ethtool_get_flags);
                break;
        case ETHTOOL_SFLAGS:
                rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
new file mode 100644 (file)
index 0000000..0985b9b
--- /dev/null
@@ -0,0 +1,143 @@
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <net/flow_keys.h>
+
+/* copy saddr & daddr, possibly using 64bit load/store
+ * Equivalent to :     flow->src = iph->saddr;
+ *                     flow->dst = iph->daddr;
+ */
+static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+{
+       BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
+                    offsetof(typeof(*flow), src) + sizeof(flow->src));
+       memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+}
+
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+{
+       int poff, nhoff = skb_network_offset(skb);
+       u8 ip_proto;
+       __be16 proto = skb->protocol;
+
+       memset(flow, 0, sizeof(*flow));
+
+again:
+       switch (proto) {
+       case __constant_htons(ETH_P_IP): {
+               const struct iphdr *iph;
+               struct iphdr _iph;
+ip:
+               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+               if (!iph)
+                       return false;
+
+               if (ip_is_fragment(iph))
+                       ip_proto = 0;
+               else
+                       ip_proto = iph->protocol;
+               iph_to_flow_copy_addrs(flow, iph);
+               nhoff += iph->ihl * 4;
+               break;
+       }
+       case __constant_htons(ETH_P_IPV6): {
+               const struct ipv6hdr *iph;
+               struct ipv6hdr _iph;
+ipv6:
+               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+               if (!iph)
+                       return false;
+
+               ip_proto = iph->nexthdr;
+               flow->src = iph->saddr.s6_addr32[3];
+               flow->dst = iph->daddr.s6_addr32[3];
+               nhoff += sizeof(struct ipv6hdr);
+               break;
+       }
+       case __constant_htons(ETH_P_8021Q): {
+               const struct vlan_hdr *vlan;
+               struct vlan_hdr _vlan;
+
+               vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
+               if (!vlan)
+                       return false;
+
+               proto = vlan->h_vlan_encapsulated_proto;
+               nhoff += sizeof(*vlan);
+               goto again;
+       }
+       case __constant_htons(ETH_P_PPP_SES): {
+               struct {
+                       struct pppoe_hdr hdr;
+                       __be16 proto;
+               } *hdr, _hdr;
+               hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+               if (!hdr)
+                       return false;
+               proto = hdr->proto;
+               nhoff += PPPOE_SES_HLEN;
+               switch (proto) {
+               case __constant_htons(PPP_IP):
+                       goto ip;
+               case __constant_htons(PPP_IPV6):
+                       goto ipv6;
+               default:
+                       return false;
+               }
+       }
+       default:
+               return false;
+       }
+
+       switch (ip_proto) {
+       case IPPROTO_GRE: {
+               struct gre_hdr {
+                       __be16 flags;
+                       __be16 proto;
+               } *hdr, _hdr;
+
+               hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+               if (!hdr)
+                       return false;
+               /*
+                * Only look inside GRE if version zero and no
+                * routing
+                */
+               if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
+                       proto = hdr->proto;
+                       nhoff += 4;
+                       if (hdr->flags & GRE_CSUM)
+                               nhoff += 4;
+                       if (hdr->flags & GRE_KEY)
+                               nhoff += 4;
+                       if (hdr->flags & GRE_SEQ)
+                               nhoff += 4;
+                       goto again;
+               }
+               break;
+       }
+       case IPPROTO_IPIP:
+               goto again;
+       default:
+               break;
+       }
+
+       flow->ip_proto = ip_proto;
+       poff = proto_ports_offset(ip_proto);
+       if (poff >= 0) {
+               __be32 *ports, _ports;
+
+               nhoff += poff;
+               ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
+               if (ports)
+                       flow->ports = *ports;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(skb_flow_dissect);
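
Editor's note: a hedged usage sketch of the new dissector, not part of the patch. It assumes struct flow_keys exposes the src, dst, ports and ip_proto fields filled in above, and "my_hash_seed" is a hypothetical caller-provided value; jhash_3words() comes from linux/jhash.h.

#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <net/flow_keys.h>

static u32 example_flow_hash(const struct sk_buff *skb, u32 my_hash_seed)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;	/* packet could not be dissected */

	/* fold addresses, ports and protocol into one 32-bit flow hash */
	return jhash_3words((__force u32)keys.dst,
			    ((__force u32)keys.src) ^ keys.ip_proto,
			    (__force u32)keys.ports, my_hash_seed);
}
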
index 5ac07d3..d57a40a 100644 (file)
@@ -238,6 +238,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
                                   it to safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
+                               n->arp_queue_len_bytes = 0;
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
@@ -272,7 +273,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
 }
 EXPORT_SYMBOL(neigh_ifdown);
 
-static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
 {
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
@@ -287,7 +288,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
                        goto out_entries;
        }
 
-       n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+       if (tbl->entry_size)
+               n = kzalloc(tbl->entry_size, GFP_ATOMIC);
+       else {
+               int sz = sizeof(*n) + tbl->key_len;
+
+               sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
+               sz += dev->neigh_priv_len;
+               n = kzalloc(sz, GFP_ATOMIC);
+       }
        if (!n)
                goto out_entries;
 
@@ -462,7 +471,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
-       struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+       struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
        struct neigh_hash_table *nht;
 
        if (!n) {
@@ -480,11 +489,12 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                goto out_neigh_release;
        }
 
-       /* Device specific setup. */
-       if (n->parms->neigh_setup &&
-           (error = n->parms->neigh_setup(n)) < 0) {
-               rc = ERR_PTR(error);
-               goto out_neigh_release;
+       if (dev->netdev_ops->ndo_neigh_construct) {
+               error = dev->netdev_ops->ndo_neigh_construct(n);
+               if (error < 0) {
+                       rc = ERR_PTR(error);
+                       goto out_neigh_release;
+               }
        }
 
        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
@@ -677,18 +687,14 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
                neigh_parms_destroy(parms);
 }
 
-static void neigh_destroy_rcu(struct rcu_head *head)
-{
-       struct neighbour *neigh = container_of(head, struct neighbour, rcu);
-
-       kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
-}
 /*
  *     neighbour must already be out of the table;
  *
  */
 void neigh_destroy(struct neighbour *neigh)
 {
+       struct net_device *dev = neigh->dev;
+
        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 
        if (!neigh->dead) {
@@ -702,14 +708,15 @@ void neigh_destroy(struct neighbour *neigh)
                printk(KERN_WARNING "Impossible event.\n");
 
        skb_queue_purge(&neigh->arp_queue);
+       neigh->arp_queue_len_bytes = 0;
 
-       dev_put(neigh->dev);
+       dev_put(dev);
        neigh_parms_put(neigh->parms);
 
        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 
        atomic_dec(&neigh->tbl->entries);
-       call_rcu(&neigh->rcu, neigh_destroy_rcu);
+       kfree_rcu(neigh, rcu);
 }
 EXPORT_SYMBOL(neigh_destroy);
 
@@ -842,6 +849,7 @@ static void neigh_invalidate(struct neighbour *neigh)
                write_lock(&neigh->lock);
        }
        skb_queue_purge(&neigh->arp_queue);
+       neigh->arp_queue_len_bytes = 0;
 }
 
 static void neigh_probe(struct neighbour *neigh)
@@ -980,15 +988,20 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 
        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
-                       if (skb_queue_len(&neigh->arp_queue) >=
-                           neigh->parms->queue_len) {
+                       while (neigh->arp_queue_len_bytes + skb->truesize >
+                              neigh->parms->queue_len_bytes) {
                                struct sk_buff *buff;
+
                                buff = __skb_dequeue(&neigh->arp_queue);
+                               if (!buff)
+                                       break;
+                               neigh->arp_queue_len_bytes -= buff->truesize;
                                kfree_skb(buff);
                                NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
                        }
                        skb_dst_force(skb);
                        __skb_queue_tail(&neigh->arp_queue, skb);
+                       neigh->arp_queue_len_bytes += skb->truesize;
                }
                rc = 1;
        }
@@ -1167,7 +1180,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 
                        rcu_read_lock();
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
-                       if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+                       if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
                                n1 = n2;
                        n1->output(n1, skb);
                        rcu_read_unlock();
@@ -1175,6 +1188,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
+               neigh->arp_queue_len_bytes = 0;
        }
 out:
        if (update_isrouter) {
@@ -1477,11 +1491,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(tbl->parms.base_reachable_time);
 
-       if (!tbl->kmem_cachep)
-               tbl->kmem_cachep =
-                       kmem_cache_create(tbl->id, tbl->entry_size, 0,
-                                         SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-                                         NULL);
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");
@@ -1566,9 +1575,6 @@ int neigh_table_clear(struct neigh_table *tbl)
        free_percpu(tbl->stats);
        tbl->stats = NULL;
 
-       kmem_cache_destroy(tbl->kmem_cachep);
-       tbl->kmem_cachep = NULL;
-
        return 0;
 }
 EXPORT_SYMBOL(neigh_table_clear);
@@ -1747,7 +1753,11 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
                NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
 
        NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
-       NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+       NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes);
+       /* approximate value for deprecated QUEUE_LEN (in packets) */
+       NLA_PUT_U32(skb, NDTPA_QUEUE_LEN,
+                   DIV_ROUND_UP(parms->queue_len_bytes,
+                                SKB_TRUESIZE(ETH_FRAME_LEN)));
        NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
        NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
        NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
@@ -1974,7 +1984,11 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
                        switch (i) {
                        case NDTPA_QUEUE_LEN:
-                               p->queue_len = nla_get_u32(tbp[i]);
+                               p->queue_len_bytes = nla_get_u32(tbp[i]) *
+                                                    SKB_TRUESIZE(ETH_FRAME_LEN);
+                               break;
+                       case NDTPA_QUEUE_LENBYTES:
+                               p->queue_len_bytes = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_PROXY_QLEN:
                                p->proxy_qlen = nla_get_u32(tbp[i]);
@@ -2638,117 +2652,158 @@ EXPORT_SYMBOL(neigh_app_ns);
 
 #ifdef CONFIG_SYSCTL
 
-#define NEIGH_VARS_MAX 19
+static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
+                          size_t *lenp, loff_t *ppos)
+{
+       int size, ret;
+       ctl_table tmp = *ctl;
+
+       tmp.data = &size;
+       size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
+       ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+       if (write && !ret)
+               *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
+       return ret;
+}
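
Editor's note: proc_unres_qlen() keeps the legacy unres_qlen sysctl in packets while the kernel now stores bytes. A standalone sketch of that round trip follows; EXAMPLE_TRUESIZE is a hypothetical stand-in for SKB_TRUESIZE(ETH_FRAME_LEN), whose real value depends on struct sk_buff overhead.

#include <stdio.h>

#define EXAMPLE_TRUESIZE 2048u	/* hypothetical bytes charged per queued frame */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int queue_len_bytes = 3 * EXAMPLE_TRUESIZE;

	/* reading the legacy sysctl: report packets, rounding up */
	unsigned int pkts = DIV_ROUND_UP(queue_len_bytes, EXAMPLE_TRUESIZE);

	/* writing the legacy sysctl: convert packets back to bytes */
	unsigned int bytes = pkts * EXAMPLE_TRUESIZE;

	printf("%u bytes -> %u packets -> %u bytes\n",
	       queue_len_bytes, pkts, bytes);
	return 0;
}
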
+
+enum {
+       NEIGH_VAR_MCAST_PROBE,
+       NEIGH_VAR_UCAST_PROBE,
+       NEIGH_VAR_APP_PROBE,
+       NEIGH_VAR_RETRANS_TIME,
+       NEIGH_VAR_BASE_REACHABLE_TIME,
+       NEIGH_VAR_DELAY_PROBE_TIME,
+       NEIGH_VAR_GC_STALETIME,
+       NEIGH_VAR_QUEUE_LEN,
+       NEIGH_VAR_QUEUE_LEN_BYTES,
+       NEIGH_VAR_PROXY_QLEN,
+       NEIGH_VAR_ANYCAST_DELAY,
+       NEIGH_VAR_PROXY_DELAY,
+       NEIGH_VAR_LOCKTIME,
+       NEIGH_VAR_RETRANS_TIME_MS,
+       NEIGH_VAR_BASE_REACHABLE_TIME_MS,
+       NEIGH_VAR_GC_INTERVAL,
+       NEIGH_VAR_GC_THRESH1,
+       NEIGH_VAR_GC_THRESH2,
+       NEIGH_VAR_GC_THRESH3,
+       NEIGH_VAR_MAX
+};
 
 static struct neigh_sysctl_table {
        struct ctl_table_header *sysctl_header;
-       struct ctl_table neigh_vars[NEIGH_VARS_MAX];
+       struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
        char *dev_name;
 } neigh_sysctl_template __read_mostly = {
        .neigh_vars = {
-               {
+               [NEIGH_VAR_MCAST_PROBE] = {
                        .procname       = "mcast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_UCAST_PROBE] = {
                        .procname       = "ucast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_APP_PROBE] = {
                        .procname       = "app_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_RETRANS_TIME] = {
                        .procname       = "retrans_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_BASE_REACHABLE_TIME] = {
                        .procname       = "base_reachable_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_DELAY_PROBE_TIME] = {
                        .procname       = "delay_first_probe_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_GC_STALETIME] = {
                        .procname       = "gc_stale_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_QUEUE_LEN] = {
                        .procname       = "unres_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
+                       .proc_handler   = proc_unres_qlen,
+               },
+               [NEIGH_VAR_QUEUE_LEN_BYTES] = {
+                       .procname       = "unres_qlen_bytes",
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_PROXY_QLEN] = {
                        .procname       = "proxy_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_ANYCAST_DELAY] = {
                        .procname       = "anycast_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_PROXY_DELAY] = {
                        .procname       = "proxy_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_LOCKTIME] = {
                        .procname       = "locktime",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_userhz_jiffies,
                },
-               {
+               [NEIGH_VAR_RETRANS_TIME_MS] = {
                        .procname       = "retrans_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_ms_jiffies,
                },
-               {
+               [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
                        .procname       = "base_reachable_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_ms_jiffies,
                },
-               {
+               [NEIGH_VAR_GC_INTERVAL] = {
                        .procname       = "gc_interval",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_jiffies,
                },
-               {
+               [NEIGH_VAR_GC_THRESH1] = {
                        .procname       = "gc_thresh1",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_GC_THRESH2] = {
                        .procname       = "gc_thresh2",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
-               {
+               [NEIGH_VAR_GC_THRESH3] = {
                        .procname       = "gc_thresh3",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
@@ -2781,47 +2836,49 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
        if (!t)
                goto err;
 
-       t->neigh_vars[0].data  = &p->mcast_probes;
-       t->neigh_vars[1].data  = &p->ucast_probes;
-       t->neigh_vars[2].data  = &p->app_probes;
-       t->neigh_vars[3].data  = &p->retrans_time;
-       t->neigh_vars[4].data  = &p->base_reachable_time;
-       t->neigh_vars[5].data  = &p->delay_probe_time;
-       t->neigh_vars[6].data  = &p->gc_staletime;
-       t->neigh_vars[7].data  = &p->queue_len;
-       t->neigh_vars[8].data  = &p->proxy_qlen;
-       t->neigh_vars[9].data  = &p->anycast_delay;
-       t->neigh_vars[10].data = &p->proxy_delay;
-       t->neigh_vars[11].data = &p->locktime;
-       t->neigh_vars[12].data  = &p->retrans_time;
-       t->neigh_vars[13].data  = &p->base_reachable_time;
+       t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data  = &p->mcast_probes;
+       t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data  = &p->ucast_probes;
+       t->neigh_vars[NEIGH_VAR_APP_PROBE].data  = &p->app_probes;
+       t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data  = &p->retrans_time;
+       t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data  = &p->base_reachable_time;
+       t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data  = &p->delay_probe_time;
+       t->neigh_vars[NEIGH_VAR_GC_STALETIME].data  = &p->gc_staletime;
+       t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data  = &p->queue_len_bytes;
+       t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data  = &p->queue_len_bytes;
+       t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data  = &p->proxy_qlen;
+       t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data  = &p->anycast_delay;
+       t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
+       t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
+       t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data  = &p->retrans_time;
+       t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data  = &p->base_reachable_time;
 
        if (dev) {
                dev_name_source = dev->name;
                /* Terminate the table early */
-               memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+               memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
+                      sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
        } else {
                dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
-               t->neigh_vars[14].data = (int *)(p + 1);
-               t->neigh_vars[15].data = (int *)(p + 1) + 1;
-               t->neigh_vars[16].data = (int *)(p + 1) + 2;
-               t->neigh_vars[17].data = (int *)(p + 1) + 3;
+               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
+               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
        }
 
 
        if (handler) {
                /* RetransTime */
-               t->neigh_vars[3].proc_handler = handler;
-               t->neigh_vars[3].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
                /* ReachableTime */
-               t->neigh_vars[4].proc_handler = handler;
-               t->neigh_vars[4].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
                /* RetransTime (in milliseconds)*/
-               t->neigh_vars[12].proc_handler = handler;
-               t->neigh_vars[12].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
                /* ReachableTime (in milliseconds) */
-               t->neigh_vars[13].proc_handler = handler;
-               t->neigh_vars[13].extra1 = dev;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+               t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
        }
 
        t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
index c71c434..9d13463 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/wireless.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
 #include <net/wext.h>
 
 #include "net-sysfs.h"
@@ -606,9 +607,12 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);
 
-       if (old_map)
+       if (map)
+               jump_label_inc(&rps_needed);
+       if (old_map) {
                kfree_rcu(old_map, rcu);
-
+               jump_label_dec(&rps_needed);
+       }
        free_cpumask_var(mask);
        return len;
 }
@@ -780,7 +784,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 #endif
 }
 
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
 /*
  * netdev_queue sysfs structures and functions.
  */
@@ -826,6 +830,133 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .store = netdev_queue_attr_store,
 };
 
+static ssize_t show_trans_timeout(struct netdev_queue *queue,
+                                 struct netdev_queue_attribute *attribute,
+                                 char *buf)
+{
+       unsigned long trans_timeout;
+
+       spin_lock_irq(&queue->_xmit_lock);
+       trans_timeout = queue->trans_timeout;
+       spin_unlock_irq(&queue->_xmit_lock);
+
+       return sprintf(buf, "%lu", trans_timeout);
+}
+
+static struct netdev_queue_attribute queue_trans_timeout =
+       __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
+
+#ifdef CONFIG_BQL
+/*
+ * Byte queue limits sysfs structures and functions.
+ */
+static ssize_t bql_show(char *buf, unsigned int value)
+{
+       return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t bql_set(const char *buf, const size_t count,
+                      unsigned int *pvalue)
+{
+       unsigned int value;
+       int err;
+
+       if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
+               value = DQL_MAX_LIMIT;
+       else {
+               err = kstrtouint(buf, 10, &value);
+               if (err < 0)
+                       return err;
+               if (value > DQL_MAX_LIMIT)
+                       return -EINVAL;
+       }
+
+       *pvalue = value;
+
+       return count;
+}
+
+static ssize_t bql_show_hold_time(struct netdev_queue *queue,
+                                 struct netdev_queue_attribute *attr,
+                                 char *buf)
+{
+       struct dql *dql = &queue->dql;
+
+       return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
+}
+
+static ssize_t bql_set_hold_time(struct netdev_queue *queue,
+                                struct netdev_queue_attribute *attribute,
+                                const char *buf, size_t len)
+{
+       struct dql *dql = &queue->dql;
+       unsigned value;
+       int err;
+
+       err = kstrtouint(buf, 10, &value);
+       if (err < 0)
+               return err;
+
+       dql->slack_hold_time = msecs_to_jiffies(value);
+
+       return len;
+}
+
+static struct netdev_queue_attribute bql_hold_time_attribute =
+       __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
+           bql_set_hold_time);
+
+static ssize_t bql_show_inflight(struct netdev_queue *queue,
+                                struct netdev_queue_attribute *attr,
+                                char *buf)
+{
+       struct dql *dql = &queue->dql;
+
+       return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
+}
+
+static struct netdev_queue_attribute bql_inflight_attribute =
+       __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL);
+
+#define BQL_ATTR(NAME, FIELD)                                          \
+static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,           \
+                                struct netdev_queue_attribute *attr,   \
+                                char *buf)                             \
+{                                                                      \
+       return bql_show(buf, queue->dql.FIELD);                         \
+}                                                                      \
+                                                                       \
+static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,            \
+                               struct netdev_queue_attribute *attr,    \
+                               const char *buf, size_t len)            \
+{                                                                      \
+       return bql_set(buf, len, &queue->dql.FIELD);                    \
+}                                                                      \
+                                                                       \
+static struct netdev_queue_attribute bql_ ## NAME ## _attribute =      \
+       __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
+           bql_set_ ## NAME);
+
+BQL_ATTR(limit, limit)
+BQL_ATTR(limit_max, max_limit)
+BQL_ATTR(limit_min, min_limit)
+
+static struct attribute *dql_attrs[] = {
+       &bql_limit_attribute.attr,
+       &bql_limit_max_attribute.attr,
+       &bql_limit_min_attribute.attr,
+       &bql_hold_time_attribute.attr,
+       &bql_inflight_attribute.attr,
+       NULL
+};
+
+static struct attribute_group dql_group = {
+       .name  = "byte_queue_limits",
+       .attrs  = dql_attrs,
+};
+#endif /* CONFIG_BQL */
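
Editor's note: for reference, this is what one invocation, BQL_ATTR(limit, limit), expands to under the macro above; the preprocessor generates it, it is not written out in the patch.

static ssize_t bql_show_limit(struct netdev_queue *queue,
			      struct netdev_queue_attribute *attr,
			      char *buf)
{
	return bql_show(buf, queue->dql.limit);
}

static ssize_t bql_set_limit(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attr,
			     const char *buf, size_t len)
{
	return bql_set(buf, len, &queue->dql.limit);
}

static struct netdev_queue_attribute bql_limit_attribute =
	__ATTR(limit, S_IRUGO | S_IWUSR, bql_show_limit, bql_set_limit);
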
+
+#ifdef CONFIG_XPS
 static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
        struct net_device *dev = queue->dev;
@@ -890,6 +1021,52 @@ static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)            \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
+static void xps_queue_release(struct netdev_queue *queue)
+{
+       struct net_device *dev = queue->dev;
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       unsigned long index;
+       int i, pos, nonempty = 0;
+
+       index = get_netdev_queue_index(queue);
+
+       mutex_lock(&xps_map_mutex);
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       if (dev_maps) {
+               for_each_possible_cpu(i) {
+                       map = xmap_dereference(dev_maps->cpu_map[i]);
+                       if (!map)
+                               continue;
+
+                       for (pos = 0; pos < map->len; pos++)
+                               if (map->queues[pos] == index)
+                                       break;
+
+                       if (pos < map->len) {
+                               if (map->len > 1)
+                                       map->queues[pos] =
+                                           map->queues[--map->len];
+                               else {
+                                       RCU_INIT_POINTER(dev_maps->cpu_map[i],
+                                           NULL);
+                                       kfree_rcu(map, rcu);
+                                       map = NULL;
+                               }
+                       }
+                       if (map)
+                               nonempty = 1;
+               }
+
+               if (!nonempty) {
+                       RCU_INIT_POINTER(dev->xps_maps, NULL);
+                       kfree_rcu(dev_maps, rcu);
+               }
+       }
+       mutex_unlock(&xps_map_mutex);
+}
+
 static ssize_t store_xps_map(struct netdev_queue *queue,
                      struct netdev_queue_attribute *attribute,
                      const char *buf, size_t len)
@@ -901,7 +1078,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
        struct xps_map *map, *new_map;
        struct xps_dev_maps *dev_maps, *new_dev_maps;
        int nonempty = 0;
-       int numa_node = -2;
+       int numa_node_id = -2;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -944,10 +1121,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
                need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
 #ifdef CONFIG_NUMA
                if (need_set) {
-                       if (numa_node == -2)
-                               numa_node = cpu_to_node(cpu);
-                       else if (numa_node != cpu_to_node(cpu))
-                               numa_node = -1;
+                       if (numa_node_id == -2)
+                               numa_node_id = cpu_to_node(cpu);
+                       else if (numa_node_id != cpu_to_node(cpu))
+                               numa_node_id = -1;
                }
 #endif
                if (need_set && pos >= map_len) {
@@ -997,7 +1174,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
        if (dev_maps)
                kfree_rcu(dev_maps, rcu);
 
-       netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
+       netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
                                            NUMA_NO_NODE);
 
        mutex_unlock(&xps_map_mutex);
@@ -1020,58 +1197,23 @@ error:
 
 static struct netdev_queue_attribute xps_cpus_attribute =
     __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+#endif /* CONFIG_XPS */
 
 static struct attribute *netdev_queue_default_attrs[] = {
+       &queue_trans_timeout.attr,
+#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
+#endif
        NULL
 };
 
 static void netdev_queue_release(struct kobject *kobj)
 {
        struct netdev_queue *queue = to_netdev_queue(kobj);
-       struct net_device *dev = queue->dev;
-       struct xps_dev_maps *dev_maps;
-       struct xps_map *map;
-       unsigned long index;
-       int i, pos, nonempty = 0;
 
-       index = get_netdev_queue_index(queue);
-
-       mutex_lock(&xps_map_mutex);
-       dev_maps = xmap_dereference(dev->xps_maps);
-
-       if (dev_maps) {
-               for_each_possible_cpu(i) {
-                       map = xmap_dereference(dev_maps->cpu_map[i]);
-                       if (!map)
-                               continue;
-
-                       for (pos = 0; pos < map->len; pos++)
-                               if (map->queues[pos] == index)
-                                       break;
-
-                       if (pos < map->len) {
-                               if (map->len > 1)
-                                       map->queues[pos] =
-                                           map->queues[--map->len];
-                               else {
-                                       RCU_INIT_POINTER(dev_maps->cpu_map[i],
-                                           NULL);
-                                       kfree_rcu(map, rcu);
-                                       map = NULL;
-                               }
-                       }
-                       if (map)
-                               nonempty = 1;
-               }
-
-               if (!nonempty) {
-                       RCU_INIT_POINTER(dev->xps_maps, NULL);
-                       kfree_rcu(dev_maps, rcu);
-               }
-       }
-
-       mutex_unlock(&xps_map_mutex);
+#ifdef CONFIG_XPS
+       xps_queue_release(queue);
+#endif
 
        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
@@ -1092,22 +1234,29 @@ static int netdev_queue_add_kobject(struct net_device *net, int index)
        kobj->kset = net->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
-       if (error) {
-               kobject_put(kobj);
-               return error;
-       }
+       if (error)
+               goto exit;
+
+#ifdef CONFIG_BQL
+       error = sysfs_create_group(kobj, &dql_group);
+       if (error)
+               goto exit;
+#endif
 
        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);
 
+       return 0;
+exit:
+       kobject_put(kobj);
        return error;
 }
-#endif /* CONFIG_XPS */
+#endif /* CONFIG_SYSFS */
 
 int
 netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 {
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SYSFS
        int i;
        int error = 0;
 
@@ -1119,20 +1268,26 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
                }
        }
 
-       while (--i >= new_num)
-               kobject_put(&net->_tx[i].kobj);
+       while (--i >= new_num) {
+               struct netdev_queue *queue = net->_tx + i;
+
+#ifdef CONFIG_BQL
+               sysfs_remove_group(&queue->kobj, &dql_group);
+#endif
+               kobject_put(&queue->kobj);
+       }
 
        return error;
 #else
        return 0;
-#endif
+#endif /* CONFIG_SYSFS */
 }
 
 static int register_queue_kobjects(struct net_device *net)
 {
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
 
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        net->queues_kset = kset_create_and_add("queues",
            NULL, &net->dev.kobj);
        if (!net->queues_kset)
@@ -1173,7 +1328,7 @@ static void remove_queue_kobjects(struct net_device *net)
 
        net_rx_queue_update_kobjects(net, real_rx, 0);
        netdev_queue_update_kobjects(net, real_tx, 0);
-#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
+#ifdef CONFIG_SYSFS
        kset_unregister(net->queues_kset);
 #endif
 }
index cf64c1f..0d38808 100644 (file)
@@ -76,7 +76,7 @@ static void queue_process(struct work_struct *work)
 
                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_frozen_or_stopped(txq) ||
+               if (netif_xmit_frozen_or_stopped(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
@@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
-                               if (!netif_tx_queue_stopped(txq)) {
+                               if (!netif_xmit_stopped(txq)) {
                                        status = ops->ndo_start_xmit(skb, dev);
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
@@ -422,6 +422,7 @@ static void arp_reply(struct sk_buff *skb)
        struct sk_buff *send_skb;
        struct netpoll *np, *tmp;
        unsigned long flags;
+       int hlen, tlen;
        int hits = 0;
 
        if (list_empty(&npinfo->rx_np))
@@ -479,8 +480,9 @@ static void arp_reply(struct sk_buff *skb)
                if (tip != np->local_ip)
                        continue;
 
-               send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
-                                   LL_RESERVED_SPACE(np->dev));
+               hlen = LL_RESERVED_SPACE(np->dev);
+               tlen = np->dev->needed_tailroom;
+               send_skb = find_skb(np, size + hlen + tlen, hlen);
                if (!send_skb)
                        continue;
 
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
new file mode 100644 (file)
index 0000000..3a9fd48
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * net/core/netprio_cgroup.c   Priority Control Group
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * Authors:    Neil Horman <nhorman@tuxdriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/cgroup.h>
+#include <linux/rcupdate.h>
+#include <linux/atomic.h>
+#include <net/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/sock.h>
+#include <net/netprio_cgroup.h>
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+                                              struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+struct cgroup_subsys net_prio_subsys = {
+       .name           = "net_prio",
+       .create         = cgrp_create,
+       .destroy        = cgrp_destroy,
+       .populate       = cgrp_populate,
+#ifdef CONFIG_NETPRIO_CGROUP
+       .subsys_id      = net_prio_subsys_id,
+#endif
+       .module         = THIS_MODULE
+};
+
+#define PRIOIDX_SZ 128
+
+static unsigned long prioidx_map[PRIOIDX_SZ];
+static DEFINE_SPINLOCK(prioidx_map_lock);
+static atomic_t max_prioidx = ATOMIC_INIT(0);
+
+static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
+{
+       return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
+                           struct cgroup_netprio_state, css);
+}
+
+static int get_prioidx(u32 *prio)
+{
+       unsigned long flags;
+       u32 prioidx;
+
+       spin_lock_irqsave(&prioidx_map_lock, flags);
+       prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ);
+       set_bit(prioidx, prioidx_map);
+       spin_unlock_irqrestore(&prioidx_map_lock, flags);
+       if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ)
+               return -ENOSPC;
+
+       atomic_set(&max_prioidx, prioidx);
+       *prio = prioidx;
+       return 0;
+}
+
+static void put_prioidx(u32 idx)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&prioidx_map_lock, flags);
+       clear_bit(idx, prioidx_map);
+       spin_unlock_irqrestore(&prioidx_map_lock, flags);
+}
+
+static void extend_netdev_table(struct net_device *dev, u32 new_len)
+{
+       size_t new_size = sizeof(struct netprio_map) +
+                          ((sizeof(u32) * new_len));
+       struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL);
+       struct netprio_map *old_priomap;
+       int i;
+
+       old_priomap  = rtnl_dereference(dev->priomap);
+
+       if (!new_priomap) {
+               printk(KERN_WARNING "Unable to alloc new priomap!\n");
+               return;
+       }
+
+       for (i = 0;
+            old_priomap && (i < old_priomap->priomap_len);
+            i++)
+               new_priomap->priomap[i] = old_priomap->priomap[i];
+
+       new_priomap->priomap_len = new_len;
+
+       rcu_assign_pointer(dev->priomap, new_priomap);
+       if (old_priomap)
+               kfree_rcu(old_priomap, rcu);
+}
+
+static void update_netdev_tables(void)
+{
+       struct net_device *dev;
+       u32 max_len = atomic_read(&max_prioidx);
+       struct netprio_map *map;
+
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               map = rtnl_dereference(dev->priomap);
+               if ((!map) ||
+                   (map->priomap_len < max_len))
+                       extend_netdev_table(dev, max_len);
+       }
+       rtnl_unlock();
+}
+
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+                                                struct cgroup *cgrp)
+{
+       struct cgroup_netprio_state *cs;
+       int ret;
+
+       cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+       if (!cs)
+               return ERR_PTR(-ENOMEM);
+
+       if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
+               kfree(cs);
+               return ERR_PTR(-EINVAL);
+       }
+
+       ret = get_prioidx(&cs->prioidx);
+       if (ret != 0) {
+               printk(KERN_WARNING "No space in priority index array\n");
+               kfree(cs);
+               return ERR_PTR(ret);
+       }
+
+       return &cs->css;
+}
+
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+       struct cgroup_netprio_state *cs;
+       struct net_device *dev;
+       struct netprio_map *map;
+
+       cs = cgrp_netprio_state(cgrp);
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               map = rtnl_dereference(dev->priomap);
+               if (map)
+                       map->priomap[cs->prioidx] = 0;
+       }
+       rtnl_unlock();
+       put_prioidx(cs->prioidx);
+       kfree(cs);
+}
+
+static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+{
+       return (u64)cgrp_netprio_state(cgrp)->prioidx;
+}
+
+static int read_priomap(struct cgroup *cont, struct cftype *cft,
+                       struct cgroup_map_cb *cb)
+{
+       struct net_device *dev;
+       u32 prioidx = cgrp_netprio_state(cont)->prioidx;
+       u32 priority;
+       struct netprio_map *map;
+
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, dev) {
+               map = rcu_dereference(dev->priomap);
+               priority = map ? map->priomap[prioidx] : 0;
+               cb->fill(cb, dev->name, priority);
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
+static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+                        const char *buffer)
+{
+       char *devname = kstrdup(buffer, GFP_KERNEL);
+       int ret = -EINVAL;
+       u32 prioidx = cgrp_netprio_state(cgrp)->prioidx;
+       unsigned long priority;
+       char *priostr;
+       struct net_device *dev;
+       struct netprio_map *map;
+
+       if (!devname)
+               return -ENOMEM;
+
+       /*
+        * Minimally sized valid priomap string
+        */
+       if (strlen(devname) < 3)
+               goto out_free_devname;
+
+       priostr = strstr(devname, " ");
+       if (!priostr)
+               goto out_free_devname;
+
+       /*
+        * Separate the devname from the associated priority
+        * and advance the priostr pointer to the priority value
+        */
+       *priostr = '\0';
+       priostr++;
+
+       /*
+        * If priostr points at the terminating NUL, we're at the end of the
+        * passed-in string, and it's not a valid write
+        */
+       if (*priostr == '\0')
+               goto out_free_devname;
+
+       ret = kstrtoul(priostr, 10, &priority);
+       if (ret < 0)
+               goto out_free_devname;
+
+       ret = -ENODEV;
+
+       dev = dev_get_by_name(&init_net, devname);
+       if (!dev)
+               goto out_free_devname;
+
+       update_netdev_tables();
+       ret = 0;
+       rcu_read_lock();
+       map = rcu_dereference(dev->priomap);
+       if (map)
+               map->priomap[prioidx] = priority;
+       rcu_read_unlock();
+       dev_put(dev);
+
+out_free_devname:
+       kfree(devname);
+       return ret;
+}
+
+static struct cftype ss_files[] = {
+       {
+               .name = "prioidx",
+               .read_u64 = read_prioidx,
+       },
+       {
+               .name = "ifpriomap",
+               .read_map = read_priomap,
+               .write_string = write_priomap,
+       },
+};
+
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
+{
+       return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
+}
+
+static int netprio_device_event(struct notifier_block *unused,
+                               unsigned long event, void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct netprio_map *old;
+       u32 max_len = atomic_read(&max_prioidx);
+
+       /*
+        * Note this is called with rtnl_lock held, so we have update-side
+        * protection on our RCU assignments.
+        */
+
+       switch (event) {
+
+       case NETDEV_REGISTER:
+               if (max_len)
+                       extend_netdev_table(dev, max_len);
+               break;
+       case NETDEV_UNREGISTER:
+               old = rtnl_dereference(dev->priomap);
+               RCU_INIT_POINTER(dev->priomap, NULL);
+               if (old)
+                       kfree_rcu(old, rcu);
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block netprio_device_notifier = {
+       .notifier_call = netprio_device_event
+};
+
+static int __init init_cgroup_netprio(void)
+{
+       int ret;
+
+       ret = cgroup_load_subsys(&net_prio_subsys);
+       if (ret)
+               goto out;
+#ifndef CONFIG_NETPRIO_CGROUP
+       smp_wmb();
+       net_prio_subsys_id = net_prio_subsys.subsys_id;
+#endif
+
+       register_netdevice_notifier(&netprio_device_notifier);
+
+out:
+       return ret;
+}
+
+static void __exit exit_cgroup_netprio(void)
+{
+       struct netprio_map *old;
+       struct net_device *dev;
+
+       unregister_netdevice_notifier(&netprio_device_notifier);
+
+       cgroup_unload_subsys(&net_prio_subsys);
+
+#ifndef CONFIG_NETPRIO_CGROUP
+       net_prio_subsys_id = -1;
+       synchronize_rcu();
+#endif
+
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               old = rtnl_dereference(dev->priomap);
+               RCU_INIT_POINTER(dev->priomap, NULL);
+               if (old)
+                       kfree_rcu(old, rcu);
+       }
+       rtnl_unlock();
+}
+
+module_init(init_cgroup_netprio);
+module_exit(exit_cgroup_netprio);
+MODULE_LICENSE("GPL v2");
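
As a rough illustration of the interface the file above exposes, the minimal user-space sketch below writes a "<device> <priority>" pair into a group's net_prio.ifpriomap, which is the format write_priomap() parses. The mount point /sys/fs/cgroup/net_prio and the group name "web" are illustrative assumptions, not something this patch establishes.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        /* Assumed layout: net_prio hierarchy mounted at /sys/fs/cgroup/net_prio,
         * with a child group named "web" already created via mkdir. */
        FILE *f = fopen("/sys/fs/cgroup/net_prio/web/net_prio.ifpriomap", "w");

        if (!f) {
                perror("fopen");
                return EXIT_FAILURE;
        }
        /* write_priomap() splits on the first space: device name, then priority */
        if (fprintf(f, "eth0 5\n") < 0)
                perror("fprintf");
        fclose(f);
        return EXIT_SUCCESS;
}
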
index 0001c24..449fe0f 100644 (file)
@@ -1304,7 +1304,7 @@ static ssize_t pktgen_if_write(struct file *file,
                scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
                snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
 
-               ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
+               pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;
 
                if (debug)
                        printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
@@ -1327,8 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file,
                scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
                snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
 
-               ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
-                              &pkt_dev->min_in6_daddr);
+               pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
                if (debug)
                        printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
 
@@ -1371,7 +1370,7 @@ static ssize_t pktgen_if_write(struct file *file,
                scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
                snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
 
-               ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
+               pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;
 
                if (debug)
                        printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
@@ -2079,9 +2078,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
                                     ifp = ifp->if_next) {
                                        if (ifp->scope == IFA_LINK &&
                                            !(ifp->flags & IFA_F_TENTATIVE)) {
-                                               ipv6_addr_copy(&pkt_dev->
-                                                              cur_in6_saddr,
-                                                              &ifp->addr);
+                                               pkt_dev->cur_in6_saddr = ifp->addr;
                                                err = 0;
                                                break;
                                        }
@@ -2958,8 +2955,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        iph->payload_len = htons(sizeof(struct udphdr) + datalen);
        iph->nexthdr = IPPROTO_UDP;
 
-       ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr);
-       ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr);
+       iph->daddr = pkt_dev->cur_in6_daddr;
+       iph->saddr = pkt_dev->cur_in6_saddr;
 
        skb->mac_header = (skb->network_header - ETH_HLEN -
                           pkt_dev->pkt_overhead);
@@ -3345,7 +3342,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
        __netif_tx_lock_bh(txq);
 
-       if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+       if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
                ret = NETDEV_TX_BUSY;
                pkt_dev->last_ok = 0;
                goto unlock;
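
The pktgen hunks above (and several DCCP hunks later in this merge) replace ipv6_addr_copy() calls with plain structure assignment. A minimal standalone sketch of why the two are equivalent, using a stand-in type rather than the real struct in6_addr:

#include <string.h>

struct in6_addr_example {              /* stand-in for struct in6_addr */
        unsigned char s6_addr[16];     /* fixed-size array, no pointers */
};

/* What ipv6_addr_copy() amounted to: a fixed-size memcpy of the address. */
static void copy_by_memcpy(struct in6_addr_example *dst,
                           const struct in6_addr_example *src)
{
        memcpy(dst, src, sizeof(*dst));
}

/* The replacement pattern: structure assignment copies the whole array too,
 * so *dst = *src expresses the same operation more directly. */
static void copy_by_assignment(struct in6_addr_example *dst,
                               const struct in6_addr_example *src)
{
        *dst = *src;
}
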
index 9083e82..dbf2dda 100644 (file)
@@ -273,6 +273,17 @@ EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
 static LIST_HEAD(link_ops);
 
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+       const struct rtnl_link_ops *ops;
+
+       list_for_each_entry(ops, &link_ops, list) {
+               if (!strcmp(ops->kind, kind))
+                       return ops;
+       }
+       return NULL;
+}
+
 /**
  * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
  * @ops: struct rtnl_link_ops * to register
@@ -285,6 +296,9 @@ static LIST_HEAD(link_ops);
  */
 int __rtnl_link_register(struct rtnl_link_ops *ops)
 {
+       if (rtnl_link_ops_get(ops->kind))
+               return -EEXIST;
+
        if (!ops->dellink)
                ops->dellink = unregister_netdevice_queue;
 
@@ -351,17 +365,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
 }
 EXPORT_SYMBOL_GPL(rtnl_link_unregister);
 
-static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
-{
-       const struct rtnl_link_ops *ops;
-
-       list_for_each_entry(ops, &link_ops, list) {
-               if (!strcmp(ops->kind, kind))
-                       return ops;
-       }
-       return NULL;
-}
-
 static size_t rtnl_link_get_size(const struct net_device *dev)
 {
        const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
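
With rtnl_link_ops_get() moved above __rtnl_link_register() and the new -EEXIST check, registering two link types with the same ->kind now fails cleanly. A rough sketch of what a caller would observe (the ops and the "examplelink" kind are made up for illustration; real ops also supply setup/newlink callbacks):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <net/rtnetlink.h>

static struct rtnl_link_ops example_ops_a = {
        .kind = "examplelink",
};

static struct rtnl_link_ops example_ops_b = {
        .kind = "examplelink",          /* same kind string on purpose */
};

static int __init example_duplicate_kind(void)
{
        int err = rtnl_link_register(&example_ops_a);   /* first one wins */

        if (err)
                return err;

        err = rtnl_link_register(&example_ops_b);       /* now returns -EEXIST */
        if (err == -EEXIST)
                pr_info("duplicate kind rejected as expected\n");

        rtnl_link_unregister(&example_ops_a);
        return 0;
}
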
index 925991a..9fbca46 100644 (file)
@@ -36,7 +36,7 @@ static u32 seq_scale(u32 seq)
 }
 #endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
                                   __be16 sport, __be16 dport)
 {
@@ -156,7 +156,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 }
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                  __be16 sport, __be16 dport)
 {
index 3c30ee4..fd36462 100644 (file)
@@ -245,6 +245,55 @@ nodata:
 EXPORT_SYMBOL(__alloc_skb);
 
 /**
+ * build_skb - build a network buffer
+ * @data: data buffer provided by caller
+ *
+ * Allocate a new &sk_buff. Caller provides space holding head and
+ * skb_shared_info. @data must have been allocated by kmalloc().
+ * The return is the new skb buffer.
+ * On a failure the return is %NULL, and @data is not freed.
+ * Notes:
+ *  Before IO, the driver allocates only the data buffer where the NIC puts
+ *  the incoming frame. The driver should add room at head (NET_SKB_PAD) and
+ *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
+ *  After IO, the driver calls build_skb() to allocate the sk_buff and
+ *  populate it before giving the packet to the stack.
+ *  RX rings only contain data buffers, not full skbs.
+ */
+struct sk_buff *build_skb(void *data)
+{
+       struct skb_shared_info *shinfo;
+       struct sk_buff *skb;
+       unsigned int size;
+
+       skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+
+       size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       memset(skb, 0, offsetof(struct sk_buff, tail));
+       skb->truesize = SKB_TRUESIZE(size);
+       atomic_set(&skb->users, 1);
+       skb->head = data;
+       skb->data = data;
+       skb_reset_tail_pointer(skb);
+       skb->end = skb->tail + size;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+       skb->mac_header = ~0U;
+#endif
+
+       /* make sure we initialize shinfo sequentially */
+       shinfo = skb_shinfo(skb);
+       memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+       atomic_set(&shinfo->dataref, 1);
+       kmemcheck_annotate_variable(shinfo->destructor_arg);
+
+       return skb;
+}
+EXPORT_SYMBOL(build_skb);
+
+/**
  *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *     @dev: network device to receive on
  *     @length: length to allocate
@@ -791,8 +840,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 EXPORT_SYMBOL(skb_copy);
 
 /**
- *     pskb_copy       -       create copy of an sk_buff with private head.
+ *     __pskb_copy     -       create copy of an sk_buff with private head.
  *     @skb: buffer to copy
+ *     @headroom: headroom of new skb
  *     @gfp_mask: allocation priority
  *
  *     Make a copy of both an &sk_buff and part of its data, located
@@ -803,16 +853,16 @@ EXPORT_SYMBOL(skb_copy);
  *     The returned buffer has a reference count of 1.
  */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
 {
-       unsigned int size = skb_end_pointer(skb) - skb->head;
+       unsigned int size = skb_headlen(skb) + headroom;
        struct sk_buff *n = alloc_skb(size, gfp_mask);
 
        if (!n)
                goto out;
 
        /* Set the data pointer */
-       skb_reserve(n, skb_headroom(skb));
+       skb_reserve(n, headroom);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
@@ -848,7 +898,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 out:
        return n;
 }
-EXPORT_SYMBOL(pskb_copy);
+EXPORT_SYMBOL(__pskb_copy);
 
 /**
  *     pskb_expand_head - reallocate header of &sk_buff
@@ -2621,7 +2671,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
  *     a pointer to the first in a list of new skbs for the segments.
  *     In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
 {
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
@@ -3169,6 +3219,26 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 }
 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
 
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+{
+       struct sock *sk = skb->sk;
+       struct sock_exterr_skb *serr;
+       int err;
+
+       skb->wifi_acked_valid = 1;
+       skb->wifi_acked = acked;
+
+       serr = SKB_EXT_ERR(skb);
+       memset(serr, 0, sizeof(*serr));
+       serr->ee.ee_errno = ENOMSG;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+
+       err = sock_queue_err_skb(sk, skb);
+       if (err)
+               kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+
 
 /**
  * skb_partial_csum_set - set up and verify partial csum values for packet
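
The build_skb() helper added above leaves buffer sizing to the caller: headroom at the front, the received frame, and aligned skb_shared_info space at the tail, all in one kmalloc() area. A driver-style sketch of that contract, with made-up names and a memcpy standing in for DMA:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct sk_buff *example_rx_to_skb(const void *frame, unsigned int frame_len)
{
        unsigned int size = NET_SKB_PAD + frame_len +
                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        void *data = kmalloc(size, GFP_ATOMIC);    /* buffer a NIC would DMA into */
        struct sk_buff *skb;

        if (!data)
                return NULL;

        /* Stand-in for the device writing the frame after the headroom. */
        memcpy(data + NET_SKB_PAD, frame, frame_len);

        skb = build_skb(data);                     /* wrap the buffer, no copy */
        if (!skb) {
                kfree(data);                       /* build_skb() does not free @data */
                return NULL;
        }
        skb_reserve(skb, NET_SKB_PAD);             /* skip the headroom */
        skb_put(skb, frame_len);                   /* expose the frame data */
        return skb;
}
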
index 4ed7b1d..5a6a906 100644 (file)
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
+#include <linux/jump_label.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <net/xfrm.h>
 #include <linux/ipsec.h>
 #include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
 
 #include <linux/filter.h>
 
 #include <net/tcp.h>
 #endif
 
+static DEFINE_RWLOCK(proto_list_lock);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct proto *proto;
+       int ret = 0;
+
+       read_lock(&proto_list_lock);
+       list_for_each_entry(proto, &proto_list, node) {
+               if (proto->init_cgroup) {
+                       ret = proto->init_cgroup(cgrp, ss);
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       read_unlock(&proto_list_lock);
+       return ret;
+out:
+       list_for_each_entry_continue_reverse(proto, &proto_list, node)
+               if (proto->destroy_cgroup)
+                       proto->destroy_cgroup(cgrp, ss);
+       read_unlock(&proto_list_lock);
+       return ret;
+}
+
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct proto *proto;
+
+       read_lock(&proto_list_lock);
+       list_for_each_entry_reverse(proto, &proto_list, node)
+               if (proto->destroy_cgroup)
+                       proto->destroy_cgroup(cgrp, ss);
+       read_unlock(&proto_list_lock);
+}
+#endif
+
 /*
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
+struct jump_label_key memcg_socket_limit_enabled;
+EXPORT_SYMBOL(memcg_socket_limit_enabled);
+
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
@@ -221,10 +266,16 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
-#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
+#if defined(CONFIG_CGROUPS)
+#if !defined(CONFIG_NET_CLS_CGROUP)
 int net_cls_subsys_id = -1;
 EXPORT_SYMBOL_GPL(net_cls_subsys_id);
 #endif
+#if !defined(CONFIG_NETPRIO_CGROUP)
+int net_prio_subsys_id = -1;
+EXPORT_SYMBOL_GPL(net_prio_subsys_id);
+#endif
+#endif
 
 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 {
@@ -269,14 +320,14 @@ static void sock_warn_obsolete_bsdism(const char *name)
        }
 }
 
-static void sock_disable_timestamp(struct sock *sk, int flag)
+#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+
+static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 {
-       if (sock_flag(sk, flag)) {
-               sock_reset_flag(sk, flag);
-               if (!sock_flag(sk, SOCK_TIMESTAMP) &&
-                   !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
+       if (sk->sk_flags & flags) {
+               sk->sk_flags &= ~flags;
+               if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
                        net_disable_timestamp();
-               }
        }
 }
 
@@ -682,7 +733,7 @@ set_rcvbuf:
                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
                else
                        sock_disable_timestamp(sk,
-                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
+                                              (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
                                  val & SOF_TIMESTAMPING_SOFTWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
@@ -740,6 +791,11 @@ set_rcvbuf:
        case SO_RXQ_OVFL:
                sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
                break;
+
+       case SO_WIFI_STATUS:
+               sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
+               break;
+
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -961,6 +1017,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
                break;
 
+       case SO_WIFI_STATUS:
+               v.val = !!sock_flag(sk, SOCK_WIFI_STATUS);
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
@@ -1111,6 +1171,18 @@ void sock_update_classid(struct sock *sk)
                sk->sk_classid = classid;
 }
 EXPORT_SYMBOL(sock_update_classid);
+
+void sock_update_netprioidx(struct sock *sk)
+{
+       struct cgroup_netprio_state *state;
+       if (in_interrupt())
+               return;
+       rcu_read_lock();
+       state = task_netprio_state(current);
+       sk->sk_cgrp_prioidx = state ? state->prioidx : 0;
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
 
 /**
@@ -1138,6 +1210,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                atomic_set(&sk->sk_wmem_alloc, 1);
 
                sock_update_classid(sk);
+               sock_update_netprioidx(sk);
        }
 
        return sk;
@@ -1158,8 +1231,7 @@ static void __sk_free(struct sock *sk)
                RCU_INIT_POINTER(sk->sk_filter, NULL);
        }
 
-       sock_disable_timestamp(sk, SOCK_TIMESTAMP);
-       sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);
+       sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
        if (atomic_read(&sk->sk_omem_alloc))
                printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
@@ -1204,7 +1276,14 @@ void sk_release_kernel(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_release_kernel);
 
-struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+/**
+ *     sk_clone_lock - clone a socket, and lock its clone
+ *     @sk: the socket to clone
+ *     @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ *     Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
        struct sock *newsk;
 
@@ -1288,16 +1367,15 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
                newsk->sk_wq = NULL;
 
                if (newsk->sk_prot->sockets_allocated)
-                       percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+                       sk_sockets_allocated_inc(newsk);
 
-               if (sock_flag(newsk, SOCK_TIMESTAMP) ||
-                   sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+               if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
                        net_enable_timestamp();
        }
 out:
        return newsk;
 }
-EXPORT_SYMBOL_GPL(sk_clone);
+EXPORT_SYMBOL_GPL(sk_clone_lock);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
@@ -1677,30 +1755,34 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
        struct proto *prot = sk->sk_prot;
        int amt = sk_mem_pages(size);
        long allocated;
+       int parent_status = UNDER_LIMIT;
 
        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-       allocated = atomic_long_add_return(amt, prot->memory_allocated);
+
+       allocated = sk_memory_allocated_add(sk, amt, &parent_status);
 
        /* Under limit. */
-       if (allocated <= prot->sysctl_mem[0]) {
-               if (prot->memory_pressure && *prot->memory_pressure)
-                       *prot->memory_pressure = 0;
+       if (parent_status == UNDER_LIMIT &&
+                       allocated <= sk_prot_mem_limits(sk, 0)) {
+               sk_leave_memory_pressure(sk);
                return 1;
        }
 
-       /* Under pressure. */
-       if (allocated > prot->sysctl_mem[1])
-               if (prot->enter_memory_pressure)
-                       prot->enter_memory_pressure(sk);
+       /* Under pressure. (we or our parents) */
+       if ((parent_status > SOFT_LIMIT) ||
+                       allocated > sk_prot_mem_limits(sk, 1))
+               sk_enter_memory_pressure(sk);
 
-       /* Over hard limit. */
-       if (allocated > prot->sysctl_mem[2])
+       /* Over hard limit (we or our parents) */
+       if ((parent_status == OVER_LIMIT) ||
+                       (allocated > sk_prot_mem_limits(sk, 2)))
                goto suppress_allocation;
 
        /* guarantee minimum buffer size under pressure */
        if (kind == SK_MEM_RECV) {
                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
                        return 1;
+
        } else { /* SK_MEM_SEND */
                if (sk->sk_type == SOCK_STREAM) {
                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
@@ -1710,13 +1792,13 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
                                return 1;
        }
 
-       if (prot->memory_pressure) {
+       if (sk_has_memory_pressure(sk)) {
                int alloc;
 
-               if (!*prot->memory_pressure)
+               if (!sk_under_memory_pressure(sk))
                        return 1;
-               alloc = percpu_counter_read_positive(prot->sockets_allocated);
-               if (prot->sysctl_mem[2] > alloc *
+               alloc = sk_sockets_allocated_read_positive(sk);
+               if (sk_prot_mem_limits(sk, 2) > alloc *
                    sk_mem_pages(sk->sk_wmem_queued +
                                 atomic_read(&sk->sk_rmem_alloc) +
                                 sk->sk_forward_alloc))
@@ -1739,7 +1821,9 @@ suppress_allocation:
 
        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-       atomic_long_sub(amt, prot->memory_allocated);
+
+       sk_memory_allocated_sub(sk, amt, parent_status);
+
        return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1750,15 +1834,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
  */
 void __sk_mem_reclaim(struct sock *sk)
 {
-       struct proto *prot = sk->sk_prot;
-
-       atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
-                  prot->memory_allocated);
+       sk_memory_allocated_sub(sk,
+                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
-       if (prot->memory_pressure && *prot->memory_pressure &&
-           (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
-               *prot->memory_pressure = 0;
+       if (sk_under_memory_pressure(sk) &&
+           (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+               sk_leave_memory_pressure(sk);
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
 
@@ -2129,16 +2211,15 @@ EXPORT_SYMBOL(sock_get_timestampns);
 void sock_enable_timestamp(struct sock *sk, int flag)
 {
        if (!sock_flag(sk, flag)) {
+               unsigned long previous_flags = sk->sk_flags;
+
                sock_set_flag(sk, flag);
                /*
                 * we just set one of the two flags which require net
                 * time stamping, but time stamping might have been on
                 * already because of the other one
                 */
-               if (!sock_flag(sk,
-                               flag == SOCK_TIMESTAMP ?
-                               SOCK_TIMESTAMPING_RX_SOFTWARE :
-                               SOCK_TIMESTAMP))
+               if (!(previous_flags & SK_FLAGS_TIMESTAMP))
                        net_enable_timestamp();
        }
 }
@@ -2250,9 +2331,6 @@ void sk_common_release(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_common_release);
 
-static DEFINE_RWLOCK(proto_list_lock);
-static LIST_HEAD(proto_list);
-
 #ifdef CONFIG_PROC_FS
 #define PROTO_INUSE_NR 64      /* should be enough for the first time */
 struct prot_inuse {
@@ -2474,16 +2552,27 @@ static char proto_method_implemented(const void *method)
 {
        return method == NULL ? 'n' : 'y';
 }
+static long sock_prot_memory_allocated(struct proto *proto)
+{
+       return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L;
+}
+
+static char *sock_prot_memory_pressure(struct proto *proto)
+{
+       return proto->memory_pressure != NULL ?
+       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
+}
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
+
        seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
-                  proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
-                  proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+                  sock_prot_memory_allocated(proto),
+                  sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
new file mode 100644 (file)
index 0000000..9c27bcd
--- /dev/null
@@ -0,0 +1,150 @@
+#include <linux/mutex.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <linux/module.h>
+
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static struct sock_diag_handler *sock_diag_handlers[AF_MAX];
+static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
+static DEFINE_MUTEX(sock_diag_table_mutex);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+       mutex_lock(&sock_diag_table_mutex);
+       inet_rcv_compat = fn;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);
+
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh))
+{
+       mutex_lock(&sock_diag_table_mutex);
+       inet_rcv_compat = NULL;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
+
+int sock_diag_register(struct sock_diag_handler *hndl)
+{
+       int err = 0;
+
+       if (hndl->family >= AF_MAX)
+               return -EINVAL;
+
+       mutex_lock(&sock_diag_table_mutex);
+       if (sock_diag_handlers[hndl->family])
+               err = -EBUSY;
+       else
+               sock_diag_handlers[hndl->family] = hndl;
+       mutex_unlock(&sock_diag_table_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(sock_diag_register);
+
+void sock_diag_unregister(struct sock_diag_handler *hnld)
+{
+       int family = hnld->family;
+
+       if (family >= AF_MAX)
+               return;
+
+       mutex_lock(&sock_diag_table_mutex);
+       BUG_ON(sock_diag_handlers[family] != hnld);
+       sock_diag_handlers[family] = NULL;
+       mutex_unlock(&sock_diag_table_mutex);
+}
+EXPORT_SYMBOL_GPL(sock_diag_unregister);
+
+static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
+{
+       if (sock_diag_handlers[family] == NULL)
+               request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+                               NETLINK_SOCK_DIAG, IPPROTO_IP);
+
+       mutex_lock(&sock_diag_table_mutex);
+       return sock_diag_handlers[family];
+}
+
+static inline void sock_diag_unlock_handler(struct sock_diag_handler *h)
+{
+       mutex_unlock(&sock_diag_table_mutex);
+}
+
+static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int err;
+       struct sock_diag_req *req = NLMSG_DATA(nlh);
+       struct sock_diag_handler *hndl;
+
+       if (nlmsg_len(nlh) < sizeof(*req))
+               return -EINVAL;
+
+       hndl = sock_diag_lock_handler(req->sdiag_family);
+       if (hndl == NULL)
+               err = -ENOENT;
+       else
+               err = hndl->dump(skb, nlh);
+       sock_diag_unlock_handler(hndl);
+
+       return err;
+}
+
+static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int ret;
+
+       switch (nlh->nlmsg_type) {
+       case TCPDIAG_GETSOCK:
+       case DCCPDIAG_GETSOCK:
+               if (inet_rcv_compat == NULL)
+                       request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+                                       NETLINK_SOCK_DIAG, IPPROTO_IP);
+
+               mutex_lock(&sock_diag_table_mutex);
+               if (inet_rcv_compat != NULL)
+                       ret = inet_rcv_compat(skb, nlh);
+               else
+                       ret = -EOPNOTSUPP;
+               mutex_unlock(&sock_diag_table_mutex);
+
+               return ret;
+       case SOCK_DIAG_BY_FAMILY:
+               return __sock_diag_rcv_msg(skb, nlh);
+       default:
+               return -EINVAL;
+       }
+}
+
+static DEFINE_MUTEX(sock_diag_mutex);
+
+static void sock_diag_rcv(struct sk_buff *skb)
+{
+       mutex_lock(&sock_diag_mutex);
+       netlink_rcv_skb(skb, &sock_diag_rcv_msg);
+       mutex_unlock(&sock_diag_mutex);
+}
+
+struct sock *sock_diag_nlsk;
+EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+
+static int __init sock_diag_init(void)
+{
+       sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
+                                       sock_diag_rcv, NULL, THIS_MODULE);
+       return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __exit sock_diag_exit(void)
+{
+       netlink_kernel_release(sock_diag_nlsk);
+}
+
+module_init(sock_diag_init);
+module_exit(sock_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
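
The sock_diag core above multiplexes SOCK_DIAG_BY_FAMILY requests to per-family handlers. A minimal sketch of how a protocol family module would hook in (the example names are invented; AF_INET is shown only because the real handler for it lives in inet_diag):

#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/socket.h>
#include <linux/netlink.h>

static int example_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* Walk the family's sockets and answer over the sock_diag netlink socket. */
        return 0;
}

static struct sock_diag_handler example_diag_handler = {
        .family = AF_INET,              /* a real module registers its own family */
        .dump   = example_diag_dump,
};

static int __init example_diag_init(void)
{
        return sock_diag_register(&example_diag_handler);  /* -EBUSY if already taken */
}

static void __exit example_diag_exit(void)
{
        sock_diag_unregister(&example_diag_handler);
}

module_init(example_diag_init);
module_exit(example_diag_exit);
MODULE_LICENSE("GPL");
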
index 77a65f0..d05559d 100644 (file)
@@ -68,8 +68,13 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
-                       synchronize_rcu();
-                       vfree(orig_sock_table);
+                       if (sock_table)
+                               jump_label_inc(&rps_needed);
+                       if (orig_sock_table) {
+                               jump_label_dec(&rps_needed);
+                               synchronize_rcu();
+                               vfree(orig_sock_table);
+                       }
                }
        }
 
index 583490a..5818032 100644 (file)
@@ -357,7 +357,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
 struct dccp_skb_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
index b21f261..e29214d 100644 (file)
@@ -48,11 +48,23 @@ static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                dccp_get_info(sk, _info);
 }
 
+static void dccp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       inet_diag_dump_icsk(&dccp_hashinfo, skb, cb, r, bc);
+}
+
+static int dccp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return inet_diag_dump_one_icsk(&dccp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler dccp_diag_handler = {
-       .idiag_hashinfo  = &dccp_hashinfo,
+       .dump            = dccp_diag_dump,
+       .dump_one        = dccp_diag_dump_one,
        .idiag_get_info  = dccp_diag_get_info,
-       .idiag_type      = DCCPDIAG_GETSOCK,
-       .idiag_info_size = sizeof(struct tcp_info),
+       .idiag_type      = IPPROTO_DCCP,
 };
 
 static int __init dccp_diag_init(void)
@@ -71,4 +83,4 @@ module_exit(dccp_diag_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
 MODULE_DESCRIPTION("DCCP inet_diag handler");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, DCCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 33);
index 3f4e541..1c67fe8 100644 (file)
@@ -474,10 +474,11 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
                                           struct sk_buff *skb)
 {
        struct rtable *rt;
+       const struct iphdr *iph = ip_hdr(skb);
        struct flowi4 fl4 = {
                .flowi4_oif = skb_rtable(skb)->rt_iif,
-               .daddr = ip_hdr(skb)->saddr,
-               .saddr = ip_hdr(skb)->daddr,
+               .daddr = iph->saddr,
+               .saddr = iph->daddr,
                .flowi4_tos = RT_CONN_FLAGS(sk),
                .flowi4_proto = sk->sk_protocol,
                .fl4_sport = dccp_hdr(skb)->dccph_dport,
index 17ee85c..ce903f7 100644 (file)
@@ -150,8 +150,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                         */
                        memset(&fl6, 0, sizeof(fl6));
                        fl6.flowi6_proto = IPPROTO_DCCP;
-                       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-                       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+                       fl6.daddr = np->daddr;
+                       fl6.saddr = np->saddr;
                        fl6.flowi6_oif = sk->sk_bound_dev_if;
                        fl6.fl6_dport = inet->inet_dport;
                        fl6.fl6_sport = inet->inet_sport;
@@ -244,8 +244,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_DCCP;
-       ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
-       ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+       fl6.daddr = ireq6->rmt_addr;
+       fl6.saddr = ireq6->loc_addr;
        fl6.flowlabel = 0;
        fl6.flowi6_oif = ireq6->iif;
        fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -270,7 +270,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
                dh->dccph_checksum = dccp_v6_csum_finish(skb,
                                                         &ireq6->loc_addr,
                                                         &ireq6->rmt_addr);
-               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               fl6.daddr = ireq6->rmt_addr;
                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -313,8 +313,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
                                                            &rxip6h->daddr);
 
        memset(&fl6, 0, sizeof(fl6));
-       ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr);
-       ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr);
+       fl6.daddr = rxip6h->saddr;
+       fl6.saddr = rxip6h->daddr;
 
        fl6.flowi6_proto = IPPROTO_DCCP;
        fl6.flowi6_oif = inet6_iif(rxskb);
@@ -419,8 +419,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq6 = inet6_rsk(req);
-       ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
 
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
@@ -491,7 +491,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+               newnp->rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -526,9 +526,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_DCCP;
-               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               fl6.daddr = ireq6->rmt_addr;
                final_p = fl6_update_dst(&fl6, opt, &final);
-               ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+               fl6.saddr = ireq6->loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.fl6_dport = inet_rsk(req)->rmt_port;
                fl6.fl6_sport = inet_rsk(req)->loc_port;
@@ -559,9 +559,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
-       ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
-       ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
+       newnp->daddr = ireq6->rmt_addr;
+       newnp->saddr = ireq6->loc_addr;
+       newnp->rcv_saddr = ireq6->loc_addr;
        newsk->sk_bound_dev_if = ireq6->iif;
 
        /* Now IPv6 options...
@@ -877,7 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
-                       ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+                       usin->sin6_addr = flowlabel->dst;
                        fl6_sock_release(flowlabel);
                }
        }
@@ -910,7 +910,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        return -EINVAL;
        }
 
-       ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+       np->daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -949,8 +949,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                saddr = &np->rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_DCCP;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr);
+       fl6.daddr = np->daddr;
+       fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
@@ -966,11 +966,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               ipv6_addr_copy(&np->rcv_saddr, saddr);
+               np->rcv_saddr = *saddr;
        }
 
        /* set the source address */
-       ipv6_addr_copy(&np->saddr, saddr);
+       np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
        __ip6_dst_store(sk, dst, NULL, NULL);
index d7041a0..5a7f90b 100644 (file)
@@ -53,15 +53,15 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;
 
                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
-                       ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
-                       ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+                       tw6->tw_v6_daddr = np->daddr;
+                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
                        tw->tw_ipv6only = np->ipv6only;
                }
 #endif
@@ -100,7 +100,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
         *   (* Generate a new socket and switch to that socket *)
         *   Set S := new socket for this port pair
         */
-       struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+       struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
        if (newsk != NULL) {
                struct dccp_request_sock *dreq = dccp_rsk(req);
index 33d0e62..0a8d6eb 100644 (file)
@@ -152,6 +152,17 @@ static const struct file_operations dccpprobe_fops = {
        .llseek  = noop_llseek,
 };
 
+static __init int setup_jprobe(void)
+{
+       int ret = register_jprobe(&dccp_send_probe);
+
+       if (ret) {
+               request_module("dccp");
+               ret = register_jprobe(&dccp_send_probe);
+       }
+       return ret;
+}
+
 static __init int dccpprobe_init(void)
 {
        int ret = -ENOMEM;
@@ -163,8 +174,7 @@ static __init int dccpprobe_init(void)
        if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
                goto err0;
 
-       try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
-                               "dccp");
+       ret = setup_jprobe();
        if (ret)
                goto err1;
 
index 7f0eb08..7d2fff2 100644 (file)
@@ -107,7 +107,7 @@ struct neigh_table dn_neigh_table = {
                .gc_staletime = 60 * HZ,
                .reachable_time =               30 * HZ,
                .delay_probe_time =     5 * HZ,
-               .queue_len =            3,
+               .queue_len_bytes =      64*1024,
                .ucast_probes = 0,
                .app_probes =           0,
                .mcast_probes = 0,
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
        struct dn_route *rt = (struct dn_route *)dst;
-       struct neighbour *neigh = dst_get_neighbour(dst);
+       struct neighbour *neigh = dst_get_neighbour_noref(dst);
        struct net_device *dev = neigh->dev;
        char mac_addr[ETH_ALEN];
 
index 94f4ec0..f31ce72 100644 (file)
@@ -244,7 +244,7 @@ static int dn_dst_gc(struct dst_ops *ops)
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-       struct neighbour *n = dst_get_neighbour(dst);
+       struct neighbour *n = dst_get_neighbour_noref(dst);
        u32 min_mtu = 230;
        struct dn_dev *dn;
 
@@ -713,7 +713,7 @@ out:
 static int dn_to_neigh_output(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
-       struct neighbour *n = dst_get_neighbour(dst);
+       struct neighbour *n = dst_get_neighbour_noref(dst);
 
        return n->output(n, skb);
 }
@@ -728,7 +728,7 @@ static int dn_output(struct sk_buff *skb)
 
        int err = -EINVAL;
 
-       if ((neigh = dst_get_neighbour(dst)) == NULL)
+       if ((neigh = dst_get_neighbour_noref(dst)) == NULL)
                goto error;
 
        skb->dev = dev;
@@ -852,7 +852,7 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
        }
        rt->rt_type = res->type;
 
-       if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
+       if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
                n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
index c53ded2..274791c 100644 (file)
@@ -1,5 +1,5 @@
-menuconfig NET_DSA
-       bool "Distributed Switch Architecture support"
+config NET_DSA
+       tristate "Distributed Switch Architecture support"
        default n
        depends on EXPERIMENTAL && NETDEVICES && !S390
        select PHYLIB
@@ -23,38 +23,4 @@ config NET_DSA_TAG_TRAILER
        bool
        default n
 
-
-# switch drivers
-config NET_DSA_MV88E6XXX
-       bool
-       default n
-
-config NET_DSA_MV88E6060
-       bool "Marvell 88E6060 ethernet switch chip support"
-       select NET_DSA_TAG_TRAILER
-       ---help---
-         This enables support for the Marvell 88E6060 ethernet switch
-         chip.
-
-config NET_DSA_MV88E6XXX_NEED_PPU
-       bool
-       default n
-
-config NET_DSA_MV88E6131
-       bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_MV88E6XXX_NEED_PPU
-       select NET_DSA_TAG_DSA
-       ---help---
-         This enables support for the Marvell 88E6085/6095/6095F/6131
-         ethernet switch chips.
-
-config NET_DSA_MV88E6123_61_65
-       bool "Marvell 88E6123/6161/6165 ethernet switch chip support"
-       select NET_DSA_MV88E6XXX
-       select NET_DSA_TAG_EDSA
-       ---help---
-         This enables support for the Marvell 88E6123/6161/6165
-         ethernet switch chips.
-
 endif
index 2374faf..7b9fcbb 100644 (file)
@@ -1,13 +1,8 @@
-# tagging formats
-obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
-obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
-obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
-
-# switch drivers
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
-obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6123_61_65) += mv88e6123_61_65.o
-obj-$(CONFIG_NET_DSA_MV88E6131) += mv88e6131.o
-
 # the core
-obj-$(CONFIG_NET_DSA) += dsa.o slave.o
+obj-$(CONFIG_NET_DSA) += dsa_core.o
+dsa_core-y += dsa.o slave.o
+
+# tagging formats
+dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
+dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
index 0dc1589..88e7c2f 100644 (file)
@@ -29,6 +29,7 @@ void register_switch_driver(struct dsa_switch_driver *drv)
        list_add_tail(&drv->list, &dsa_switch_drivers);
        mutex_unlock(&dsa_switch_drivers_mutex);
 }
+EXPORT_SYMBOL_GPL(register_switch_driver);
 
 void unregister_switch_driver(struct dsa_switch_driver *drv)
 {
@@ -36,6 +37,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv)
        list_del_init(&drv->list);
        mutex_unlock(&dsa_switch_drivers_mutex);
 }
+EXPORT_SYMBOL_GPL(unregister_switch_driver);
 
 static struct dsa_switch_driver *
 dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
@@ -199,29 +201,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 }
 
 
-/* hooks for ethertype-less tagging formats *********************************/
-/*
- * The original DSA tag format and some other tag formats have no
- * ethertype, which means that we need to add a little hack to the
- * networking receive path to make sure that received frames get
- * the right ->protocol assigned to them when one of those tag
- * formats is in use.
- */
-bool dsa_uses_dsa_tags(void *dsa_ptr)
-{
-       struct dsa_switch_tree *dst = dsa_ptr;
-
-       return !!(dst->tag_protocol == htons(ETH_P_DSA));
-}
-
-bool dsa_uses_trailer_tags(void *dsa_ptr)
-{
-       struct dsa_switch_tree *dst = dsa_ptr;
-
-       return !!(dst->tag_protocol == htons(ETH_P_TRAILER));
-}
-
-
 /* link polling *************************************************************/
 static void dsa_link_poll_work(struct work_struct *ugly)
 {
@@ -419,12 +398,36 @@ static struct platform_driver dsa_driver = {
 
 static int __init dsa_init_module(void)
 {
-       return platform_driver_register(&dsa_driver);
+       int rc;
+
+       rc = platform_driver_register(&dsa_driver);
+       if (rc)
+               return rc;
+
+#ifdef CONFIG_NET_DSA_TAG_DSA
+       dev_add_pack(&dsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+       dev_add_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+       dev_add_pack(&trailer_packet_type);
+#endif
+       return 0;
 }
 module_init(dsa_init_module);
 
 static void __exit dsa_cleanup_module(void)
 {
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+       dev_remove_pack(&trailer_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+       dev_remove_pack(&edsa_packet_type);
+#endif
+#ifdef CONFIG_NET_DSA_TAG_DSA
+       dev_remove_pack(&dsa_packet_type);
+#endif
        platform_driver_unregister(&dsa_driver);
 }
 module_exit(dsa_cleanup_module);
index 4b0ea05..d4cf5cc 100644 (file)
 #ifndef __DSA_PRIV_H
 #define __DSA_PRIV_H
 
-#include <linux/list.h>
 #include <linux/phy.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
 #include <net/dsa.h>
 
-struct dsa_switch {
-       /*
-        * Parent switch tree, and switch index.
-        */
-       struct dsa_switch_tree  *dst;
-       int                     index;
-
-       /*
-        * Configuration data for this switch.
-        */
-       struct dsa_chip_data    *pd;
-
-       /*
-        * The used switch driver.
-        */
-       struct dsa_switch_driver        *drv;
-
-       /*
-        * Reference to mii bus to use.
-        */
-       struct mii_bus          *master_mii_bus;
-
-       /*
-        * Slave mii_bus and devices for the individual ports.
-        */
-       u32                     dsa_port_mask;
-       u32                     phys_port_mask;
-       struct mii_bus          *slave_mii_bus;
-       struct net_device       *ports[DSA_MAX_PORTS];
-};
-
-struct dsa_switch_tree {
-       /*
-        * Configuration data for the platform device that owns
-        * this dsa switch tree instance.
-        */
-       struct dsa_platform_data        *pd;
-
-       /*
-        * Reference to network device to use, and which tagging
-        * protocol to use.
-        */
-       struct net_device       *master_netdev;
-       __be16                  tag_protocol;
-
-       /*
-        * The switch and port to which the CPU is attached.
-        */
-       s8                      cpu_switch;
-       s8                      cpu_port;
-
-       /*
-        * Link state polling.
-        */
-       int                     link_poll_needed;
-       struct work_struct      link_poll_work;
-       struct timer_list       link_poll_timer;
-
-       /*
-        * Data for the individual switch chips.
-        */
-       struct dsa_switch       *ds[DSA_MAX_SWITCHES];
-};
-
-static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
-{
-       return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
-}
-
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
-{
-       struct dsa_switch_tree *dst = ds->dst;
-
-       /*
-        * If this is the root switch (i.e. the switch that connects
-        * to the CPU), return the cpu port number on this switch.
-        * Else return the (DSA) port number that connects to the
-        * switch that is one hop closer to the cpu.
-        */
-       if (dst->cpu_switch == ds->index)
-               return dst->cpu_port;
-       else
-               return ds->pd->rtable[dst->cpu_switch];
-}
-
 struct dsa_slave_priv {
        /*
         * The linux network interface corresponding to this
@@ -123,44 +35,8 @@ struct dsa_slave_priv {
        struct phy_device       *phy;
 };
 
-struct dsa_switch_driver {
-       struct list_head        list;
-
-       __be16                  tag_protocol;
-       int                     priv_size;
-
-       /*
-        * Probing and setup.
-        */
-       char    *(*probe)(struct mii_bus *bus, int sw_addr);
-       int     (*setup)(struct dsa_switch *ds);
-       int     (*set_addr)(struct dsa_switch *ds, u8 *addr);
-
-       /*
-        * Access to the switch's PHY registers.
-        */
-       int     (*phy_read)(struct dsa_switch *ds, int port, int regnum);
-       int     (*phy_write)(struct dsa_switch *ds, int port,
-                            int regnum, u16 val);
-
-       /*
-        * Link state polling and IRQ handling.
-        */
-       void    (*poll_link)(struct dsa_switch *ds);
-
-       /*
-        * ethtool hardware statistics.
-        */
-       void    (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data);
-       void    (*get_ethtool_stats)(struct dsa_switch *ds,
-                                    int port, uint64_t *data);
-       int     (*get_sset_count)(struct dsa_switch *ds);
-};
-
 /* dsa.c */
 extern char dsa_driver_version[];
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
 
 /* slave.c */
 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
@@ -170,12 +46,15 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds,
 
 /* tag_dsa.c */
 netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type dsa_packet_type;
 
 /* tag_edsa.c */
 netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type edsa_packet_type;
 
 /* tag_trailer.c */
 netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
+extern struct packet_type trailer_packet_type;
 
 
 #endif
index 98dfe80..cacce1e 100644 (file)
@@ -186,20 +186,7 @@ out:
        return 0;
 }
 
-static struct packet_type dsa_packet_type __read_mostly = {
+struct packet_type dsa_packet_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_DSA),
        .func   = dsa_rcv,
 };
-
-static int __init dsa_init_module(void)
-{
-       dev_add_pack(&dsa_packet_type);
-       return 0;
-}
-module_init(dsa_init_module);
-
-static void __exit dsa_cleanup_module(void)
-{
-       dev_remove_pack(&dsa_packet_type);
-}
-module_exit(dsa_cleanup_module);
index 6f38332..e70c43c 100644 (file)
@@ -205,20 +205,7 @@ out:
        return 0;
 }
 
-static struct packet_type edsa_packet_type __read_mostly = {
+struct packet_type edsa_packet_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_EDSA),
        .func   = edsa_rcv,
 };
-
-static int __init edsa_init_module(void)
-{
-       dev_add_pack(&edsa_packet_type);
-       return 0;
-}
-module_init(edsa_init_module);
-
-static void __exit edsa_cleanup_module(void)
-{
-       dev_remove_pack(&edsa_packet_type);
-}
-module_exit(edsa_cleanup_module);
index d6d7d0a..94bc260 100644 (file)
@@ -114,20 +114,7 @@ out:
        return 0;
 }
 
-static struct packet_type trailer_packet_type __read_mostly = {
+struct packet_type trailer_packet_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_TRAILER),
        .func   = trailer_rcv,
 };
-
-static int __init trailer_init_module(void)
-{
-       dev_add_pack(&trailer_packet_type);
-       return 0;
-}
-module_init(trailer_init_module);
-
-static void __exit trailer_cleanup_module(void)
-{
-       dev_remove_pack(&trailer_packet_type);
-}
-module_exit(trailer_cleanup_module);
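/*
 * Illustrative sketch, not part of this patch: with the three per-tagger
 * module_init()/module_exit() pairs removed and dsa_packet_type,
 * edsa_packet_type and trailer_packet_type exported above, a single
 * registration point in net/dsa/dsa.c could plausibly look as follows.
 * The init/exit function names here are assumptions for illustration only.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

extern struct packet_type dsa_packet_type;	/* tag_dsa.c     */
extern struct packet_type edsa_packet_type;	/* tag_edsa.c    */
extern struct packet_type trailer_packet_type;	/* tag_trailer.c */

static int __init dsa_tagging_init(void)
{
	/* one registration point instead of three per-tagger module inits */
	dev_add_pack(&dsa_packet_type);
	dev_add_pack(&edsa_packet_type);
	dev_add_pack(&trailer_packet_type);
	return 0;
}
module_init(dsa_tagging_init);

static void __exit dsa_tagging_exit(void)
{
	dev_remove_pack(&trailer_packet_type);
	dev_remove_pack(&edsa_packet_type);
	dev_remove_pack(&dsa_packet_type);
}
module_exit(dsa_tagging_exit);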
index 1c1f26c..7e717cb 100644 (file)
@@ -322,6 +322,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
                /* Real hardware Econet.  We're not worthy etc. */
 #ifdef CONFIG_ECONET_NATIVE
                unsigned short proto = 0;
+               int hlen, tlen;
                int res;
 
                if (len + 15 > dev->mtu) {
@@ -331,12 +332,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
                dev_hold(dev);
 
-               skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
+               hlen = LL_RESERVED_SPACE(dev);
+               tlen = dev->needed_tailroom;
+               skb = sock_alloc_send_skb(sk, len + hlen + tlen,
                                          msg->msg_flags & MSG_DONTWAIT, &err);
                if (skb == NULL)
                        goto out_unlock;
 
-               skb_reserve(skb, LL_RESERVED_SPACE(dev));
+               skb_reserve(skb, hlen);
                skb_reset_network_header(skb);
 
                eb = (struct ec_cb *)&skb->cb;
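/*
 * Illustrative sketch, not part of this patch: the LL_ALLOCATED_SPACE()
 * conversion above (repeated below for ieee802154 dgram/raw, ARP and IGMP)
 * follows one idiom - size the buffer for headroom plus tailroom, but only
 * reserve the headroom.  alloc_tx_skb() and payload_len are made-up names.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *alloc_tx_skb(struct net_device *dev, int payload_len)
{
	int hlen = LL_RESERVED_SPACE(dev);	/* room for the hard header    */
	int tlen = dev->needed_tailroom;	/* room for trailers, e.g. FCS */
	struct sk_buff *skb;

	skb = alloc_skb(payload_len + hlen + tlen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);			/* only the headroom is reserved */
	skb_reset_network_header(skb);
	return skb;
}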
index 19d6aef..e4ecc1e 100644 (file)
@@ -50,8 +50,6 @@
  * SUCH DAMAGE.
  */
 
-#define DEBUG
-
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
 #include <linux/module.h>
@@ -113,6 +111,20 @@ struct lowpan_dev_record {
        struct list_head list;
 };
 
+struct lowpan_fragment {
+       struct sk_buff          *skb;           /* skb to be assembled */
+       spinlock_t              lock;           /* concurrency lock */
+       u16                     length;         /* length to be assembled */
+       u32                     bytes_rcv;      /* bytes received */
+       u16                     tag;            /* current fragment tag */
+       struct timer_list       timer;          /* assembling timer */
+       struct list_head        list;           /* fragments list */
+};
+
+static unsigned short fragment_tag;
+static LIST_HEAD(lowpan_fragments);
+static DEFINE_SPINLOCK(flist_lock);
+
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
@@ -234,6 +246,50 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
        return 0;
 }
 
+static void
+lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_hdr(skb);
+
+       pr_debug("(%s): UDP header compression\n", __func__);
+
+       if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
+                               LOWPAN_NHC_UDP_4BIT_PORT) &&
+           ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
+                               LOWPAN_NHC_UDP_4BIT_PORT)) {
+               pr_debug("(%s): compressing both ports to 4 bits\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
+               *(*hc06_ptr + 1) = /* subtraction is faster */
+                  (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
+                      ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
+               *hc06_ptr += 2;
+       } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
+                       LOWPAN_NHC_UDP_8BIT_PORT) {
+               pr_debug("(%s): remove 8 bits of dest\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
+               memcpy(*hc06_ptr + 1, &uh->source, 2);
+               *(*hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
+               *hc06_ptr += 4;
+       } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
+                       LOWPAN_NHC_UDP_8BIT_PORT) {
+               pr_debug("(%s): remove 8 bits of source\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
+               memcpy(*hc06_ptr + 1, &uh->dest, 2);
+               *(*hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
+               *hc06_ptr += 4;
+       } else {
+               pr_debug("(%s): can't compress header\n", __func__);
+               **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
+               memcpy(*hc06_ptr + 1, &uh->source, 2);
+               memcpy(*hc06_ptr + 3, &uh->dest, 2);
+               *hc06_ptr += 5;
+       }
+
+       /* checksum is always inline */
+       memcpy(*hc06_ptr, &uh->check, 2);
+       *hc06_ptr += 2;
+}
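/*
 * Illustrative sketch, not part of this patch: the port handling above
 * reduces to picking one of the four NHC UDP "ports" encodings.  The helper
 * name is made up and the ports are taken in host byte order, purely to
 * show the decision logic.
 */
static u8 lowpan_nhc_udp_ports_encoding(u16 sport, u16 dport)
{
	if ((sport & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT &&
	    (dport & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT)
		return LOWPAN_NHC_UDP_CS_P_11;	/* both ports -> 4 bits each  */
	if ((dport & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT)
		return LOWPAN_NHC_UDP_CS_P_01;	/* dest -> 8 bits, src inline */
	if ((sport & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT)
		return LOWPAN_NHC_UDP_CS_P_10;	/* src -> 8 bits, dest inline */
	return LOWPAN_NHC_UDP_CS_P_00;		/* both ports fully inline    */
}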
+
 static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
 {
        u8 ret;
@@ -244,6 +300,73 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
        return ret;
 }
 
+static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+{
+       u16 ret;
+
+       BUG_ON(!pskb_may_pull(skb, 2));
+
+       ret = skb->data[0] | (skb->data[1] << 8);
+       skb_pull(skb, 2);
+       return ret;
+}
+
+static int
+lowpan_uncompress_udp_header(struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_hdr(skb);
+       u8 tmp;
+
+       tmp = lowpan_fetch_skb_u8(skb);
+
+       if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+               pr_debug("(%s): UDP header uncompression\n", __func__);
+               switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
+               case LOWPAN_NHC_UDP_CS_P_00:
+                       memcpy(&uh->source, &skb->data[0], 2);
+                       memcpy(&uh->dest, &skb->data[2], 2);
+                       skb_pull(skb, 4);
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_01:
+                       memcpy(&uh->source, &skb->data[0], 2);
+                       uh->dest =
+                          skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
+                       skb_pull(skb, 3);
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_10:
+                       uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
+                       memcpy(&uh->dest, &skb->data[1], 2);
+                       skb_pull(skb, 3);
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_11:
+                       uh->source =
+                          LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
+                       uh->dest =
+                          LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
+                       skb_pull(skb, 1);
+                       break;
+               default:
+                       pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+                       goto err;
+                       break;
+               }
+
+               pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
+                                       __func__, uh->source, uh->dest);
+
+               /* copy checksum */
+               memcpy(&uh->check, &skb->data[0], 2);
+               skb_pull(skb, 2);
+       } else {
+               pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+               goto err;
+       }
+
+       return 0;
+err:
+       return -EINVAL;
+}
+
 static int lowpan_header_create(struct sk_buff *skb,
                           struct net_device *dev,
                           unsigned short type, const void *_daddr,
@@ -342,8 +465,6 @@ static int lowpan_header_create(struct sk_buff *skb,
        if (hdr->nexthdr == UIP_PROTO_UDP)
                iphc0 |= LOWPAN_IPHC_NH_C;
 
-/* TODO: next header compression */
-
        if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
                *hc06_ptr = hdr->nexthdr;
                hc06_ptr += 1;
@@ -431,8 +552,9 @@ static int lowpan_header_create(struct sk_buff *skb,
                }
        }
 
-       /* TODO: UDP header compression */
-       /* TODO: Next Header compression */
+       /* UDP header compression */
+       if (hdr->nexthdr == UIP_PROTO_UDP)
+               lowpan_compress_udp_header(&hc06_ptr, skb);
 
        head[0] = iphc0;
        head[1] = iphc1;
@@ -467,6 +589,7 @@ static int lowpan_header_create(struct sk_buff *skb,
                memcpy(&(sa.hwaddr), saddr, 8);
 
                mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+
                return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
                                type, (void *)&da, (void *)&sa, skb->len);
        }
@@ -511,6 +634,21 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
        return stat;
 }
 
+static void lowpan_fragment_timer_expired(unsigned long entry_addr)
+{
+       struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
+
+       pr_debug("%s: timer expired for frame with tag %d\n", __func__,
+                                                               entry->tag);
+
+       spin_lock(&flist_lock);
+       list_del(&entry->list);
+       spin_unlock(&flist_lock);
+
+       dev_kfree_skb(entry->skb);
+       kfree(entry);
+}
+
 static int
 lowpan_process_data(struct sk_buff *skb)
 {
@@ -525,6 +663,107 @@ lowpan_process_data(struct sk_buff *skb)
        if (skb->len < 2)
                goto drop;
        iphc0 = lowpan_fetch_skb_u8(skb);
+
+       /* fragments assembling */
+       switch (iphc0 & LOWPAN_DISPATCH_MASK) {
+       case LOWPAN_DISPATCH_FRAG1:
+       case LOWPAN_DISPATCH_FRAGN:
+       {
+               struct lowpan_fragment *frame;
+               u8 len, offset;
+               u16 tag;
+               bool found = false;
+
+               len = lowpan_fetch_skb_u8(skb); /* frame length */
+               tag = lowpan_fetch_skb_u16(skb);
+
+               /*
+                * check if frame assembling with the same tag is
+                * already in progress
+                */
+               spin_lock(&flist_lock);
+
+               list_for_each_entry(frame, &lowpan_fragments, list)
+                       if (frame->tag == tag) {
+                               found = true;
+                               break;
+                       }
+
+               /* alloc new frame structure */
+               if (!found) {
+                       frame = kzalloc(sizeof(struct lowpan_fragment),
+                                                               GFP_ATOMIC);
+                       if (!frame)
+                               goto unlock_and_drop;
+
+                       INIT_LIST_HEAD(&frame->list);
+
+                       frame->length = (iphc0 & 7) | (len << 3);
+                       frame->tag = tag;
+
+                       /* allocate buffer for frame assembling */
+                       frame->skb = alloc_skb(frame->length +
+                                       sizeof(struct ipv6hdr), GFP_ATOMIC);
+
+                       if (!frame->skb) {
+                               kfree(frame);
+                               goto unlock_and_drop;
+                       }
+
+                       frame->skb->priority = skb->priority;
+                       frame->skb->dev = skb->dev;
+
+                       /* reserve headroom for uncompressed ipv6 header */
+                       skb_reserve(frame->skb, sizeof(struct ipv6hdr));
+                       skb_put(frame->skb, frame->length);
+
+                       init_timer(&frame->timer);
+                       /* the timeout is the same as for IPv6 reassembly: 60 sec */
+                       frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
+                       frame->timer.data = (unsigned long)frame;
+                       frame->timer.function = lowpan_fragment_timer_expired;
+
+                       add_timer(&frame->timer);
+
+                       list_add_tail(&frame->list, &lowpan_fragments);
+               }
+
+               if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
+                       goto unlock_and_drop;
+
+               offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+
+               /* if payload fits buffer, copy it */
+               if (likely((offset * 8 + skb->len) <= frame->length))
+                       skb_copy_to_linear_data_offset(frame->skb, offset * 8,
+                                                       skb->data, skb->len);
+               else
+                       goto unlock_and_drop;
+
+               frame->bytes_rcv += skb->len;
+
+               /* frame assembling complete */
+               if ((frame->bytes_rcv == frame->length) &&
+                    frame->timer.expires > jiffies) {
+                       /* if the timer hasn't expired yet, delete it first */
+                       del_timer(&frame->timer);
+                       list_del(&frame->list);
+                       spin_unlock(&flist_lock);
+
+                       dev_kfree_skb(skb);
+                       skb = frame->skb;
+                       kfree(frame);
+                       iphc0 = lowpan_fetch_skb_u8(skb);
+                       break;
+               }
+               spin_unlock(&flist_lock);
+
+               return kfree_skb(skb), 0;
+       }
+       default:
+               break;
+       }
+
        iphc1 = lowpan_fetch_skb_u8(skb);
 
        _saddr = mac_cb(skb)->sa.hwaddr;
@@ -659,7 +898,10 @@ lowpan_process_data(struct sk_buff *skb)
                        goto drop;
        }
 
-       /* TODO: UDP header parse */
+       /* UDP header uncompression */
+       if (iphc0 & LOWPAN_IPHC_NH_C)
+               if (lowpan_uncompress_udp_header(skb))
+                       goto drop;
 
        /* Not a fragmented packet */
        hdr.payload_len = htons(skb->len);
@@ -674,6 +916,9 @@ lowpan_process_data(struct sk_buff *skb)
        lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
                                                        sizeof(hdr));
        return lowpan_skb_deliver(skb, &hdr);
+
+unlock_and_drop:
+       spin_unlock(&flist_lock);
 drop:
        kfree_skb(skb);
        return -EINVAL;
@@ -692,18 +937,115 @@ static int lowpan_set_address(struct net_device *dev, void *p)
        return 0;
 }
 
+static int lowpan_get_mac_header_length(struct sk_buff *skb)
+{
+       /*
+        * Currently only the long addressing mode is supported, so the
+        * overall header size is 21:
+        * FC SeqNum DPAN DA  SA  Sec
+        * 2  +  1  +  2 + 8 + 8 + 0  = 21
+        */
+       return 21;
+}
+
+static int
+lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
+                       int mlen, int plen, int offset)
+{
+       struct sk_buff *frag;
+       int hlen, ret;
+
+       /* if the payload length is zero, this is the first fragment */
+       hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE);
+
+       lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+
+       frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
+       if (!frag)
+               return -ENOMEM;
+
+       frag->priority = skb->priority;
+       frag->dev = skb->dev;
+
+       /* copy the MAC header, fragment header and payload */
+       memcpy(skb_put(frag, mlen), skb->data, mlen);
+       memcpy(skb_put(frag, hlen), head, hlen);
+
+       if (plen)
+               skb_copy_from_linear_data_offset(skb, offset + mlen,
+                                       skb_put(frag, plen), plen);
+
+       lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
+                                                               frag->len);
+
+       ret = dev_queue_xmit(frag);
+
+       return ret;
+}
+
+static int
+lowpan_skb_fragmentation(struct sk_buff *skb)
+{
+       int  err, header_length, payload_length, tag, offset = 0;
+       u8 head[5];
+
+       header_length = lowpan_get_mac_header_length(skb);
+       payload_length = skb->len - header_length;
+       tag = fragment_tag++;
+
+       /* first fragment header */
+       head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
+       head[1] = (payload_length >> 3) & 0xff;
+       head[2] = tag & 0xff;
+       head[3] = tag >> 8;
+
+       err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
+
+       /* next fragment header */
+       head[0] &= ~LOWPAN_DISPATCH_FRAG1;
+       head[0] |= LOWPAN_DISPATCH_FRAGN;
+
+       while ((payload_length - offset > 0) && (err >= 0)) {
+               int len = LOWPAN_FRAG_SIZE;
+
+               head[4] = offset / 8;
+
+               if (payload_length - offset < len)
+                       len = payload_length - offset;
+
+               err = lowpan_fragment_xmit(skb, head, header_length,
+                                                       len, offset);
+               offset += len;
+       }
+
+       return err;
+}
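/*
 * Illustrative sketch, not part of this patch: the fragment header bytes
 * built above (and parsed back in lowpan_process_data()) laid out in one
 * place.  The helper name is made up for illustration.
 */
static void lowpan_build_frag_head(u8 *head, bool first, u16 payload_length,
				   u16 tag, u16 offset)
{
	head[0] = (first ? LOWPAN_DISPATCH_FRAG1 : LOWPAN_DISPATCH_FRAGN) |
		  (payload_length & 0x7);	/* dispatch + size low bits */
	head[1] = (payload_length >> 3) & 0xff;	/* remaining size bits      */
	head[2] = tag & 0xff;			/* datagram tag, low byte   */
	head[3] = tag >> 8;			/* datagram tag, high byte  */
	if (!first)
		head[4] = offset / 8;		/* offset in 8-octet units  */
}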
+
 static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       int err = 0;
+       int err = -1;
 
        pr_debug("(%s): packet xmit\n", __func__);
 
        skb->dev = lowpan_dev_info(dev)->real_dev;
        if (skb->dev == NULL) {
                pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
-               dev_kfree_skb(skb);
-       } else
+               goto error;
+       }
+
+       if (skb->len <= IEEE802154_MTU) {
                err = dev_queue_xmit(skb);
+               goto out;
+       }
+
+       pr_debug("(%s): frame is too big, fragmentation is needed\n",
+                                                               __func__);
+       err = lowpan_skb_fragmentation(skb);
+error:
+       dev_kfree_skb(skb);
+out:
+       if (err < 0)
+               pr_debug("(%s): ERROR: xmit failed\n", __func__);
 
        return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
 }
@@ -730,13 +1072,12 @@ static void lowpan_setup(struct net_device *dev)
        dev->addr_len           = IEEE802154_ADDR_LEN;
        memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
        dev->type               = ARPHRD_IEEE802154;
-       dev->features           = NETIF_F_NO_CSUM;
        /* Frame Control + Sequence Number + Address fields + Security Header */
        dev->hard_header_len    = 2 + 1 + 20 + 14;
        dev->needed_tailroom    = 2; /* FCS */
        dev->mtu                = 1281;
        dev->tx_queue_len       = 0;
-       dev->flags              = IFF_NOARP | IFF_BROADCAST;
+       dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
        dev->watchdog_timeo     = 0;
 
        dev->netdev_ops         = &lowpan_netdev_ops;
@@ -765,8 +1106,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        /* check that it's our buffer */
-       if ((skb->data[0] & 0xe0) == 0x60)
+       switch (skb->data[0] & 0xe0) {
+       case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
+       case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
+       case LOWPAN_DISPATCH_FRAGN:     /* subsequent fragment headers */
                lowpan_process_data(skb);
+               break;
+       default:
+               break;
+       }
 
        return NET_RX_SUCCESS;
 
index 5d8cf80..aeff3f3 100644 (file)
 #define LOWPAN_DISPATCH_FRAG1  0xc0 /* 11000xxx */
 #define LOWPAN_DISPATCH_FRAGN  0xe0 /* 11100xxx */
 
+#define LOWPAN_DISPATCH_MASK   0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT    (HZ * 60)       /* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
+/*
+ * According to the IEEE 802.15.4 standard:
+ *   - MTU is 127 octets
+ *   - maximum MHR size is 37 octets
+ *   - MFR size is 2 octets
+ *
+ * so the minimal payload size that we can guarantee is:
+ *   MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE       88
+
 /*
  * Values of fields within the IPHC encoding first byte
  * (C stands for compressed and I for inline)
 #define LOWPAN_NHC_UDP_CHECKSUMC       0x04
 #define LOWPAN_NHC_UDP_CHECKSUMI       0x00
 
+#define LOWPAN_NHC_UDP_4BIT_PORT       0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK       0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT       0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK       0xFF00
+
 /* values for port compression, _with checksum_ ie bit 5 set to 0 */
 #define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
 #define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
index faecf64..1b09eaa 100644 (file)
@@ -209,6 +209,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        unsigned mtu;
        struct sk_buff *skb;
        struct dgram_sock *ro = dgram_sk(sk);
+       int hlen, tlen;
        int err;
 
        if (msg->msg_flags & MSG_OOB) {
@@ -229,13 +230,15 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        mtu = dev->mtu;
        pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
-       skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
                        msg->msg_flags & MSG_DONTWAIT,
                        &err);
        if (!skb)
                goto out_dev;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
 
index 10970ca..f96bae8 100644 (file)
@@ -108,6 +108,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct net_device *dev;
        unsigned mtu;
        struct sk_buff *skb;
+       int hlen, tlen;
        int err;
 
        if (msg->msg_flags & MSG_OOB) {
@@ -137,12 +138,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                goto out_dev;
        }
 
-       skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size,
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = sock_alloc_send_skb(sk, hlen + tlen + size,
                        msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto out_dev;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
index cbb505b..1a8f93b 100644 (file)
@@ -409,6 +409,10 @@ config INET_TCP_DIAG
        depends on INET_DIAG
        def_tristate INET_DIAG
 
+config INET_UDP_DIAG
+       depends on INET_DIAG
+       def_tristate INET_DIAG && IPV6
+
 menuconfig TCP_CONG_ADVANCED
        bool "TCP: advanced congestion control"
        ---help---
index f2dc69c..ff75d3b 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_IP_PNP) += ipconfig.o
 obj-$(CONFIG_NETFILTER)        += netfilter.o netfilter/
 obj-$(CONFIG_INET_DIAG) += inet_diag.o 
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
+obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
+obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
index 1b5096a..f7b5670 100644 (file)
@@ -1250,7 +1250,8 @@ out:
        return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct iphdr *iph;
@@ -1572,9 +1573,9 @@ static __net_init int ipv4_mib_init_net(struct net *net)
                          sizeof(struct icmp_mib),
                          __alignof__(struct icmp_mib)) < 0)
                goto err_icmp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
-                         sizeof(struct icmpmsg_mib),
-                         __alignof__(struct icmpmsg_mib)) < 0)
+       net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
+                                             GFP_KERNEL);
+       if (!net->mib.icmpmsg_statistics)
                goto err_icmpmsg_mib;
 
        tcp_mib_init(net);
@@ -1598,7 +1599,7 @@ err_tcp_mib:
 
 static __net_exit void ipv4_mib_exit_net(struct net *net)
 {
-       snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+       kfree(net->mib.icmpmsg_statistics);
        snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
        snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
        snmp_mib_free((void __percpu **)net->mib.udp_statistics);
@@ -1671,6 +1672,8 @@ static int __init inet_init(void)
        ip_static_sysctl_init();
 #endif
 
+       tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
        /*
         *      Add all the base protocols.
         */
index 96a164a..381a087 100644 (file)
 #include <net/arp.h>
 #include <net/ax25.h>
 #include <net/netrom.h>
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-#include <net/atmclip.h>
-struct neigh_table *clip_tbl_hook;
-EXPORT_SYMBOL(clip_tbl_hook);
-#endif
 
 #include <asm/system.h>
 #include <linux/uaccess.h>
@@ -164,7 +159,6 @@ static const struct neigh_ops arp_broken_ops = {
 
 struct neigh_table arp_tbl = {
        .family         = AF_INET,
-       .entry_size     = sizeof(struct neighbour) + 4,
        .key_len        = 4,
        .hash           = arp_hash,
        .constructor    = arp_constructor,
@@ -177,7 +171,7 @@ struct neigh_table arp_tbl = {
                .gc_staletime           = 60 * HZ,
                .reachable_time         = 30 * HZ,
                .delay_probe_time       = 5 * HZ,
-               .queue_len              = 3,
+               .queue_len_bytes        = 64*1024,
                .ucast_probes           = 3,
                .mcast_probes           = 3,
                .anycast_delay          = 1 * HZ,
@@ -283,9 +277,9 @@ static int arp_constructor(struct neighbour *neigh)
                default:
                        break;
                case ARPHRD_ROSE:
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
                case ARPHRD_AX25:
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
                case ARPHRD_NETROM:
 #endif
                        neigh->ops = &arp_broken_ops;
@@ -592,16 +586,18 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
        struct sk_buff *skb;
        struct arphdr *arp;
        unsigned char *arp_ptr;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
 
        /*
         *      Allocate a buffer
         */
 
-       skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+       skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
        arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
        skb->dev = dev;
@@ -633,13 +629,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                arp->ar_pro = htons(ETH_P_IP);
                break;
 
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        case ARPHRD_AX25:
                arp->ar_hrd = htons(ARPHRD_AX25);
                arp->ar_pro = htons(AX25_P_IP);
                break;
 
-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
+#if IS_ENABLED(CONFIG_NETROM)
        case ARPHRD_NETROM:
                arp->ar_hrd = htons(ARPHRD_NETROM);
                arp->ar_pro = htons(AX25_P_IP);
@@ -647,13 +643,13 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
 #endif
 #endif
 
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
        case ARPHRD_FDDI:
                arp->ar_hrd = htons(ARPHRD_ETHER);
                arp->ar_pro = htons(ETH_P_IP);
                break;
 #endif
-#if defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
+#if IS_ENABLED(CONFIG_TR)
        case ARPHRD_IEEE802_TR:
                arp->ar_hrd = htons(ARPHRD_IEEE802);
                arp->ar_pro = htons(ETH_P_IP);
@@ -1040,7 +1036,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
                        return -EINVAL;
        }
        switch (dev->type) {
-#if defined(CONFIG_FDDI) || defined(CONFIG_FDDI_MODULE)
+#if IS_ENABLED(CONFIG_FDDI)
        case ARPHRD_FDDI:
                /*
                 * According to RFC 1390, FDDI devices should accept ARP
@@ -1286,7 +1282,7 @@ void __init arp_init(void)
 }
 
 #ifdef CONFIG_PROC_FS
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
 
 /* ------------------------------------------------------------------------ */
 /*
@@ -1334,7 +1330,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
 
        read_lock(&n->lock);
        /* Convert hardware address to XX:XX:XX:XX ... form. */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        if (hatype == ARPHRD_AX25 || hatype == ARPHRD_NETROM)
                ax2asc2((ax25_address *)n->ha, hbuffer);
        else {
@@ -1347,7 +1343,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
        if (k != 0)
                --k;
        hbuffer[k] = 0;
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
        }
 #endif
        sprintf(tbuf, "%pI4", n->primary_key);
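/*
 * Illustrative note, not part of this patch: IS_ENABLED(CONFIG_FOO) from
 * <linux/kconfig.h> is true when CONFIG_FOO is built in (=y) or built as a
 * module (=m), so each conversion above is behaviour-preserving.  Minimal
 * example; have_ax25 is a made-up name.
 */
#include <linux/kconfig.h>
#include <linux/types.h>

#if IS_ENABLED(CONFIG_AX25)
static const bool have_ax25 = true;	/* CONFIG_AX25=y or CONFIG_AX25=m */
#else
static const bool have_ax25 = false;	/* AX.25 support not configured   */
#endif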
index b2ca095..fa057d1 100644 (file)
@@ -304,9 +304,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        struct igmpv3_report *pig;
        struct net *net = dev_net(dev);
        struct flowi4 fl4;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
 
        while (1) {
-               skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+               skb = alloc_skb(size + hlen + tlen,
                                GFP_ATOMIC | __GFP_NOWARN);
                if (skb)
                        break;
@@ -327,7 +329,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
        skb_dst_set(skb, &rt->dst);
        skb->dev = dev;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
        pip = ip_hdr(skb);
@@ -647,6 +649,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        __be32  group = pmc ? pmc->multiaddr : 0;
        struct flowi4 fl4;
        __be32  dst;
+       int hlen, tlen;
 
        if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
                return igmpv3_send_report(in_dev, pmc);
@@ -661,7 +664,9 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
        if (IS_ERR(rt))
                return -1;
 
-       skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
        if (skb == NULL) {
                ip_rt_put(rt);
                return -1;
@@ -669,7 +674,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 
        skb_dst_set(skb, &rt->dst);
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
@@ -1574,7 +1579,7 @@ out_unlock:
  * Add multicast single-source filter to the interface list
  */
 static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
-       __be32 *psfsrc, int delta)
+       __be32 *psfsrc)
 {
        struct ip_sf_list *psf, *psf_prev;
 
@@ -1709,7 +1714,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                pmc->sfcount[sfmode]++;
        err = 0;
        for (i=0; i<sfcount; i++) {
-               err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+               err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
                if (err)
                        break;
        }
index c14d88a..2e4e244 100644 (file)
@@ -418,7 +418,7 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
 #define AF_INET_FAMILY(fam) 1
@@ -588,10 +588,19 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 }
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
 
-struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
-                           const gfp_t priority)
+/**
+ *     inet_csk_clone_lock - clone an inet socket, and lock its clone
+ *     @sk: the socket to clone
+ *     @req: request_sock
+ *     @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ *
+ *     Caller must unlock socket even in error path (bh_unlock_sock(newsk))
+ */
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+                                const struct request_sock *req,
+                                const gfp_t priority)
 {
-       struct sock *newsk = sk_clone(sk, priority);
+       struct sock *newsk = sk_clone_lock(sk, priority);
 
        if (newsk != NULL) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
@@ -615,7 +624,7 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
        }
        return newsk;
 }
-EXPORT_SYMBOL_GPL(inet_csk_clone);
+EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
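/*
 * Illustrative usage sketch, not part of this patch: like sk_clone_lock(),
 * the renamed helper returns the clone with its socket lock held, so every
 * caller must bh_unlock_sock() it, error paths included.  The surrounding
 * function is a made-up caller.
 */
static struct sock *example_accept_clone(struct sock *sk,
					 struct request_sock *req)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk == NULL)
		return NULL;

	/* ... protocol-specific initialisation of newsk ... */

	bh_unlock_sock(newsk);	/* mandatory, per the kernel-doc above */
	return newsk;
}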
 
 /*
  * At this point, there should be no process reference to this
index ccee270..575e28c 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/stddef.h>
 
 #include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
 
 static const struct inet_diag_handler **inet_diag_table;
 
@@ -45,24 +46,22 @@ struct inet_diag_entry {
        u16 userlocks;
 };
 
-static struct sock *idiagnl;
-
 #define INET_DIAG_PUT(skb, attrtype, attrlen) \
        RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
 
 static DEFINE_MUTEX(inet_diag_table_mutex);
 
-static const struct inet_diag_handler *inet_diag_lock_handler(int type)
+static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
 {
-       if (!inet_diag_table[type])
+       if (!inet_diag_table[proto])
                request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
-                              NETLINK_INET_DIAG, type);
+                              NETLINK_SOCK_DIAG, proto);
 
        mutex_lock(&inet_diag_table_mutex);
-       if (!inet_diag_table[type])
+       if (!inet_diag_table[proto])
                return ERR_PTR(-ENOENT);
 
-       return inet_diag_table[type];
+       return inet_diag_table[proto];
 }
 
 static inline void inet_diag_unlock_handler(
@@ -71,21 +70,21 @@ static inline void inet_diag_unlock_handler(
        mutex_unlock(&inet_diag_table_mutex);
 }
 
-static int inet_csk_diag_fill(struct sock *sk,
-                             struct sk_buff *skb,
-                             int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
                              const struct nlmsghdr *unlh)
 {
        const struct inet_sock *inet = inet_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_diag_msg *r;
        struct nlmsghdr  *nlh;
        void *info = NULL;
        struct inet_diag_meminfo  *minfo = NULL;
        unsigned char    *b = skb_tail_pointer(skb);
        const struct inet_diag_handler *handler;
+       int ext = req->idiag_ext;
 
-       handler = inet_diag_table[unlh->nlmsg_type];
+       handler = inet_diag_table[req->sdiag_protocol];
        BUG_ON(handler == NULL);
 
        nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
@@ -97,17 +96,6 @@ static int inet_csk_diag_fill(struct sock *sk,
        if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
                minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
 
-       if (ext & (1 << (INET_DIAG_INFO - 1)))
-               info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
-                                    handler->idiag_info_size);
-
-       if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
-               const size_t len = strlen(icsk->icsk_ca_ops->name);
-
-               strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
-                      icsk->icsk_ca_ops->name);
-       }
-
        r->idiag_family = sk->sk_family;
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
@@ -128,20 +116,32 @@ static int inet_csk_diag_fill(struct sock *sk,
        if (ext & (1 << (INET_DIAG_TOS - 1)))
                RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
                const struct ipv6_pinfo *np = inet6_sk(sk);
 
+               *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = np->daddr;
                if (ext & (1 << (INET_DIAG_TCLASS - 1)))
                        RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
-
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-                              &np->rcv_saddr);
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-                              &np->daddr);
        }
 #endif
 
+       r->idiag_uid = sock_i_uid(sk);
+       r->idiag_inode = sock_i_ino(sk);
+
+       if (minfo) {
+               minfo->idiag_rmem = sk_rmem_alloc_get(sk);
+               minfo->idiag_wmem = sk->sk_wmem_queued;
+               minfo->idiag_fmem = sk->sk_forward_alloc;
+               minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+       }
+
+       if (icsk == NULL) {
+               r->idiag_rqueue = r->idiag_wqueue = 0;
+               goto out;
+       }
+
 #define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
@@ -162,14 +162,14 @@ static int inet_csk_diag_fill(struct sock *sk,
        }
 #undef EXPIRES_IN_MS
 
-       r->idiag_uid = sock_i_uid(sk);
-       r->idiag_inode = sock_i_ino(sk);
+       if (ext & (1 << (INET_DIAG_INFO - 1)))
+               info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
 
-       if (minfo) {
-               minfo->idiag_rmem = sk_rmem_alloc_get(sk);
-               minfo->idiag_wmem = sk->sk_wmem_queued;
-               minfo->idiag_fmem = sk->sk_forward_alloc;
-               minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+       if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+               const size_t len = strlen(icsk->icsk_ca_ops->name);
+
+               strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
+                      icsk->icsk_ca_ops->name);
        }
 
        handler->idiag_get_info(sk, r, info);
@@ -178,6 +178,7 @@ static int inet_csk_diag_fill(struct sock *sk,
            icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
                icsk->icsk_ca_ops->get_info(sk, ext, skb);
 
+out:
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;
 
@@ -186,10 +187,20 @@ nlmsg_failure:
        nlmsg_trim(skb, b);
        return -EMSGSIZE;
 }
+EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
+
+static int inet_csk_diag_fill(struct sock *sk,
+                             struct sk_buff *skb, struct inet_diag_req *req,
+                             u32 pid, u32 seq, u16 nlmsg_flags,
+                             const struct nlmsghdr *unlh)
+{
+       return inet_sk_diag_fill(sk, inet_csk(sk),
+                       skb, req, pid, seq, nlmsg_flags, unlh);
+}
 
 static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
-                              struct sk_buff *skb, int ext, u32 pid,
-                              u32 seq, u16 nlmsg_flags,
+                              struct sk_buff *skb, struct inet_diag_req *req,
+                              u32 pid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
        long tmo;
@@ -223,15 +234,13 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
        r->idiag_inode        = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (tw->tw_family == AF_INET6) {
                const struct inet6_timewait_sock *tw6 =
                                                inet6_twsk((struct sock *)tw);
 
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-                              &tw6->tw_v6_rcv_saddr);
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-                              &tw6->tw_v6_daddr);
+               *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
        }
 #endif
        nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
@@ -242,42 +251,43 @@ nlmsg_failure:
 }
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
-                       int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+                       struct inet_diag_req *r, u32 pid, u32 seq, u16 nlmsg_flags,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
                return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, ext, pid, seq, nlmsg_flags,
+                                          skb, r, pid, seq, nlmsg_flags,
                                           unlh);
-       return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
+       return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
 }
 
-static int inet_diag_get_exact(struct sk_buff *in_skb,
-                              const struct nlmsghdr *nlh)
+int inet_diag_check_cookie(struct sock *sk, struct inet_diag_req *req)
+{
+       if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
+            req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
+           ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
+            (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+               return -ESTALE;
+       else
+               return 0;
+}
+EXPORT_SYMBOL_GPL(inet_diag_check_cookie);
+
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
+               const struct nlmsghdr *nlh, struct inet_diag_req *req)
 {
        int err;
        struct sock *sk;
-       struct inet_diag_req *req = NLMSG_DATA(nlh);
        struct sk_buff *rep;
-       struct inet_hashinfo *hashinfo;
-       const struct inet_diag_handler *handler;
-
-       handler = inet_diag_lock_handler(nlh->nlmsg_type);
-       if (IS_ERR(handler)) {
-               err = PTR_ERR(handler);
-               goto unlock;
-       }
 
-       hashinfo = handler->idiag_hashinfo;
        err = -EINVAL;
-
-       if (req->idiag_family == AF_INET) {
+       if (req->sdiag_family == AF_INET) {
                sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
        }
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-       else if (req->idiag_family == AF_INET6) {
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6) {
                sk = inet6_lookup(&init_net, hashinfo,
                                  (struct in6_addr *)req->id.idiag_dst,
                                  req->id.idiag_dport,
@@ -287,29 +297,26 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
        }
 #endif
        else {
-               goto unlock;
+               goto out_nosk;
        }
 
        err = -ENOENT;
        if (sk == NULL)
-               goto unlock;
+               goto out_nosk;
 
-       err = -ESTALE;
-       if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
-            req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
-           ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
-            (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
+       err = inet_diag_check_cookie(sk, req);
+       if (err)
                goto out;
 
        err = -ENOMEM;
        rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
                                     sizeof(struct inet_diag_meminfo) +
-                                    handler->idiag_info_size + 64)),
+                                    sizeof(struct tcp_info) + 64)),
                        GFP_KERNEL);
        if (!rep)
                goto out;
 
-       err = sk_diag_fill(sk, rep, req->idiag_ext,
+       err = sk_diag_fill(sk, rep, req,
                           NETLINK_CB(in_skb).pid,
                           nlh->nlmsg_seq, 0, nlh);
        if (err < 0) {
@@ -317,7 +324,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
                kfree_skb(rep);
                goto out;
        }
-       err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
                              MSG_DONTWAIT);
        if (err > 0)
                err = 0;
@@ -329,8 +336,25 @@ out:
                else
                        sock_put(sk);
        }
-unlock:
+out_nosk:
+       return err;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
+
+static int inet_diag_get_exact(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh,
+                              struct inet_diag_req *req)
+{
+       const struct inet_diag_handler *handler;
+       int err;
+
+       handler = inet_diag_lock_handler(req->sdiag_protocol);
+       if (IS_ERR(handler))
+               err = PTR_ERR(handler);
+       else
+               err = handler->dump_one(in_skb, nlh, req);
        inet_diag_unlock_handler(handler);
+
        return err;
 }
 
@@ -361,9 +385,12 @@ static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
 }
 
 
-static int inet_diag_bc_run(const void *bc, int len,
-                           const struct inet_diag_entry *entry)
+static int inet_diag_bc_run(const struct nlattr *_bc,
+               const struct inet_diag_entry *entry)
 {
+       const void *bc = nla_data(_bc);
+       int len = nla_len(_bc);
+
        while (len > 0) {
                int yes = 1;
                const struct inet_diag_bc_op *op = bc;
@@ -437,6 +464,35 @@ static int inet_diag_bc_run(const void *bc, int len,
        return len == 0;
 }
 
+int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
+{
+       struct inet_diag_entry entry;
+       struct inet_sock *inet = inet_sk(sk);
+
+       if (bc == NULL)
+               return 1;
+
+       entry.family = sk->sk_family;
+#if IS_ENABLED(CONFIG_IPV6)
+       if (entry.family == AF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+
+               entry.saddr = np->rcv_saddr.s6_addr32;
+               entry.daddr = np->daddr.s6_addr32;
+       } else
+#endif
+       {
+               entry.saddr = &inet->inet_rcv_saddr;
+               entry.daddr = &inet->inet_daddr;
+       }
+       entry.sport = inet->inet_num;
+       entry.dport = ntohs(inet->inet_dport);
+       entry.userlocks = sk->sk_userlocks;
+
+       return inet_diag_bc_run(bc, &entry);
+}
+EXPORT_SYMBOL_GPL(inet_diag_bc_sk);
+
 static int valid_cc(const void *bc, int len, int cc)
 {
        while (len >= 0) {
@@ -493,57 +549,29 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 
 static int inet_csk_diag_dump(struct sock *sk,
                              struct sk_buff *skb,
-                             struct netlink_callback *cb)
+                             struct netlink_callback *cb,
+                             struct inet_diag_req *r,
+                             const struct nlattr *bc)
 {
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
 
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-               struct inet_diag_entry entry;
-               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-                                                         sizeof(*r),
-                                                         INET_DIAG_REQ_BYTECODE);
-               struct inet_sock *inet = inet_sk(sk);
-
-               entry.family = sk->sk_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-               if (entry.family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
-
-                       entry.saddr = np->rcv_saddr.s6_addr32;
-                       entry.daddr = np->daddr.s6_addr32;
-               } else
-#endif
-               {
-                       entry.saddr = &inet->inet_rcv_saddr;
-                       entry.daddr = &inet->inet_daddr;
-               }
-               entry.sport = inet->inet_num;
-               entry.dport = ntohs(inet->inet_dport);
-               entry.userlocks = sk->sk_userlocks;
-
-               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
-                       return 0;
-       }
-
-       return inet_csk_diag_fill(sk, skb, r->idiag_ext,
+       return inet_csk_diag_fill(sk, skb, r,
                                  NETLINK_CB(cb->skb).pid,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
 static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                               struct sk_buff *skb,
-                              struct netlink_callback *cb)
+                              struct netlink_callback *cb,
+                              struct inet_diag_req *r,
+                              const struct nlattr *bc)
 {
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+       if (bc != NULL) {
                struct inet_diag_entry entry;
-               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
-                                                         sizeof(*r),
-                                                         INET_DIAG_REQ_BYTECODE);
 
                entry.family = tw->tw_family;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
                        struct inet6_timewait_sock *tw6 =
                                                inet6_twsk((struct sock *)tw);
@@ -559,11 +587,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                entry.dport = ntohs(tw->tw_dport);
                entry.userlocks = 0;
 
-               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
+               if (!inet_diag_bc_run(bc, &entry))
                        return 0;
        }
 
-       return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
+       return inet_twsk_diag_fill(tw, skb, r,
                                   NETLINK_CB(cb->skb).pid,
                                   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
@@ -605,12 +633,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
        r->idiag_wqueue = 0;
        r->idiag_uid = sock_i_uid(sk);
        r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
-                              &inet6_rsk(req)->loc_addr);
-               ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
-                              &inet6_rsk(req)->rmt_addr);
+               *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
+               *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
        }
 #endif
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -623,13 +649,13 @@ nlmsg_failure:
 }
 
 static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
-                              struct netlink_callback *cb)
+                              struct netlink_callback *cb,
+                              struct inet_diag_req *r,
+                              const struct nlattr *bc)
 {
        struct inet_diag_entry entry;
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt;
-       const struct nlattr *bc = NULL;
        struct inet_sock *inet = inet_sk(sk);
        int j, s_j;
        int reqnum, s_reqnum;
@@ -649,9 +675,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        if (!lopt || !lopt->qlen)
                goto out;
 
-       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
-               bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
-                                    INET_DIAG_REQ_BYTECODE);
+       if (bc != NULL) {
                entry.sport = inet->inet_num;
                entry.userlocks = sk->sk_userlocks;
        }
@@ -671,21 +695,20 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                        if (bc) {
                                entry.saddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                                        (entry.family == AF_INET6) ?
                                        inet6_rsk(req)->loc_addr.s6_addr32 :
 #endif
                                        &ireq->loc_addr;
                                entry.daddr =
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                                        (entry.family == AF_INET6) ?
                                        inet6_rsk(req)->rmt_addr.s6_addr32 :
 #endif
                                        &ireq->rmt_addr;
                                entry.dport = ntohs(ireq->rmt_port);
 
-                               if (!inet_diag_bc_run(nla_data(bc),
-                                                     nla_len(bc), &entry))
+                               if (!inet_diag_bc_run(bc, &entry))
                                        continue;
                        }
 
@@ -708,19 +731,11 @@ out:
        return err;
 }
 
-static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *r, struct nlattr *bc)
 {
        int i, num;
        int s_i, s_num;
-       struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
-       const struct inet_diag_handler *handler;
-       struct inet_hashinfo *hashinfo;
-
-       handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
-       if (IS_ERR(handler))
-               goto unlock;
-
-       hashinfo = handler->idiag_hashinfo;
 
        s_i = cb->args[1];
        s_num = num = cb->args[2];
@@ -745,6 +760,10 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                        continue;
                                }
 
+                               if (r->sdiag_family != AF_UNSPEC &&
+                                               sk->sk_family != r->sdiag_family)
+                                       goto next_listen;
+
                                if (r->id.idiag_sport != inet->inet_sport &&
                                    r->id.idiag_sport)
                                        goto next_listen;
@@ -754,7 +773,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                    cb->args[3] > 0)
                                        goto syn_recv;
 
-                               if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+                               if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -763,7 +782,7 @@ syn_recv:
                                if (!(r->idiag_states & TCPF_SYN_RECV))
                                        goto next_listen;
 
-                               if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
+                               if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
                                        spin_unlock_bh(&ilb->lock);
                                        goto done;
                                }
@@ -785,7 +804,7 @@ skip_listen_ht:
        }
 
        if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
-               goto unlock;
+               goto out;
 
        for (i = s_i; i <= hashinfo->ehash_mask; i++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[i];
@@ -810,13 +829,16 @@ skip_listen_ht:
                                goto next_normal;
                        if (!(r->idiag_states & (1 << sk->sk_state)))
                                goto next_normal;
+                       if (r->sdiag_family != AF_UNSPEC &&
+                                       sk->sk_family != r->sdiag_family)
+                               goto next_normal;
                        if (r->id.idiag_sport != inet->inet_sport &&
                            r->id.idiag_sport)
                                goto next_normal;
                        if (r->id.idiag_dport != inet->inet_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
-                       if (inet_csk_diag_dump(sk, skb, cb) < 0) {
+                       if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
@@ -832,13 +854,16 @@ next_normal:
 
                                if (num < s_num)
                                        goto next_dying;
+                               if (r->sdiag_family != AF_UNSPEC &&
+                                               tw->tw_family != r->sdiag_family)
+                                       goto next_dying;
                                if (r->id.idiag_sport != tw->tw_sport &&
                                    r->id.idiag_sport)
                                        goto next_dying;
                                if (r->id.idiag_dport != tw->tw_dport &&
                                    r->id.idiag_dport)
                                        goto next_dying;
-                               if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
+                               if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
                                        spin_unlock_bh(lock);
                                        goto done;
                                }
@@ -852,15 +877,85 @@ next_dying:
 done:
        cb->args[1] = i;
        cb->args[2] = num;
-unlock:
+out:
+       ;
+}
+EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
+
+static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       const struct inet_diag_handler *handler;
+
+       handler = inet_diag_lock_handler(r->sdiag_protocol);
+       if (!IS_ERR(handler))
+               handler->dump(skb, cb, r, bc);
        inet_diag_unlock_handler(handler);
+
        return skb->len;
 }
 
-static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+       struct nlattr *bc = NULL;
        int hdrlen = sizeof(struct inet_diag_req);
 
+       if (nlmsg_attrlen(cb->nlh, hdrlen))
+               bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+       return __inet_diag_dump(skb, cb, (struct inet_diag_req *)NLMSG_DATA(cb->nlh), bc);
+}
+
+static inline int inet_diag_type2proto(int type)
+{
+       switch (type) {
+       case TCPDIAG_GETSOCK:
+               return IPPROTO_TCP;
+       case DCCPDIAG_GETSOCK:
+               return IPPROTO_DCCP;
+       default:
+               return 0;
+       }
+}
+
+static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct inet_diag_req_compat *rc = NLMSG_DATA(cb->nlh);
+       struct inet_diag_req req;
+       struct nlattr *bc = NULL;
+       int hdrlen = sizeof(struct inet_diag_req_compat);
+
+       req.sdiag_family = AF_UNSPEC; /* compatibility */
+       req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+
+       if (nlmsg_attrlen(cb->nlh, hdrlen))
+               bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
+
+       return __inet_diag_dump(skb, cb, &req, bc);
+}
+
+static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
+                              const struct nlmsghdr *nlh)
+{
+       struct inet_diag_req_compat *rc = NLMSG_DATA(nlh);
+       struct inet_diag_req req;
+
+       req.sdiag_family = rc->idiag_family;
+       req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
+       req.idiag_ext = rc->idiag_ext;
+       req.idiag_states = rc->idiag_states;
+       req.id = rc->id;
+
+       return inet_diag_get_exact(in_skb, nlh, &req);
+}
+
+static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int hdrlen = sizeof(struct inet_diag_req_compat);
+
        if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
            nlmsg_len(nlh) < hdrlen)
                return -EINVAL;
@@ -877,28 +972,54 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                return -EINVAL;
                }
 
-               return netlink_dump_start(idiagnl, skb, nlh,
-                                         inet_diag_dump, NULL, 0);
+               return netlink_dump_start(sock_diag_nlsk, skb, nlh,
+                                         inet_diag_dump_compat, NULL, 0);
        }
 
-       return inet_diag_get_exact(skb, nlh);
+       return inet_diag_get_exact_compat(skb, nlh);
 }
 
-static DEFINE_MUTEX(inet_diag_mutex);
-
-static void inet_diag_rcv(struct sk_buff *skb)
+static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 {
-       mutex_lock(&inet_diag_mutex);
-       netlink_rcv_skb(skb, &inet_diag_rcv_msg);
-       mutex_unlock(&inet_diag_mutex);
+       int hdrlen = sizeof(struct inet_diag_req);
+
+       if (nlmsg_len(h) < hdrlen)
+               return -EINVAL;
+
+       if (h->nlmsg_flags & NLM_F_DUMP) {
+               if (nlmsg_attrlen(h, hdrlen)) {
+                       struct nlattr *attr;
+                       attr = nlmsg_find_attr(h, hdrlen,
+                                              INET_DIAG_REQ_BYTECODE);
+                       if (attr == NULL ||
+                           nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
+                           inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
+                               return -EINVAL;
+               }
+
+               return netlink_dump_start(sock_diag_nlsk, skb, h,
+                                         inet_diag_dump, NULL, 0);
+       }
+
+       return inet_diag_get_exact(skb, h, (struct inet_diag_req *)NLMSG_DATA(h));
 }
 
+static struct sock_diag_handler inet_diag_handler = {
+       .family = AF_INET,
+       .dump = inet_diag_handler_dump,
+};
+
+static struct sock_diag_handler inet6_diag_handler = {
+       .family = AF_INET6,
+       .dump = inet_diag_handler_dump,
+};
+
 int inet_diag_register(const struct inet_diag_handler *h)
 {
        const __u16 type = h->idiag_type;
        int err = -EINVAL;
 
-       if (type >= INET_DIAG_GETSOCK_MAX)
+       if (type >= IPPROTO_MAX)
                goto out;
 
        mutex_lock(&inet_diag_table_mutex);
@@ -917,7 +1038,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
 {
        const __u16 type = h->idiag_type;
 
-       if (type >= INET_DIAG_GETSOCK_MAX)
+       if (type >= IPPROTO_MAX)
                return;
 
        mutex_lock(&inet_diag_table_mutex);
@@ -928,7 +1049,7 @@ EXPORT_SYMBOL_GPL(inet_diag_unregister);
 
 static int __init inet_diag_init(void)
 {
-       const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
+       const int inet_diag_table_size = (IPPROTO_MAX *
                                          sizeof(struct inet_diag_handler *));
        int err = -ENOMEM;
 
@@ -936,25 +1057,34 @@ static int __init inet_diag_init(void)
        if (!inet_diag_table)
                goto out;
 
-       idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
-                                       inet_diag_rcv, NULL, THIS_MODULE);
-       if (idiagnl == NULL)
-               goto out_free_table;
-       err = 0;
+       err = sock_diag_register(&inet_diag_handler);
+       if (err)
+               goto out_free_nl;
+
+       err = sock_diag_register(&inet6_diag_handler);
+       if (err)
+               goto out_free_inet;
+
+       sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
 out:
        return err;
-out_free_table:
+
+out_free_inet:
+       sock_diag_unregister(&inet_diag_handler);
+out_free_nl:
        kfree(inet_diag_table);
        goto out;
 }
 
 static void __exit inet_diag_exit(void)
 {
-       netlink_kernel_release(idiagnl);
+       sock_diag_unregister(&inet6_diag_handler);
+       sock_diag_unregister(&inet_diag_handler);
+       sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
        kfree(inet_diag_table);
 }
 
 module_init(inet_diag_init);
 module_exit(inet_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 0);
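
The inet_diag.c hunks above move socket diagnostics onto the shared NETLINK_SOCK_DIAG channel: handlers are now registered per transport protocol (the table is sized IPPROTO_MAX instead of INET_DIAG_GETSOCK_MAX), and the old TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK requests are mapped to protocols by inet_diag_type2proto() and the *_compat helpers. The standalone sketch below models only the dispatch idea — a table indexed by protocol number with register/lookup — using invented names (diag_handler, diag_register, diag_dump); it is not the kernel code.

#include <stdio.h>

#define PROTO_MAX 256                    /* stand-in for IPPROTO_MAX */

struct diag_handler {
        void (*dump)(void);
};

static const struct diag_handler *diag_table[PROTO_MAX];

static int diag_register(int proto, const struct diag_handler *h)
{
        if (proto < 0 || proto >= PROTO_MAX || diag_table[proto])
                return -1;               /* out of range or already taken */
        diag_table[proto] = h;
        return 0;
}

static void diag_dump(int proto)
{
        const struct diag_handler *h =
                (proto >= 0 && proto < PROTO_MAX) ? diag_table[proto] : NULL;

        if (h)
                h->dump();               /* dispatch on the request's protocol */
        else
                printf("proto %d: no handler\n", proto);
}

static void tcp_dump(void) { puts("dumping TCP sockets"); }

static const struct diag_handler tcp_handler = { .dump = tcp_dump };

int main(void)
{
        diag_register(6, &tcp_handler);  /* 6 == IPPROTO_TCP */
        diag_dump(6);
        diag_dump(33);                   /* unregistered protocol */
        return 0;
}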
index d55110e..2b53a1f 100644 (file)
@@ -46,7 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/gre.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -171,7 +171,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
 {
@@ -729,9 +729,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        if ((dst = rt->rt_gateway) == 0)
                                goto tx_error_icmp;
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
-                       struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
+                       struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
                        const struct in6_addr *addr6;
                        int addr_type;
 
@@ -799,7 +799,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        goto tx_error;
                }
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
 
@@ -835,6 +835,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
                struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+               if (max_headroom > dev->needed_headroom)
+                       dev->needed_headroom = max_headroom;
                if (!new_skb) {
                        ip_rt_put(rt);
                        dev->stats.tx_dropped++;
@@ -873,7 +875,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
        if ((iph->ttl = tiph->ttl) == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        iph->ttl = old_iph->ttl;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 #endif
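
This file (and ipip.c further below) pads struct pcpu_tstats with __attribute__((aligned(4*sizeof(unsigned long)))), which raises the struct's alignment so each per-cpu counter block starts on a boundary that keeps its four counters together. A minimal userspace sketch of the effect, assuming the struct carries four unsigned long counters as in the kernel; the struct names here are invented:

#include <stdio.h>

struct tstats_plain {
        unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes;
};

struct tstats_aligned {
        unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes;
} __attribute__((aligned(4 * sizeof(unsigned long))));

int main(void)
{
        /* On a 64-bit build this prints size 32 / align 8 versus
         * size 32 / align 32: the attribute only changes where
         * instances may start, not what they contain. */
        printf("plain  : size %zu align %zu\n",
               sizeof(struct tstats_plain), _Alignof(struct tstats_plain));
        printf("aligned: size %zu align %zu\n",
               sizeof(struct tstats_aligned), _Alignof(struct tstats_aligned));
        return 0;
}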
index 0bc95f3..ff302bd 100644 (file)
@@ -206,7 +206,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb);
 
@@ -319,6 +319,20 @@ int ip_output(struct sk_buff *skb)
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
+/*
+ * Copy saddr and daddr, possibly using 64-bit load/stores.
+ * Equivalent to:
+ *   iph->saddr = fl4->saddr;
+ *   iph->daddr = fl4->daddr;
+ */
+static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
+{
+       BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
+                    offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
+       memcpy(&iph->saddr, &fl4->saddr,
+              sizeof(fl4->saddr) + sizeof(fl4->daddr));
+}
+
 int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 {
        struct sock *sk = skb->sk;
@@ -381,8 +395,8 @@ packet_routed:
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
-       iph->saddr    = fl4->saddr;
-       iph->daddr    = fl4->daddr;
+       ip_copy_addrs(iph, fl4);
+
        /* Transport layer set skb->h.foo itself. */
 
        if (inet_opt && inet_opt->opt.optlen) {
@@ -1337,8 +1351,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        ip_select_ident(iph, &rt->dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
-       iph->saddr = fl4->saddr;
-       iph->daddr = fl4->daddr;
+       ip_copy_addrs(iph, fl4);
 
        if (opt) {
                iph->ihl += opt->optlen>>2;
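
ip_copy_addrs() relies on saddr and daddr being adjacent in struct flowi4 so that one memcpy — which the compiler can lower to a single wide store — writes both addresses, and the BUILD_BUG_ON makes that layout assumption explicit at compile time. A self-contained userspace sketch of the same pattern; the struct names below are stand-ins, not the kernel's flowi4/iphdr:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flow { uint32_t saddr, daddr; };          /* stand-in for flowi4 */
struct hdr  { uint32_t saddr, daddr; };          /* stand-in for iphdr  */

static void copy_addrs(struct hdr *h, const struct flow *f)
{
        /* Compile-time check that daddr immediately follows saddr,
         * mirroring the BUILD_BUG_ON in the patch. */
        _Static_assert(offsetof(struct flow, daddr) ==
                       offsetof(struct flow, saddr) + sizeof(uint32_t),
                       "saddr and daddr must be adjacent");
        memcpy(&h->saddr, &f->saddr, 2 * sizeof(uint32_t));
}

int main(void)
{
        struct flow f = { 0x11111111, 0x22222222 };
        struct hdr h;

        copy_addrs(&h, &f);
        printf("saddr %#x daddr %#x\n", h.saddr, h.daddr);
        return 0;
}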
index 09ff51b..8aa87c1 100644 (file)
@@ -37,7 +37,7 @@
 #include <net/route.h>
 #include <net/xfrm.h>
 #include <net/compat.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <net/transp_v6.h>
 #endif
 
 /*
  *     SOL_IP control messages.
  */
+#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))
 
 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
 {
-       struct in_pktinfo info;
-       struct rtable *rt = skb_rtable(skb);
+       struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
 
        info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
-       if (rt) {
-               info.ipi_ifindex = rt->rt_iif;
-               info.ipi_spec_dst.s_addr = rt->rt_spec_dst;
-       } else {
-               info.ipi_ifindex = 0;
-               info.ipi_spec_dst.s_addr = 0;
-       }
 
        put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
 }
@@ -515,7 +508,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                                sock_owned_by_user(sk));
                if (inet->is_icsk) {
                        struct inet_connection_sock *icsk = inet_csk(sk);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        if (sk->sk_family == PF_INET ||
                            (!((1 << sk->sk_state) &
                               (TCPF_LISTEN | TCPF_CLOSE)) &&
@@ -526,7 +519,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                                if (opt)
                                        icsk->icsk_ext_hdr_len += opt->opt.optlen;
                                icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        }
 #endif
                }
@@ -992,20 +985,28 @@ e_inval:
 }
 
 /**
- * ip_queue_rcv_skb - Queue an skb into sock receive queue
+ * ipv4_pktinfo_prepare - transfer some info from rtable to skb
  * @sk: socket
  * @skb: buffer
  *
- * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option
- * is not set, we drop skb dst entry now, while dst cache line is hot.
+ * To support the IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
+ * in skb->cb[] before the dst is dropped.
+ * This way, the receiver doesn't incur cache line misses to read the rtable.
  */
-int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+void ipv4_pktinfo_prepare(struct sk_buff *skb)
 {
-       if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
-               skb_dst_drop(skb);
-       return sock_queue_rcv_skb(sk, skb);
+       struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
+       const struct rtable *rt = skb_rtable(skb);
+
+       if (rt) {
+               pktinfo->ipi_ifindex = rt->rt_iif;
+               pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+       } else {
+               pktinfo->ipi_ifindex = 0;
+               pktinfo->ipi_spec_dst.s_addr = 0;
+       }
+       skb_dst_drop(skb);
 }
-EXPORT_SYMBOL(ip_queue_rcv_skb);
 
 int ip_setsockopt(struct sock *sk, int level,
                int optname, char __user *optval, unsigned int optlen)
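
The ip_sockglue.c change splits IP_PKTINFO handling in two: on the receive path ipv4_pktinfo_prepare() copies rt_iif and rt_spec_dst from the rtable into the skb->cb[] scratch area before the dst reference is dropped, and ip_cmsg_recv_pktinfo() later reads them back from cb[] instead of touching the route. Below is a rough userspace model of that stash-in-cb idea; packet, route and the field names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route   { int ifindex; unsigned int spec_dst; };
struct pktinfo { int ifindex; unsigned int spec_dst; };

struct packet {
        char cb[48];                    /* per-layer scratch, like skb->cb[] */
        struct route *rt;               /* dropped early on the receive path */
};

#define PKTINFO_CB(p) ((struct pktinfo *)(p)->cb)

static void pktinfo_prepare(struct packet *p)
{
        struct pktinfo *info = PKTINFO_CB(p);

        if (p->rt) {
                info->ifindex  = p->rt->ifindex;
                info->spec_dst = p->rt->spec_dst;
        } else {
                memset(info, 0, sizeof(*info));
        }
        free(p->rt);                    /* route no longer needed */
        p->rt = NULL;
}

static void report_pktinfo(const struct packet *p)
{
        const struct pktinfo *info = (const struct pktinfo *)p->cb;

        printf("ifindex %d spec_dst %#x\n", info->ifindex, info->spec_dst);
}

int main(void)
{
        struct packet p = { .rt = malloc(sizeof(struct route)) };

        *p.rt = (struct route){ .ifindex = 2, .spec_dst = 0xc0a80001 };
        pktinfo_prepare(&p);            /* stash before dropping the route */
        report_pktinfo(&p);             /* the later read uses cb[] only */
        return 0;
}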
index 0da2afc..915eb52 100644 (file)
@@ -763,13 +763,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
        struct sk_buff *skb;
        struct bootp_pkt *b;
        struct iphdr *h;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
 
        /* Allocate packet */
-       skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+       skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15,
                        GFP_KERNEL);
        if (!skb)
                return;
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
        memset(b, 0, sizeof(struct bootp_pkt));
 
@@ -822,8 +824,13 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);
        if (dev_hard_header(skb, dev, ntohs(skb->protocol),
-                           dev->broadcast, dev->dev_addr, skb->len) < 0 ||
-           dev_queue_xmit(skb) < 0)
+                           dev->broadcast, dev->dev_addr, skb->len) < 0) {
+               kfree_skb(skb);
+               printk("E");
+               return;
+       }
+
+       if (dev_queue_xmit(skb) < 0)
                printk("E");
 }
 
index 0b2e732..413ed1b 100644 (file)
@@ -148,7 +148,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipip_get_stats(struct net_device *dev)
 {
index 76a7f07..8e54490 100644 (file)
@@ -1520,7 +1520,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
        struct mr_table *mrt;
        struct vif_device *v;
        int ct;
-       LIST_HEAD(list);
 
        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
@@ -1529,10 +1528,9 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
                v = &mrt->vif_table[0];
                for (ct = 0; ct < mrt->maxvif; ct++, v++) {
                        if (v->dev == dev)
-                               vif_delete(mrt, ct, 1, &list);
+                               vif_delete(mrt, ct, 1, NULL);
                }
        }
-       unregister_netdevice_many(&list);
        return NOTIFY_DONE;
 }
 
index e59aabd..a057fe6 100644 (file)
@@ -404,6 +404,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
        int status, type, pid, flags;
        unsigned int nlmsglen, skblen;
        struct nlmsghdr *nlh;
+       bool enable_timestamp = false;
 
        skblen = skb->len;
        if (skblen < sizeof(*nlh))
@@ -441,12 +442,13 @@ __ipq_rcv_skb(struct sk_buff *skb)
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
-               net_enable_timestamp();
+               enable_timestamp = true;
                peer_pid = pid;
        }
 
        spin_unlock_bh(&queue_lock);
-
+       if (enable_timestamp)
+               net_enable_timestamp();
        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
        if (status < 0)
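
The ip_queue.c hunk stops calling net_enable_timestamp() while queue_lock is held: the decision is latched in a local flag under the lock, and the call is made only after spin_unlock_bh(). A generic userspace sketch of that latch-then-act pattern, with a pthread mutex standing in for the BH spinlock and all names invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int peer_pid;

/* Stand-in for net_enable_timestamp(): too heavy to run under the lock. */
static void enable_timestamps(void)
{
        puts("timestamping enabled");
}

static void register_peer(int pid)
{
        bool enable = false;

        pthread_mutex_lock(&queue_lock);
        if (peer_pid == 0) {
                enable = true;          /* latch the decision under the lock */
                peer_pid = pid;
        }
        pthread_mutex_unlock(&queue_lock);

        if (enable)                     /* act only after the lock is gone */
                enable_timestamps();
}

int main(void)
{
        register_peer(1234);
        register_peer(5678);            /* second caller: nothing to enable */
        return 0;
}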
index 466ea8b..3569d8e 100644 (file)
@@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 
        local_bh_disable();
        orphans = percpu_counter_sum_positive(&tcp_orphan_count);
-       sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+       sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
        local_bh_enable();
 
        socket_seq_show(seq);
        seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
                   sock_prot_inuse_get(net, &tcp_prot), orphans,
                   tcp_death_row.tw_count, sockets,
-                  atomic_long_read(&tcp_memory_allocated));
+                  proto_memory_allocated(&tcp_prot));
        seq_printf(seq, "UDP: inuse %d mem %ld\n",
                   sock_prot_inuse_get(net, &udp_prot),
-                  atomic_long_read(&udp_memory_allocated));
+                  proto_memory_allocated(&udp_prot));
        seq_printf(seq, "UDPLITE: inuse %d\n",
                   sock_prot_inuse_get(net, &udplite_prot));
        seq_printf(seq, "RAW: inuse %d\n",
@@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq)
 
        count = 0;
        for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
-               val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
+               val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]);
                if (val) {
                        type[count] = i;
                        vals[count++] = val;
@@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq)
 {
        int i;
        struct net *net = seq->private;
+       atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs;
 
        seq_puts(seq, "\nIcmp: InMsgs InErrors");
        for (i=0; icmpmibmap[i].name != NULL; i++)
@@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq)
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
        for (i=0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                       snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-                               icmpmibmap[i].index));
+                          atomic_long_read(ptr + icmpmibmap[i].index));
        seq_printf(seq, " %lu %lu",
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
                snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
        for (i=0; icmpmibmap[i].name != NULL; i++)
                seq_printf(seq, " %lu",
-                       snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
-                               icmpmibmap[i].index | 0x100));
+                          atomic_long_read(ptr + (icmpmibmap[i].index | 0x100)));
 }
 
 /*
index 007e2eb..3ccda5a 100644 (file)
@@ -292,7 +292,8 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
 {
        /* Charge it to the socket. */
 
-       if (ip_queue_rcv_skb(sk, skb) < 0) {
+       ipv4_pktinfo_prepare(skb);
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -327,6 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
        unsigned int iphlen;
        int err;
        struct rtable *rt = *rtp;
+       int hlen, tlen;
 
        if (length > rt->dst.dev->mtu) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -336,12 +338,14 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
        if (flags&MSG_PROBE)
                goto out;
 
+       hlen = LL_RESERVED_SPACE(rt->dst.dev);
+       tlen = rt->dst.dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk,
-                                 length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+                                 length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto error;
-       skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+       skb_reserve(skb, hlen);
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
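
Both ipconfig.c above and raw_send_hdrinc() here replace LL_ALLOCATED_SPACE(dev) with an explicit hlen = LL_RESERVED_SPACE(dev) plus tlen = dev->needed_tailroom: the buffer is sized for payload, link-layer headroom and driver tailroom, and only the headroom is reserved up front so the payload lands after it (the extra +15 in the kernel code is alignment slack). A buffer-layout sketch with made-up sizes, not kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t payload = 100;
        size_t hlen = 16;       /* pretend LL_RESERVED_SPACE(dev) */
        size_t tlen = 4;        /* pretend dev->needed_tailroom   */

        /* One allocation covers headroom + payload + tailroom. */
        unsigned char *buf = malloc(payload + hlen + tlen);
        if (!buf)
                return 1;

        /* "skb_reserve(skb, hlen)": data starts after the headroom,
         * leaving room for the driver to prepend a link-layer header
         * and append a trailer without reallocating. */
        unsigned char *data = buf + hlen;
        memset(data, 0xab, payload);

        printf("headroom %zu, payload %zu, tailroom %zu, total %zu\n",
               hlen, payload, tlen, payload + hlen + tlen);
        free(buf);
        return 0;
}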
index 46af623..f30112f 100644 (file)
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
-#include <net/atmclip.h>
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
@@ -420,7 +419,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                int len, HHUptod;
 
                rcu_read_lock();
-               n = dst_get_neighbour(&r->dst);
+               n = dst_get_neighbour_noref(&r->dst);
                HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
                rcu_read_unlock();
 
@@ -1019,23 +1018,18 @@ static int slow_chain_length(const struct rtable *head)
 
 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-       struct neigh_table *tbl = &arp_tbl;
        static const __be32 inaddr_any = 0;
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;
        struct neighbour *n;
 
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-       if (dev->type == ARPHRD_ATM)
-               tbl = clip_tbl_hook;
-#endif
        if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
                pkey = &inaddr_any;
 
-       n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
+       n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey);
        if (n)
                return n;
-       return neigh_create(tbl, pkey, dev);
+       return neigh_create(&arp_tbl, pkey, dev);
 }
 
 static int rt_bind_neighbour(struct rtable *rt)
index 69fd720..4aa7e9d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/nsproxy.h>
+#include <linux/swap.h>
 #include <net/snmp.h>
 #include <net/icmp.h>
 #include <net/ip.h>
@@ -23,6 +24,7 @@
 #include <net/cipso_ipv4.h>
 #include <net/inet_frag.h>
 #include <net/ping.h>
+#include <net/tcp_memcontrol.h>
 
 static int zero;
 static int tcp_retr1_max = 255;
@@ -73,7 +75,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
 }
 
 
-void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
+static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
 {
        gid_t *data = table->data;
        unsigned seq;
@@ -86,7 +88,7 @@ void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t
 }
 
 /* Update system visible IP port range */
-static void set_ping_group_range(struct ctl_table *table, int range[2])
+static void set_ping_group_range(struct ctl_table *table, gid_t range[2])
 {
        gid_t *data = table->data;
        write_seqlock(&sysctl_local_ports.lock);
@@ -174,6 +176,49 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
        return ret;
 }
 
+static int ipv4_tcp_mem(ctl_table *ctl, int write,
+                          void __user *buffer, size_t *lenp,
+                          loff_t *ppos)
+{
+       int ret;
+       unsigned long vec[3];
+       struct net *net = current->nsproxy->net_ns;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       struct mem_cgroup *memcg;
+#endif
+
+       ctl_table tmp = {
+               .data = &vec,
+               .maxlen = sizeof(vec),
+               .mode = ctl->mode,
+       };
+
+       if (!write) {
+               ctl->data = &net->ipv4.sysctl_tcp_mem;
+               return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
+       }
+
+       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
+       if (ret)
+               return ret;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       rcu_read_lock();
+       memcg = mem_cgroup_from_task(current);
+
+       tcp_prot_mem(memcg, vec[0], 0);
+       tcp_prot_mem(memcg, vec[1], 1);
+       tcp_prot_mem(memcg, vec[2], 2);
+       rcu_read_unlock();
+#endif
+
+       net->ipv4.sysctl_tcp_mem[0] = vec[0];
+       net->ipv4.sysctl_tcp_mem[1] = vec[1];
+       net->ipv4.sysctl_tcp_mem[2] = vec[2];
+
+       return 0;
+}
+
 static struct ctl_table ipv4_table[] = {
        {
                .procname       = "tcp_timestamps",
@@ -433,13 +478,6 @@ static struct ctl_table ipv4_table[] = {
                .proc_handler   = proc_dointvec
        },
        {
-               .procname       = "tcp_mem",
-               .data           = &sysctl_tcp_mem,
-               .maxlen         = sizeof(sysctl_tcp_mem),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax
-       },
-       {
                .procname       = "tcp_wmem",
                .data           = &sysctl_tcp_wmem,
                .maxlen         = sizeof(sysctl_tcp_wmem),
@@ -721,6 +759,12 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = ipv4_ping_group_range,
        },
+       {
+               .procname       = "tcp_mem",
+               .maxlen         = sizeof(init_net.ipv4.sysctl_tcp_mem),
+               .mode           = 0644,
+               .proc_handler   = ipv4_tcp_mem,
+       },
        { }
 };
 
@@ -734,6 +778,7 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
 static __net_init int ipv4_sysctl_init_net(struct net *net)
 {
        struct ctl_table *table;
+       unsigned long limit;
 
        table = ipv4_net_table;
        if (!net_eq(net, &init_net)) {
@@ -769,6 +814,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
        net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+       limit = nr_free_buffer_pages() / 8;
+       limit = max(limit, 128UL);
+       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+       net->ipv4.sysctl_tcp_mem[1] = limit;
+       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+
        net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
                        net_ipv4_ctl_path, table);
        if (net->ipv4.ipv4_hdr == NULL)
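
tcp_mem becomes per network namespace: ipv4_sysctl_init_net() seeds it from nr_free_buffer_pages()/8, clamped to at least 128 pages, with the low threshold at 3/4 of that budget, the pressure threshold at the full budget, and the hard limit at twice the low threshold. A worked example of the arithmetic for a hypothetical box with 4 GiB of free buffer pages and 4 KiB pages:

#include <stdio.h>

int main(void)
{
        unsigned long free_pages = (4UL << 30) / 4096;  /* 1048576 pages */
        unsigned long limit = free_pages / 8;           /* 131072 pages  */
        unsigned long mem[3];

        if (limit < 128UL)
                limit = 128UL;

        mem[0] = limit / 4 * 3;  /* low:      98304 pages (~384 MiB) */
        mem[1] = limit;          /* pressure: 131072 pages (~512 MiB) */
        mem[2] = mem[0] * 2;     /* hard:     196608 pages (~768 MiB) */

        printf("tcp_mem = %lu %lu %lu\n", mem[0], mem[1], mem[2]);
        return 0;
}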
index 34f5db1..9bcdec3 100644 (file)
@@ -282,11 +282,9 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
-EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -888,18 +886,18 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(tcp_sendpage);
 
-#define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
-#define TCP_OFF(sk)    (sk->sk_sndmsg_off)
-
-static inline int select_size(const struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, bool sg)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        int tmp = tp->mss_cache;
 
        if (sg) {
-               if (sk_can_gso(sk))
-                       tmp = 0;
-               else {
+               if (sk_can_gso(sk)) {
+                       /* Small frames won't use a full page:
+                        * payload will immediately follow the TCP header.
+                        */
+                       tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+               } else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 
                        if (tmp >= pgbreak &&
@@ -917,9 +915,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int iovlen, flags;
+       int iovlen, flags, err, copied;
        int mss_now, size_goal;
-       int sg, err, copied;
+       bool sg;
        long timeo;
 
        lock_sock(sk);
@@ -946,7 +944,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
 
-       sg = sk->sk_route_caps & NETIF_F_SG;
+       sg = !!(sk->sk_route_caps & NETIF_F_SG);
 
        while (--iovlen >= 0) {
                size_t seglen = iov->iov_len;
@@ -1005,8 +1003,13 @@ new_segment:
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = TCP_PAGE(sk);
-                               int off = TCP_OFF(sk);
+                               struct page *page = sk->sk_sndmsg_page;
+                               int off;
+
+                               if (page && page_count(page) == 1)
+                                       sk->sk_sndmsg_off = 0;
+
+                               off = sk->sk_sndmsg_off;
 
                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
@@ -1023,7 +1026,7 @@ new_segment:
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
-                                               TCP_PAGE(sk) = page = NULL;
+                                               sk->sk_sndmsg_page = page = NULL;
                                                off = 0;
                                        }
                                } else
@@ -1049,9 +1052,9 @@ new_segment:
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
-                                       if (!TCP_PAGE(sk)) {
-                                               TCP_PAGE(sk) = page;
-                                               TCP_OFF(sk) = 0;
+                                       if (!sk->sk_sndmsg_page) {
+                                               sk->sk_sndmsg_page = page;
+                                               sk->sk_sndmsg_off = 0;
                                        }
                                        goto do_error;
                                }
@@ -1061,15 +1064,15 @@ new_segment:
                                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (TCP_PAGE(sk)) {
+                                       if (sk->sk_sndmsg_page) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
-                                               TCP_PAGE(sk) = page;
+                                               sk->sk_sndmsg_page = page;
                                        }
                                }
 
-                               TCP_OFF(sk) = off + copy;
+                               sk->sk_sndmsg_off = off + copy;
                        }
 
                        if (!copied)
@@ -2653,7 +2656,8 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
@@ -3272,14 +3276,9 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       limit = nr_free_buffer_pages() / 8;
-       limit = max(limit, 128UL);
-       sysctl_tcp_mem[0] = limit / 4 * 3;
-       sysctl_tcp_mem[1] = limit;
-       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
-
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
-       limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+       limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
+               << (PAGE_SHIFT - 7);
        max_share = min(4UL*1024*1024, limit);
 
        sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
index 850c737..fc6d475 100644 (file)
@@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
            left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
            left * tp->mss_cache < sk->sk_gso_max_size)
                return 1;
-       return left <= tcp_max_burst(tp);
+       return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
 
index 939edb3..6334b1f 100644 (file)
@@ -34,11 +34,23 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                tcp_get_info(sk, info);
 }
 
+static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
+}
+
+static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
+}
+
 static const struct inet_diag_handler tcp_diag_handler = {
-       .idiag_hashinfo  = &tcp_hashinfo,
+       .dump            = tcp_diag_dump,
+       .dump_one        = tcp_diag_dump_one,
        .idiag_get_info  = tcp_diag_get_info,
-       .idiag_type      = TCPDIAG_GETSOCK,
-       .idiag_info_size = sizeof(struct tcp_info),
+       .idiag_type      = IPPROTO_TCP,
 };
 
 static int __init tcp_diag_init(void)
@@ -54,4 +66,4 @@ static void __exit tcp_diag_exit(void)
 module_init(tcp_diag_init);
 module_exit(tcp_diag_exit);
 MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_INET_DIAG, TCPDIAG_GETSOCK);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 6);
index 52b5c2d..f131d92 100644 (file)
@@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !tcp_memory_pressure) {
+           !sk_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk)
 
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-           !tcp_memory_pressure &&
-           atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+           !sk_under_memory_pressure(sk) &&
+           sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
        }
@@ -2663,7 +2663,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
                       tp->snd_ssthresh, tp->prior_ssthresh,
                       tp->packets_out);
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
                printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
@@ -2858,7 +2858,7 @@ static void tcp_try_keep_open(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        int state = TCP_CA_Open;
 
-       if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
+       if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
                state = TCP_CA_Disorder;
 
        if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2881,7 +2881,8 @@ static void tcp_try_to_open(struct sock *sk, int flag)
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
-               tcp_moderate_cwnd(tp);
+               if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+                       tcp_moderate_cwnd(tp);
        } else {
                tcp_cwnd_down(sk, flag);
        }
@@ -3009,11 +3010,11 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
-                                 int newly_acked_sacked, int flag)
+                                 int newly_acked_sacked, bool is_dupack,
+                                 int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
        int fast_rexmit = 0, mib_idx;
@@ -3066,17 +3067,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                        }
                        break;
 
-               case TCP_CA_Disorder:
-                       tcp_try_undo_dsack(sk);
-                       if (!tp->undo_marker ||
-                           /* For SACK case do not Open to allow to undo
-                            * catching for all duplicate ACKs. */
-                           tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
-                               tp->undo_marker = 0;
-                               tcp_set_ca_state(sk, TCP_CA_Open);
-                       }
-                       break;
-
                case TCP_CA_Recovery:
                        if (tcp_is_reno(tp))
                                tcp_reset_reno_sack(tp);
@@ -3117,7 +3107,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                                tcp_add_reno_sack(sk);
                }
 
-               if (icsk->icsk_ca_state == TCP_CA_Disorder)
+               if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk)) {
@@ -3681,10 +3671,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        u32 prior_snd_una = tp->snd_una;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
+       bool is_dupack = false;
        u32 prior_in_flight;
        u32 prior_fackets;
        int prior_packets;
        int prior_sacked = tp->sacked_out;
+       int pkts_acked = 0;
        int newly_acked_sacked = 0;
        int frto_cwnd = 0;
 
@@ -3757,6 +3749,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* See if we can take anything off of the retransmit queue. */
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
 
+       pkts_acked = prior_packets - tp->packets_out;
        newly_acked_sacked = (prior_packets - prior_sacked) -
                             (tp->packets_out - tp->sacked_out);
 
@@ -3771,8 +3764,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight);
-               tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
-                                     newly_acked_sacked, flag);
+               is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+                                     is_dupack, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3784,6 +3778,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        return 1;
 
 no_queue:
+       /* If data was DSACKed, see if we can undo a cwnd reduction. */
+       if (flag & FLAG_DSACKING_ACK)
+               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+                                     is_dupack, flag);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3797,10 +3795,14 @@ invalid_ack:
        return -1;
 
 old_ack:
+       /* If data was SACKed, tag it and see if we should send more data.
+        * If data was DSACKed, see if we can undo a cwnd reduction.
+        */
        if (TCP_SKB_CB(skb)->sacked) {
-               tcp_sacktag_write_queue(sk, skb, prior_snd_una);
-               if (icsk->icsk_ca_state == TCP_CA_Open)
-                       tcp_try_keep_open(sk);
+               flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+               newly_acked_sacked = tp->sacked_out - prior_sacked;
+               tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+                                     is_dupack, flag);
        }
 
        SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
@@ -4864,7 +4866,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
-       else if (tcp_memory_pressure)
+       else if (sk_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
@@ -4930,11 +4932,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
                return 0;
 
        /* If we are under global TCP memory pressure, do not expand.  */
-       if (tcp_memory_pressure)
+       if (sk_under_memory_pressure(sk))
                return 0;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
-       if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+       if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
                return 0;
 
        /* If we filled the congestion window, do not expand.  */
@@ -5809,6 +5811,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
 
                if (th->syn) {
+                       if (th->fin)
+                               goto discard;
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
index a9db4b1..1eb4ad5 100644 (file)
@@ -73,6 +73,7 @@
 #include <net/xfrm.h>
 #include <net/netdma.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <linux/inet.h>
 #include <linux/ipv6.h>
@@ -1511,6 +1512,7 @@ exit:
        return NULL;
 put_and_exit:
        tcp_clear_xmit_timers(newsk);
+       tcp_cleanup_congestion_control(newsk);
        bh_unlock_sock(newsk);
        sock_put(newsk);
        goto exit;
@@ -1916,7 +1918,8 @@ static int tcp_v4_init_sock(struct sock *sk)
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
        local_bh_disable();
-       percpu_counter_inc(&tcp_sockets_allocated);
+       sock_update_memcg(sk);
+       sk_sockets_allocated_inc(sk);
        local_bh_enable();
 
        return 0;
@@ -1972,7 +1975,8 @@ void tcp_v4_destroy_sock(struct sock *sk)
                tp->cookie_values = NULL;
        }
 
-       percpu_counter_dec(&tcp_sockets_allocated);
+       sk_sockets_allocated_dec(sk);
+       sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -2619,7 +2623,6 @@ struct proto tcp_prot = {
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
-       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
@@ -2633,10 +2636,14 @@ struct proto tcp_prot = {
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       .init_cgroup            = tcp_init_cgroup,
+       .destroy_cgroup         = tcp_destroy_cgroup,
+       .proto_cgroup           = tcp_proto_cgroup,
+#endif
 };
 EXPORT_SYMBOL(tcp_prot);
 
-
 static int __net_init tcp_sk_init(struct net *net)
 {
        return inet_ctl_sock_create(&net->ipv4.tcp_sock,
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
new file mode 100644 (file)
index 0000000..7fed04f
--- /dev/null
@@ -0,0 +1,272 @@
+#include <net/tcp.h>
+#include <net/tcp_memcontrol.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <linux/nsproxy.h>
+#include <linux/memcontrol.h>
+#include <linux/module.h>
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft);
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer);
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event);
+
+static struct cftype tcp_files[] = {
+       {
+               .name = "kmem.tcp.limit_in_bytes",
+               .write_string = tcp_cgroup_write,
+               .read_u64 = tcp_cgroup_read,
+               .private = RES_LIMIT,
+       },
+       {
+               .name = "kmem.tcp.usage_in_bytes",
+               .read_u64 = tcp_cgroup_read,
+               .private = RES_USAGE,
+       },
+       {
+               .name = "kmem.tcp.failcnt",
+               .private = RES_FAILCNT,
+               .trigger = tcp_cgroup_reset,
+               .read_u64 = tcp_cgroup_read,
+       },
+       {
+               .name = "kmem.tcp.max_usage_in_bytes",
+               .private = RES_MAX_USAGE,
+               .trigger = tcp_cgroup_reset,
+               .read_u64 = tcp_cgroup_read,
+       },
+};
+
+static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
+{
+       return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
+}
+
+static void memcg_tcp_enter_memory_pressure(struct sock *sk)
+{
+       if (sk->sk_cgrp->memory_pressure)
+               *sk->sk_cgrp->memory_pressure = 1;
+}
+EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
+
+int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       /*
+        * The root cgroup does not use res_counters, but rather
+        * relies on the data already collected by the network
+        * subsystem.
+        */
+       struct res_counter *res_parent = NULL;
+       struct cg_proto *cg_proto, *parent_cg;
+       struct tcp_memcontrol *tcp;
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+       struct net *net = current->nsproxy->net_ns;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               goto create_files;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
+       tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
+       tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
+       tcp->tcp_memory_pressure = 0;
+
+       parent_cg = tcp_prot.proto_cgroup(parent);
+       if (parent_cg)
+               res_parent = parent_cg->memory_allocated;
+
+       res_counter_init(&tcp->tcp_memory_allocated, res_parent);
+       percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+
+       cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
+       cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
+       cg_proto->sysctl_mem = tcp->tcp_prot_mem;
+       cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
+       cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
+       cg_proto->memcg = memcg;
+
+create_files:
+       return cgroup_add_files(cgrp, ss, tcp_files,
+                               ARRAY_SIZE(tcp_files));
+}
+EXPORT_SYMBOL(tcp_init_cgroup);
+
+void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct cg_proto *cg_proto;
+       struct tcp_memcontrol *tcp;
+       u64 val;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+
+       val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+
+       if (val != RESOURCE_MAX)
+               jump_label_dec(&memcg_socket_limit_enabled);
+}
+EXPORT_SYMBOL(tcp_destroy_cgroup);
+
+static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
+{
+       struct net *net = current->nsproxy->net_ns;
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+       u64 old_lim;
+       int i;
+       int ret;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return -EINVAL;
+
+       if (val > RESOURCE_MAX)
+               val = RESOURCE_MAX;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+       ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < 3; i++)
+               tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
+                                            net->ipv4.sysctl_tcp_mem[i]);
+
+       if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
+               jump_label_dec(&memcg_socket_limit_enabled);
+       else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
+               jump_label_inc(&memcg_socket_limit_enabled);
+
+       return 0;
+}
+
+static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+                           const char *buffer)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       unsigned long long val;
+       int ret = 0;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               /* see memcontrol.c */
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (ret)
+                       break;
+               ret = tcp_update_limit(memcg, val);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return default_val;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+}
+
+static u64 tcp_read_usage(struct mem_cgroup *memcg)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+}
+
+static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+       u64 val;
+
+       switch (cft->private) {
+       case RES_LIMIT:
+               val = tcp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX);
+               break;
+       case RES_USAGE:
+               val = tcp_read_usage(memcg);
+               break;
+       case RES_FAILCNT:
+       case RES_MAX_USAGE:
+               val = tcp_read_stat(memcg, cft->private, 0);
+               break;
+       default:
+               BUG();
+       }
+       return val;
+}
+
+static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+{
+       struct mem_cgroup *memcg;
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       memcg = mem_cgroup_from_cont(cont);
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return 0;
+       tcp = tcp_from_cgproto(cg_proto);
+
+       switch (event) {
+       case RES_MAX_USAGE:
+               res_counter_reset_max(&tcp->tcp_memory_allocated);
+               break;
+       case RES_FAILCNT:
+               res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+               break;
+       }
+
+       return 0;
+}
+
+unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
+       if (!cg_proto)
+               return 0;
+
+       tcp = tcp_from_cgproto(cg_proto);
+       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
+}
+
+void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
+{
+       struct tcp_memcontrol *tcp;
+       struct cg_proto *cg_proto;
+
+       cg_proto = tcp_prot.proto_cgroup(memcg);
+       if (!cg_proto)
+               return;
+
+       tcp = tcp_from_cgproto(cg_proto);
+
+       tcp->tcp_prot_mem[idx] = val;
+}
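
A note on the limit-update path above: tcp_update_limit() only touches the memcg_socket_limit_enabled static branch when the limit crosses RESOURCE_MAX in either direction, so repeated writes keep the jump label balanced. Below is a minimal standalone sketch of just that transition rule in plain userspace C; the counter stands in for jump_label_inc()/jump_label_dec() and all names and values are illustrative, not kernel API.

#include <stdio.h>

#define RESOURCE_MAX (~0ULL)            /* "unlimited", as in res_counter */

static int limit_key_refcount;          /* stands in for the static key refcount */

/* Mirror of the enable/disable rule in tcp_update_limit(): only
 * transitions across RESOURCE_MAX change the key, plain limit
 * changes do not. */
static void update_limit(unsigned long long *lim, unsigned long long val)
{
        unsigned long long old = *lim;

        *lim = val;
        if (val == RESOURCE_MAX && old != RESOURCE_MAX)
                limit_key_refcount--;           /* jump_label_dec() */
        else if (old == RESOURCE_MAX && val != RESOURCE_MAX)
                limit_key_refcount++;           /* jump_label_inc() */
}

int main(void)
{
        unsigned long long lim = RESOURCE_MAX;  /* cgroup starts unlimited */

        update_limit(&lim, 1ULL << 20);         /* set a limit:   key 0 -> 1 */
        update_limit(&lim, 2ULL << 20);         /* change it:     key stays 1 */
        update_limit(&lim, RESOURCE_MAX);       /* remove it:     key 1 -> 0 */
        printf("key refcount = %d\n", limit_key_refcount);
        return 0;
}

The refcount ends back at zero because the enable and disable transitions pair up, which is what keeps the real static key balanced across arbitrary limit rewrites.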
index 66363b6..550e755 100644 (file)
@@ -336,15 +336,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;
 
                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
-                       ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
-                       ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+                       tw6->tw_v6_daddr = np->daddr;
+                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_ipv6only = np->ipv6only;
                }
@@ -425,7 +425,7 @@ static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
  */
 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
-       struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
+       struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
        if (newsk != NULL) {
                const struct inet_request_sock *ireq = inet_rsk(req);
@@ -495,7 +495,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;
 
-               newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
+               if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
+                   !try_module_get(newicsk->icsk_ca_ops->owner))
+                       newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
 
                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
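
The hunk above changes how a child socket inherits its congestion-control ops: instead of always resetting to tcp_init_congestion_ops, it keeps the parent's ops if a module reference can be taken and only falls back otherwise. A minimal sketch of that take-a-reference-or-fall-back pattern, with toy types standing in for struct tcp_congestion_ops and try_module_get() (purely illustrative):

#include <stdio.h>

struct toy_ca_ops {
        const char *name;
        int module_alive;       /* can a reference still be taken? */
};

static struct toy_ca_ops init_ops  = { "init (reno)", 1 };
static struct toy_ca_ops cubic_ops = { "cubic",       0 };     /* pretend it is unloading */

static int try_get(struct toy_ca_ops *ops)
{
        return ops->module_alive;       /* stands in for try_module_get(ops->owner) */
}

int main(void)
{
        struct toy_ca_ops *ops = &cubic_ops;    /* inherited from the listener */

        /* same shape as the new code in tcp_create_openreq_child(): keep
         * the parent's ops only if they can be pinned, else fall back */
        if (ops != &init_ops && !try_get(ops))
                ops = &init_ops;

        printf("child socket uses %s\n", ops->name);
        return 0;
}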
index 63170e2..8c8de27 100644 (file)
@@ -1093,6 +1093,13 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
        int i, k, eat;
 
+       eat = min_t(int, len, skb_headlen(skb));
+       if (eat) {
+               __skb_pull(skb, eat);
+               len -= eat;
+               if (!len)
+                       return;
+       }
        eat = len;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -1124,11 +1131,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
        if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;
 
-       /* If len == headlen, we avoid __skb_pull to preserve alignment. */
-       if (unlikely(len < skb_headlen(skb)))
-               __skb_pull(skb, len);
-       else
-               __pskb_trim_head(skb, len - skb_headlen(skb));
+       __pskb_trim_head(skb, len);
 
        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;
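
With the two hunks above, tcp_trim_head() now always calls __pskb_trim_head(), which eats from the linear head first and only then from the page frags; the alignment concern the old comment mentioned is handled later in the retransmit path instead. A toy model of that trimming order, with one integer per region instead of a real skb; the struct and names are made up for illustration:

#include <stdio.h>

struct toy_skb {
        int headlen;    /* bytes in the linear area */
        int fraglen;    /* bytes in the (single, toy) fragment */
};

static void trim_head(struct toy_skb *skb, int len)
{
        int eat = len < skb->headlen ? len : skb->headlen;

        skb->headlen -= eat;    /* __skb_pull() on the linear part first */
        len -= eat;
        if (!len)
                return;
        skb->fraglen -= len;    /* then eat the rest from the frag */
}

int main(void)
{
        struct toy_skb skb = { 100, 400 };

        trim_head(&skb, 250);   /* trims all 100 head bytes plus 150 frag bytes */
        printf("headlen=%d fraglen=%d\n", skb.headlen, skb.fraglen);
        return 0;
}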
@@ -1581,7 +1584,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
                 * frame, so if we have space for more than 3 frames
                 * then send now.
                 */
-               if (limit > tcp_max_burst(tp) * tp->mss_cache)
+               if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
                        goto send_now;
        }
 
@@ -1919,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
-               if (tcp_memory_pressure)
+               if (sk_under_memory_pressure(sk))
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);
 
@@ -2147,7 +2150,15 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-       err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+       /* make sure skb->data is aligned on arches that require it */
+       if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+               struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+                                                  GFP_ATOMIC);
+               err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+                            -ENOBUFS;
+       } else {
+               err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+       }
 
        if (err == 0) {
                /* Update global TCP statistics. */
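
The retransmit hunk above copies the skb before transmission when its data pointer is not 4-byte aligned and the architecture defines a non-zero NET_IP_ALIGN; that is what allows the unconditional head-pull in __pskb_trim_head() earlier. The predicate is simply `(unsigned long)skb->data & 3`. A trivial userspace check of that predicate (no skb involved, names are illustrative):

#include <stdio.h>
#include <stdint.h>

/* true when the pointer is not 4-byte aligned and the retransmit
 * path would have to copy the skb first */
static int needs_realign(const void *data)
{
        return ((uintptr_t)data & 3) != 0;
}

int main(void)
{
        uint32_t word[2];               /* 4-byte aligned backing store */
        char *base = (char *)word;
        int i;

        for (i = 0; i < 4; i++)
                printf("base+%d: %s\n", i,
                       needs_realign(base + i) ? "copy first" : "send in place");
        return 0;
}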
index 2e0f0af..40a41f0 100644 (file)
@@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data)
        }
 
 out:
-       if (tcp_memory_pressure)
+       if (sk_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
@@ -340,7 +340,7 @@ void tcp_retransmit_timer(struct sock *sk)
                               &inet->inet_daddr, ntohs(inet->inet_dport),
                               inet->inet_num, tp->snd_una, tp->snd_nxt);
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
index ac3b3ee..0177598 100644 (file)
@@ -105,7 +105,7 @@ drop:
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int tunnel64_rcv(struct sk_buff *skb)
 {
        struct xfrm_tunnel *handler;
@@ -134,7 +134,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
                        break;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static void tunnel64_err(struct sk_buff *skb, u32 info)
 {
        struct xfrm_tunnel *handler;
@@ -152,7 +152,7 @@ static const struct net_protocol tunnel4_protocol = {
        .netns_ok       =       1,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static const struct net_protocol tunnel64_protocol = {
        .handler        =       tunnel64_rcv,
        .err_handler    =       tunnel64_err,
@@ -167,7 +167,7 @@ static int __init tunnel4_init(void)
                printk(KERN_ERR "tunnel4 init: can't add protocol\n");
                return -EAGAIN;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
                printk(KERN_ERR "tunnel64 init: can't add protocol\n");
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
@@ -179,7 +179,7 @@ static int __init tunnel4_init(void)
 
 static void __exit tunnel4_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
                printk(KERN_ERR "tunnel64 close: can't remove protocol\n");
 #endif
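
This file, like many others in the merge, swaps the open-coded `defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)` test for IS_ENABLED(CONFIG_IPV6). The sketch below re-creates the placeholder trick that include/linux/kconfig.h uses for this, from memory and with renamed macros, so treat it as illustrative rather than the kernel's exact implementation:

#include <stdio.h>

/* pretend kconfig output: IPV6 built as a module (=m) */
#define CONFIG_IPV6_MODULE 1

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0, 0)

/* our stand-in for IS_ENABLED(): true for =y or =m */
#define MY_IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

int main(void)
{
        printf("CONFIG_IPV6 enabled: %d\n", MY_IS_ENABLED(CONFIG_IPV6)); /* 1, =m counts */
        printf("CONFIG_FOO  enabled: %d\n", MY_IS_ENABLED(CONFIG_FOO));  /* 0, not set   */
        return 0;
}

The point of the indirection is that the same macro works both in #if lines and in ordinary C expressions, so branches for disabled options still get compile-checked before being discarded.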
index 5a65eea..5d075b5 100644 (file)
@@ -445,7 +445,7 @@ exact_match:
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
-static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport,
                int dif, struct udp_table *udptable)
 {
@@ -512,6 +512,7 @@ begin:
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
 
 static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                                 __be16 sport, __be16 dport,
@@ -1358,7 +1359,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        if (inet_sk(sk)->inet_daddr)
                sock_rps_save_rxhash(sk, skb);
 
-       rc = ip_queue_rcv_skb(sk, skb);
+       rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
                int is_udplite = IS_UDPLITE(sk);
 
@@ -1474,6 +1475,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        rc = 0;
 
+       ipv4_pktinfo_prepare(skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
@@ -2247,7 +2249,8 @@ int udp4_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
new file mode 100644 (file)
index 0000000..27910c1
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * udp_diag.c  Module for monitoring UDP transport protocols sockets.
+ *
+ * Authors:    Pavel Emelyanov, <xemul@parallels.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/module.h>
+#include <linux/inet_diag.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
+               struct netlink_callback *cb, struct inet_diag_req *req,
+               struct nlattr *bc)
+{
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
+
+       return inet_sk_diag_fill(sk, NULL, skb, req, NETLINK_CB(cb->skb).pid,
+                       cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+}
+
+static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
+               const struct nlmsghdr *nlh, struct inet_diag_req *req)
+{
+       int err = -EINVAL;
+       struct sock *sk;
+       struct sk_buff *rep;
+
+       if (req->sdiag_family == AF_INET)
+               sk = __udp4_lib_lookup(&init_net,
+                               req->id.idiag_src[0], req->id.idiag_sport,
+                               req->id.idiag_dst[0], req->id.idiag_dport,
+                               req->id.idiag_if, tbl);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (req->sdiag_family == AF_INET6)
+               sk = __udp6_lib_lookup(&init_net,
+                               (struct in6_addr *)req->id.idiag_src,
+                               req->id.idiag_sport,
+                               (struct in6_addr *)req->id.idiag_dst,
+                               req->id.idiag_dport,
+                               req->id.idiag_if, tbl);
+#endif
+       else
+               goto out_nosk;
+
+       err = -ENOENT;
+       if (sk == NULL)
+               goto out_nosk;
+
+       err = inet_diag_check_cookie(sk, req);
+       if (err)
+               goto out;
+
+       err = -ENOMEM;
+       rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
+                                    sizeof(struct inet_diag_meminfo) +
+                                    64)), GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       err = inet_sk_diag_fill(sk, NULL, rep, req,
+                          NETLINK_CB(in_skb).pid,
+                          nlh->nlmsg_seq, 0, nlh);
+       if (err < 0) {
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(rep);
+               goto out;
+       }
+       err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       if (sk)
+               sock_put(sk);
+out_nosk:
+       return err;
+}
+
+static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       int num, s_num, slot, s_slot;
+
+       s_slot = cb->args[0];
+       num = s_num = cb->args[1];
+
+       for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+               struct sock *sk;
+               struct hlist_nulls_node *node;
+               struct udp_hslot *hslot = &table->hash[slot];
+
+               if (hlist_nulls_empty(&hslot->head))
+                       continue;
+
+               spin_lock_bh(&hslot->lock);
+               sk_nulls_for_each(sk, node, &hslot->head) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       if (num < s_num)
+                               goto next;
+                       if (!(r->idiag_states & (1 << sk->sk_state)))
+                               goto next;
+                       if (r->sdiag_family != AF_UNSPEC &&
+                                       sk->sk_family != r->sdiag_family)
+                               goto next;
+                       if (r->id.idiag_sport != inet->inet_sport &&
+                           r->id.idiag_sport)
+                               goto next;
+                       if (r->id.idiag_dport != inet->inet_dport &&
+                           r->id.idiag_dport)
+                               goto next;
+
+                       if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                               spin_unlock_bh(&hslot->lock);
+                               goto done;
+                       }
+next:
+                       num++;
+               }
+               spin_unlock_bh(&hslot->lock);
+       }
+done:
+       cb->args[0] = slot;
+       cb->args[1] = num;
+}
+
+static void udp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       udp_dump(&udp_table, skb, cb, r, bc);
+}
+
+static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return udp_dump_one(&udp_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udp_diag_handler = {
+       .dump            = udp_diag_dump,
+       .dump_one        = udp_diag_dump_one,
+       .idiag_type      = IPPROTO_UDP,
+};
+
+static void udplite_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct inet_diag_req *r, struct nlattr *bc)
+{
+       udp_dump(&udplite_table, skb, cb, r, bc);
+}
+
+static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+               struct inet_diag_req *req)
+{
+       return udp_dump_one(&udplite_table, in_skb, nlh, req);
+}
+
+static const struct inet_diag_handler udplite_diag_handler = {
+       .dump            = udplite_diag_dump,
+       .dump_one        = udplite_diag_dump_one,
+       .idiag_type      = IPPROTO_UDPLITE,
+};
+
+static int __init udp_diag_init(void)
+{
+       int err;
+
+       err = inet_diag_register(&udp_diag_handler);
+       if (err)
+               goto out;
+       err = inet_diag_register(&udplite_diag_handler);
+       if (err)
+               goto out_lite;
+out:
+       return err;
+out_lite:
+       inet_diag_unregister(&udp_diag_handler);
+       goto out;
+}
+
+static void __exit udp_diag_exit(void)
+{
+       inet_diag_unregister(&udplite_diag_handler);
+       inet_diag_unregister(&udp_diag_handler);
+}
+
+module_init(udp_diag_init);
+module_exit(udp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17);
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 136);
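
One detail worth calling out in udp_dump() above: the walk over the hash table stores its position in cb->args[0] (slot) and cb->args[1] (entry index) so that a dump which fills its skb can resume from the same spot on the next netlink callback. A self-contained sketch of that cursor pattern over a toy table follows; the fixed "budget" plays the role of the skb running out of room, and none of the names are kernel API.

#include <stdio.h>

#define NSLOTS 4

/* toy hash table: each slot holds up to three entries, 0 = empty */
static const int table[NSLOTS][3] = {
        { 1, 2, 3 }, { 4, 0, 0 }, { 0, 0, 0 }, { 5, 6, 0 },
};

/* One "dump" pass: emit at most budget entries and remember where we
 * stopped in args[0] (slot) and args[1] (index inside the slot), the
 * same cursor idea as cb->args[] in udp_dump(). */
static int dump(long args[2], int budget)
{
        int emitted = 0;
        int slot, num, s_num;

        num = s_num = args[1];
        for (slot = args[0]; slot < NSLOTS; s_num = 0, slot++) {
                for (num = 0; num < 3 && table[slot][num]; num++) {
                        if (num < s_num)
                                continue;       /* already sent in an earlier pass */
                        if (emitted == budget)
                                goto done;      /* "skb full", stop here */
                        printf("entry %d (slot %d)\n", table[slot][num], slot);
                        emitted++;
                }
        }
done:
        args[0] = slot;
        args[1] = num;
        return emitted;
}

int main(void)
{
        long args[2] = { 0, 0 };

        /* keep calling until a pass emits nothing, as a netlink dump does */
        while (dump(args, 2) > 0)
                printf("-- resuming --\n");
        return 0;
}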
index 8280645..9247d9d 100644 (file)
@@ -64,7 +64,7 @@ static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
        .priority       =       2,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
        .handler        =       xfrm_tunnel_rcv,
        .err_handler    =       xfrm_tunnel_err,
@@ -84,7 +84,7 @@ static int __init ipip_init(void)
                xfrm_unregister_type(&ipip_type, AF_INET);
                return -EAGAIN;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
                printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n");
                xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
@@ -97,7 +97,7 @@ static int __init ipip_init(void)
 
 static void __exit ipip_fini(void)
 {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
                printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n");
 #endif
index 36806de..59a9d0e 100644 (file)
@@ -630,13 +630,13 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
                goto out;
        }
 
-       rt = addrconf_dst_alloc(idev, addr, 0);
+       rt = addrconf_dst_alloc(idev, addr, false);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto out;
        }
 
-       ipv6_addr_copy(&ifa->addr, addr);
+       ifa->addr = *addr;
 
        spin_lock_init(&ifa->lock);
        spin_lock_init(&ifa->state_lock);
@@ -657,7 +657,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
         * layer address of our nexthop router
         */
 
-       if (dst_get_neighbour_raw(&rt->dst) == NULL)
+       if (dst_get_neighbour_noref_raw(&rt->dst) == NULL)
                ifa->flags &= ~IFA_F_OPTIMISTIC;
 
        ifa->idev = idev;
@@ -1228,7 +1228,7 @@ try_nextdev:
        if (!hiscore->ifa)
                return -EADDRNOTAVAIL;
 
-       ipv6_addr_copy(saddr, &hiscore->ifa->addr);
+       *saddr = hiscore->ifa->addr;
        in6_ifa_put(hiscore->ifa);
        return 0;
 }
@@ -1249,7 +1249,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
                list_for_each_entry(ifp, &idev->addr_list, if_list) {
                        if (ifp->scope == IFA_LINK &&
                            !(ifp->flags & banned_flags)) {
-                               ipv6_addr_copy(addr, &ifp->addr);
+                               *addr = ifp->addr;
                                err = 0;
                                break;
                        }
@@ -1700,7 +1700,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
                .fc_protocol = RTPROT_KERNEL,
        };
 
-       ipv6_addr_copy(&cfg.fc_dst, pfx);
+       cfg.fc_dst = *pfx;
 
        /* Prevent useless cloning on PtP SIT.
           This thing is done here expecting that the whole
index d27c797..273f48d 100644 (file)
@@ -347,7 +347,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                         */
                        v4addr = LOOPBACK4_IPV6;
                        if (!(addr_type & IPV6_ADDR_MULTICAST)) {
-                               if (!inet->transparent &&
+                               if (!(inet->freebind || inet->transparent) &&
                                    !ipv6_chk_addr(net, &addr->sin6_addr,
                                                   dev, 0)) {
                                        err = -EADDRNOTAVAIL;
@@ -361,10 +361,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        inet->inet_rcv_saddr = v4addr;
        inet->inet_saddr = v4addr;
 
-       ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+       np->rcv_saddr = addr->sin6_addr;
 
        if (!(addr_type & IPV6_ADDR_MULTICAST))
-               ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+               np->saddr = addr->sin6_addr;
 
        /* Make sure we are allowed to bind here. */
        if (sk->sk_prot->get_port(sk, snum)) {
@@ -458,14 +458,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
                    peer == 1)
                        return -ENOTCONN;
                sin->sin6_port = inet->inet_dport;
-               ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+               sin->sin6_addr = np->daddr;
                if (np->sndflow)
                        sin->sin6_flowinfo = np->flow_label;
        } else {
                if (ipv6_addr_any(&np->rcv_saddr))
-                       ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
+                       sin->sin6_addr = np->saddr;
                else
-                       ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr);
+                       sin->sin6_addr = np->rcv_saddr;
 
                sin->sin6_port = inet->inet_sport;
        }
@@ -660,8 +660,8 @@ int inet6_sk_rebuild_header(struct sock *sk)
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = sk->sk_protocol;
-               ipv6_addr_copy(&fl6.daddr, &np->daddr);
-               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.daddr = np->daddr;
+               fl6.saddr = np->saddr;
                fl6.flowlabel = np->flow_label;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = sk->sk_mark;
@@ -769,7 +769,8 @@ out:
        return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct ipv6hdr *ipv6h;
@@ -985,9 +986,9 @@ static int __net_init ipv6_init_mibs(struct net *net)
                          sizeof(struct icmpv6_mib),
                          __alignof__(struct icmpv6_mib)) < 0)
                goto err_icmp_mib;
-       if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
-                         sizeof(struct icmpv6msg_mib),
-                         __alignof__(struct icmpv6msg_mib)) < 0)
+       net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib),
+                                               GFP_KERNEL);
+       if (!net->mib.icmpv6msg_statistics)
                goto err_icmpmsg_mib;
        return 0;
 
@@ -1008,7 +1009,7 @@ static void ipv6_cleanup_mibs(struct net *net)
        snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
        snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
        snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
-       snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
+       kfree(net->mib.icmpv6msg_statistics);
 }
 
 static int __net_init inet6_net_init(struct net *net)
@@ -1115,6 +1116,8 @@ static int __init inet6_init(void)
        if (err)
                goto static_sysctl_fail;
 #endif
+       tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
+
        /*
         *      ipngwg API draft makes clear that the correct semantics
         *      for TCP and UDP is to consider one TCP and UDP instance
index 4c0f894..2ae79db 100644 (file)
@@ -193,9 +193,9 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
                                                printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length);
                                        goto bad;
                                }
-                               ipv6_addr_copy(&final_addr, &hao->addr);
-                               ipv6_addr_copy(&hao->addr, &iph->saddr);
-                               ipv6_addr_copy(&iph->saddr, &final_addr);
+                               final_addr = hao->addr;
+                               hao->addr = iph->saddr;
+                               iph->saddr = final_addr;
                        }
                        break;
                }
@@ -241,13 +241,13 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
        segments = rthdr->hdrlen >> 1;
 
        addrs = ((struct rt0_hdr *)rthdr)->addr;
-       ipv6_addr_copy(&final_addr, addrs + segments - 1);
+       final_addr = addrs[segments - 1];
 
        addrs += segments - segments_left;
        memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));
 
-       ipv6_addr_copy(addrs, &iph->daddr);
-       ipv6_addr_copy(&iph->daddr, &final_addr);
+       addrs[0] = iph->daddr;
+       iph->daddr = final_addr;
 }
 
 static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
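
The recurring edit through these IPv6 files (here and in the hunks above) replaces ipv6_addr_copy(dst, src) with a plain struct assignment, which copies the same 16 bytes and lets the compiler choose how. A tiny demonstration with a simplified local definition of struct in6_addr (not the kernel's union-based one) showing the two forms are equivalent:

#include <stdio.h>
#include <string.h>

/* simplified stand-in for struct in6_addr: 16 bytes of address */
struct in6_addr {
        unsigned char s6_addr[16];
};

/* what ipv6_addr_copy() boils down to */
static void ipv6_addr_copy(struct in6_addr *dst, const struct in6_addr *src)
{
        memcpy(dst, src, sizeof(*dst));
}

int main(void)
{
        struct in6_addr a = { { 0x20, 0x01, 0x0d, 0xb8 } }, b, c;

        ipv6_addr_copy(&b, &a); /* old style: helper wrapping memcpy   */
        c = a;                  /* new style: plain struct assignment  */

        printf("equal: %d\n", memcmp(&b, &c, sizeof(b)) == 0);
        return 0;
}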
index 674255f..cc540f9 100644 (file)
@@ -75,7 +75,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        if (pac == NULL)
                return -ENOMEM;
        pac->acl_next = NULL;
-       ipv6_addr_copy(&pac->acl_addr, addr);
+       pac->acl_addr = *addr;
 
        rcu_read_lock();
        if (ifindex == 0) {
@@ -289,14 +289,14 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
                goto out;
        }
 
-       rt = addrconf_dst_alloc(idev, addr, 1);
+       rt = addrconf_dst_alloc(idev, addr, true);
        if (IS_ERR(rt)) {
                kfree(aca);
                err = PTR_ERR(rt);
                goto out;
        }
 
-       ipv6_addr_copy(&aca->aca_addr, addr);
+       aca->aca_addr = *addr;
        aca->aca_idev = idev;
        aca->aca_rt = rt;
        aca->aca_users = 1;
index e248069..ae08aee 100644 (file)
@@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
-                       ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+                       usin->sin6_addr = flowlabel->dst;
                }
        }
 
@@ -143,7 +143,7 @@ ipv4_connected:
                }
        }
 
-       ipv6_addr_copy(&np->daddr, daddr);
+       np->daddr = *daddr;
        np->flow_label = fl6.flowlabel;
 
        inet->inet_dport = usin->sin6_port;
@@ -154,8 +154,8 @@ ipv4_connected:
         */
 
        fl6.flowi6_proto = sk->sk_protocol;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.daddr = np->daddr;
+       fl6.saddr = np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = inet->inet_dport;
@@ -179,10 +179,10 @@ ipv4_connected:
        /* source address lookup done in ip6_dst_lookup */
 
        if (ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&np->saddr, &fl6.saddr);
+               np->saddr = fl6.saddr;
 
        if (ipv6_addr_any(&np->rcv_saddr)) {
-               ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr);
+               np->rcv_saddr = fl6.saddr;
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
                if (sk->sk_prot->rehash)
                        sk->sk_prot->rehash(sk);
@@ -257,7 +257,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
        skb_put(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        iph = ipv6_hdr(skb);
-       ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+       iph->daddr = fl6->daddr;
 
        serr = SKB_EXT_ERR(skb);
        serr->ee.ee_errno = err;
@@ -294,7 +294,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
        skb_put(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        iph = ipv6_hdr(skb);
-       ipv6_addr_copy(&iph->daddr, &fl6->daddr);
+       iph->daddr = fl6->daddr;
 
        mtu_info = IP6CBMTU(skb);
 
@@ -303,7 +303,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
        mtu_info->ip6m_addr.sin6_port = 0;
        mtu_info->ip6m_addr.sin6_flowinfo = 0;
        mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
-       ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);
+       mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr;
 
        __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
        skb_reset_transport_header(skb);
@@ -354,8 +354,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_port = serr->port;
                sin->sin6_scope_id = 0;
                if (skb->protocol == htons(ETH_P_IPV6)) {
-                       ipv6_addr_copy(&sin->sin6_addr,
-                                 (struct in6_addr *)(nh + serr->addr_offset));
+                       sin->sin6_addr =
+                               *(struct in6_addr *)(nh + serr->addr_offset);
                        if (np->sndflow)
                                sin->sin6_flowinfo =
                                        (*(__be32 *)(nh + serr->addr_offset - 24) &
@@ -376,7 +376,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_flowinfo = 0;
                sin->sin6_scope_id = 0;
                if (skb->protocol == htons(ETH_P_IPV6)) {
-                       ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+                       sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
                                datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -451,7 +451,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
                sin->sin6_flowinfo = 0;
                sin->sin6_port = 0;
                sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
-               ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
+               sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
        }
 
        put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
@@ -475,7 +475,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                struct in6_pktinfo src_info;
 
                src_info.ipi6_ifindex = opt->iif;
-               ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+               src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
                put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
        }
 
@@ -550,7 +550,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                struct in6_pktinfo src_info;
 
                src_info.ipi6_ifindex = opt->iif;
-               ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr);
+               src_info.ipi6_addr = ipv6_hdr(skb)->daddr;
                put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
        }
        if (np->rxopt.bits.rxohlim) {
@@ -584,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
                         */
 
                        sin6.sin6_family = AF_INET6;
-                       ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr);
+                       sin6.sin6_addr = ipv6_hdr(skb)->daddr;
                        sin6.sin6_port = ports[1];
                        sin6.sin6_flowinfo = 0;
                        sin6.sin6_scope_id = 0;
@@ -654,12 +654,12 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 
                        if (addr_type != IPV6_ADDR_ANY) {
                                int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
-                               if (!inet_sk(sk)->transparent &&
+                               if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) &&
                                    !ipv6_chk_addr(net, &src_info->ipi6_addr,
                                                   strict ? dev : NULL, 0))
                                        err = -EINVAL;
                                else
-                                       ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr);
+                                       fl6->saddr = src_info->ipi6_addr;
                        }
 
                        rcu_read_unlock();
index bf22a22..3d641b6 100644 (file)
@@ -243,9 +243,9 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff)
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;
 
-       ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
-       ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
-       ipv6_addr_copy(&hao->addr, &tmp_addr);
+       tmp_addr = ipv6h->saddr;
+       ipv6h->saddr = hao->addr;
+       hao->addr = tmp_addr;
 
        if (skb->tstamp.tv64 == 0)
                __net_timestamp(skb);
@@ -461,9 +461,9 @@ looped_back:
                return -1;
        }
 
-       ipv6_addr_copy(&daddr, addr);
-       ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr);
-       ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr);
+       daddr = *addr;
+       *addr = ipv6_hdr(skb)->daddr;
+       ipv6_hdr(skb)->daddr = daddr;
 
        skb_dst_drop(skb);
        ip6_route_input(skb);
@@ -690,7 +690,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
                memcpy(phdr->addr, ihdr->addr + 1,
                       (hops - 1) * sizeof(struct in6_addr));
 
-       ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
+       phdr->addr[hops - 1] = **addr_p;
        *addr_p = ihdr->addr;
 
        phdr->rt_hdr.nexthdr = *proto;
@@ -888,8 +888,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
        if (!opt || !opt->srcrt)
                return NULL;
 
-       ipv6_addr_copy(orig, &fl6->daddr);
-       ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr);
+       *orig = fl6->daddr;
+       fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
        return orig;
 }
 
index 37f548b..72957f4 100644 (file)
@@ -57,6 +57,9 @@ int ipv6_ext_hdr(u8 nexthdr)
  *         it returns NULL.
  *       - First fragment header is skipped, not-first ones
  *         are considered as unparsable.
+ *       - Reports the offset field of the final fragment header so it is
+ *         possible to tell whether this is a first fragment, later fragment,
+ *         or not fragmented.
  *       - ESP is unparsable for now and considered like
  *         normal payload protocol.
  *       - Note also special handling of AUTH header. Thanks to IPsec wizards.
@@ -64,10 +67,13 @@ int ipv6_ext_hdr(u8 nexthdr)
  * --ANK (980726)
  */
 
-int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
+int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
+                    __be16 *frag_offp)
 {
        u8 nexthdr = *nexthdrp;
 
+       *frag_offp = 0;
+
        while (ipv6_ext_hdr(nexthdr)) {
                struct ipv6_opt_hdr _hdr, *hp;
                int hdrlen;
@@ -87,7 +93,8 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp)
                        if (fp == NULL)
                                return -1;
 
-                       if (ntohs(*fp) & ~0x7)
+                       *frag_offp = *fp;
+                       if (ntohs(*frag_offp) & ~0x7)
                                break;
                        hdrlen = 8;
                } else if (nexthdr == NEXTHDR_AUTH)
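
The extra frag_offp out-parameter added to ipv6_skip_exthdr() above lets callers distinguish an unfragmented packet, a first fragment (offset zero) and a later fragment (offset non-zero, so the upper-layer header is absent). A small userspace sketch of decoding that 16-bit network-order field; the constants follow the IPv6 fragment-header layout and the helper name is made up:

#include <stdio.h>
#include <arpa/inet.h>  /* htons/ntohs */

#define IP6_MF          0x0001  /* more-fragments flag */
#define IP6_OFFSET      0xfff8  /* fragment offset, in units of 8 bytes */

/* Classify a packet from the frag_off value reported by
 * ipv6_skip_exthdr(): 0 means no fragment header was seen. */
static const char *classify(unsigned short frag_off_be)
{
        unsigned short v = ntohs(frag_off_be);

        if (!frag_off_be)
                return "not fragmented";
        if (v & IP6_OFFSET)
                return "later fragment (upper-layer header not present)";
        return "first fragment";
}

int main(void)
{
        printf("%s\n", classify(0));
        printf("%s\n", classify(htons(IP6_MF)));                /* offset 0, MF set */
        printf("%s\n", classify(htons((1 << 3) | IP6_MF)));     /* offset 8 bytes   */
        return 0;
}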
index 2955715..b6c5731 100644 (file)
@@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                        if (!ipv6_prefix_equal(&saddr, &r->src.addr,
                                               r->src.plen))
                                goto again;
-                       ipv6_addr_copy(&flp6->saddr, &saddr);
+                       flp6->saddr = saddr;
                }
                goto out;
        }
index 90868fb..01d46bf 100644 (file)
@@ -135,11 +135,12 @@ static int is_ineligible(struct sk_buff *skb)
        int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
        int len = skb->len - ptr;
        __u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+       __be16 frag_off;
 
        if (len < 0)
                return 1;
 
-       ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
+       ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
        if (ptr < 0)
                return 0;
        if (nexthdr == IPPROTO_ICMPV6) {
@@ -290,9 +291,9 @@ static void mip6_addr_swap(struct sk_buff *skb)
                if (likely(off >= 0)) {
                        hao = (struct ipv6_destopt_hao *)
                                        (skb_network_header(skb) + off);
-                       ipv6_addr_copy(&tmp, &iph->saddr);
-                       ipv6_addr_copy(&iph->saddr, &hao->addr);
-                       ipv6_addr_copy(&hao->addr, &tmp);
+                       tmp = iph->saddr;
+                       iph->saddr = hao->addr;
+                       hao->addr = tmp;
                }
        }
 }
@@ -444,9 +445,9 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl6.daddr, &hdr->saddr);
+       fl6.daddr = hdr->saddr;
        if (saddr)
-               ipv6_addr_copy(&fl6.saddr, saddr);
+               fl6.saddr = *saddr;
        fl6.flowi6_oif = iif;
        fl6.fl6_icmp_type = type;
        fl6.fl6_icmp_code = code;
@@ -538,9 +539,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_ICMPV6;
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
+       fl6.daddr = ipv6_hdr(skb)->saddr;
        if (saddr)
-               ipv6_addr_copy(&fl6.saddr, saddr);
+               fl6.saddr = *saddr;
        fl6.flowi6_oif = skb->dev->ifindex;
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -596,6 +597,7 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        int inner_offset;
        int hash;
        u8 nexthdr;
+       __be16 frag_off;
 
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                return;
@@ -603,7 +605,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
        if (ipv6_ext_hdr(nexthdr)) {
                /* now skip over extension headers */
-               inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+               inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+                                               &nexthdr, &frag_off);
                if (inner_offset<0)
                        return;
        } else {
@@ -786,8 +789,8 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
                      int oif)
 {
        memset(fl6, 0, sizeof(*fl6));
-       ipv6_addr_copy(&fl6->saddr, saddr);
-       ipv6_addr_copy(&fl6->daddr, daddr);
+       fl6->saddr = *saddr;
+       fl6->daddr = *daddr;
        fl6->flowi6_proto       = IPPROTO_ICMPV6;
        fl6->fl6_icmp_type      = type;
        fl6->fl6_icmp_code      = 0;
index 1567fb1..02dd203 100644 (file)
@@ -65,9 +65,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+       fl6.daddr = treq->rmt_addr;
        final_p = fl6_update_dst(&fl6, np->opt, &final);
-       ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+       fl6.saddr = treq->loc_addr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -157,7 +157,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
        sin6->sin6_family = AF_INET6;
-       ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
+       sin6->sin6_addr = np->daddr;
        sin6->sin6_port = inet_sk(sk)->inet_dport;
        /* We do not store received flowlabel for TCP */
        sin6->sin6_flowinfo = 0;
@@ -215,8 +215,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = sk->sk_protocol;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+       fl6.daddr = np->daddr;
+       fl6.saddr = np->saddr;
        fl6.flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl6.flowlabel);
        fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -246,7 +246,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
        skb_dst_set_noref(skb, dst);
 
        /* Restore final destination back after routing done */
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
+       fl6.daddr = np->daddr;
 
        res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
        rcu_read_unlock();
index 93718f3..2783631 100644 (file)
@@ -190,7 +190,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
        struct fib6_table *table;
 
        table = kzalloc(sizeof(*table), GFP_ATOMIC);
-       if (table != NULL) {
+       if (table) {
                table->tb6_id = id;
                table->tb6_root.leaf = net->ipv6.ip6_null_entry;
                table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
@@ -210,7 +210,7 @@ struct fib6_table *fib6_new_table(struct net *net, u32 id)
                return tb;
 
        tb = fib6_alloc_table(net, id);
-       if (tb != NULL)
+       if (tb)
                fib6_link_table(net, tb);
 
        return tb;
@@ -367,7 +367,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        s_e = cb->args[1];
 
        w = (void *)cb->args[2];
-       if (w == NULL) {
+       if (!w) {
                /* New dump:
                 *
                 * 1. hook callback destructor.
@@ -379,7 +379,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
                 * 2. allocate and initialize walker.
                 */
                w = kzalloc(sizeof(*w), GFP_ATOMIC);
-               if (w == NULL)
+               if (!w)
                        return -ENOMEM;
                w->func = fib6_dump_node;
                cb->args[2] = (long)w;
@@ -425,7 +425,8 @@ out:
 
 static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
                                     int addrlen, int plen,
-                                    int offset)
+                                    int offset, int allow_create,
+                                    int replace_required)
 {
        struct fib6_node *fn, *in, *ln;
        struct fib6_node *pn = NULL;
@@ -447,8 +448,18 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
                 *      Prefix match
                 */
                if (plen < fn->fn_bit ||
-                   !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
+                   !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
+                       if (!allow_create) {
+                               if (replace_required) {
+                                       pr_warn("IPv6: Can't replace route, "
+                                               "no match found\n");
+                                       return ERR_PTR(-ENOENT);
+                               }
+                               pr_warn("IPv6: NLM_F_CREATE should be set "
+                                       "when creating new route\n");
+                       }
                        goto insert_above;
+               }
 
                /*
                 *      Exact match ?
@@ -456,7 +467,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
 
                if (plen == fn->fn_bit) {
                        /* clean up an intermediate node */
-                       if ((fn->fn_flags & RTN_RTINFO) == 0) {
+                       if (!(fn->fn_flags & RTN_RTINFO)) {
                                rt6_release(fn->leaf);
                                fn->leaf = NULL;
                        }
@@ -477,6 +488,23 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
                fn = dir ? fn->right: fn->left;
        } while (fn);
 
+       if (!allow_create) {
+               /* We should not create new node because
+                * NLM_F_REPLACE was specified without NLM_F_CREATE
+                * I assume it is safe to require NLM_F_CREATE when
+                * REPLACE flag is used! Later we may want to remove the
+                * check for replace_required, because according
+                * to netlink specification, NLM_F_CREATE
+                * MUST be specified if new route is created.
+                * That would keep IPv6 consistent with IPv4
+                */
+               if (replace_required) {
+                       pr_warn("IPv6: Can't replace route, no match found\n");
+                       return ERR_PTR(-ENOENT);
+               }
+               pr_warn("IPv6: NLM_F_CREATE should be set "
+                       "when creating new route\n");
+       }
        /*
         *      We walked to the bottom of tree.
         *      Create new leaf node without children.
@@ -484,7 +512,7 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
 
        ln = node_alloc();
 
-       if (ln == NULL)
+       if (!ln)
                return NULL;
        ln->fn_bit = plen;
 
@@ -527,7 +555,7 @@ insert_above:
                in = node_alloc();
                ln = node_alloc();
 
-               if (in == NULL || ln == NULL) {
+               if (!in || !ln) {
                        if (in)
                                node_free(in);
                        if (ln)
@@ -581,7 +609,7 @@ insert_above:
 
                ln = node_alloc();
 
-               if (ln == NULL)
+               if (!ln)
                        return NULL;
 
                ln->fn_bit = plen;
@@ -614,10 +642,15 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
+       int replace = (info->nlh &&
+                      (info->nlh->nlmsg_flags & NLM_F_REPLACE));
+       int add = (!info->nlh ||
+                  (info->nlh->nlmsg_flags & NLM_F_CREATE));
+       int found = 0;
 
        ins = &fn->leaf;
 
-       for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
+       for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
                /*
                 *      Search for duplicates
                 */
@@ -626,15 +659,22 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                        /*
                         *      Same priority level
                         */
+                       if (info->nlh &&
+                           (info->nlh->nlmsg_flags & NLM_F_EXCL))
+                               return -EEXIST;
+                       if (replace) {
+                               found++;
+                               break;
+                       }
 
                        if (iter->rt6i_dev == rt->rt6i_dev &&
                            iter->rt6i_idev == rt->rt6i_idev &&
                            ipv6_addr_equal(&iter->rt6i_gateway,
                                            &rt->rt6i_gateway)) {
-                               if (!(iter->rt6i_flags&RTF_EXPIRES))
+                               if (!(iter->rt6i_flags & RTF_EXPIRES))
                                        return -EEXIST;
                                iter->rt6i_expires = rt->rt6i_expires;
-                               if (!(rt->rt6i_flags&RTF_EXPIRES)) {
+                               if (!(rt->rt6i_flags & RTF_EXPIRES)) {
                                        iter->rt6i_flags &= ~RTF_EXPIRES;
                                        iter->rt6i_expires = 0;
                                }
@@ -655,17 +695,40 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
        /*
         *      insert node
         */
+       if (!replace) {
+               if (!add)
+                       pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n");
+
+add:
+               rt->dst.rt6_next = iter;
+               *ins = rt;
+               rt->rt6i_node = fn;
+               atomic_inc(&rt->rt6i_ref);
+               inet6_rt_notify(RTM_NEWROUTE, rt, info);
+               info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
+
+               if (!(fn->fn_flags & RTN_RTINFO)) {
+                       info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+                       fn->fn_flags |= RTN_RTINFO;
+               }
 
-       rt->dst.rt6_next = iter;
-       *ins = rt;
-       rt->rt6i_node = fn;
-       atomic_inc(&rt->rt6i_ref);
-       inet6_rt_notify(RTM_NEWROUTE, rt, info);
-       info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
-
-       if ((fn->fn_flags & RTN_RTINFO) == 0) {
-               info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
-               fn->fn_flags |= RTN_RTINFO;
+       } else {
+               if (!found) {
+                       if (add)
+                               goto add;
+                       pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n");
+                       return -ENOENT;
+               }
+               *ins = rt;
+               rt->rt6i_node = fn;
+               rt->dst.rt6_next = iter->dst.rt6_next;
+               atomic_inc(&rt->rt6i_ref);
+               inet6_rt_notify(RTM_NEWROUTE, rt, info);
+               rt6_release(iter);
+               if (!(fn->fn_flags & RTN_RTINFO)) {
+                       info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+                       fn->fn_flags |= RTN_RTINFO;
+               }
        }
 
        return 0;
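
The rework above makes fib6_add_rt2node() honour NLM_F_EXCL, NLM_F_CREATE and NLM_F_REPLACE the way IPv4 already does, which is what `ip -6 route add`, `change` and `replace` rely on. A compact sketch of the resulting decision table; it deliberately ignores the tree walk and the expiry handling and only models the flag logic:

#include <stdio.h>

#define NLM_F_REPLACE   0x100
#define NLM_F_EXCL      0x200
#define NLM_F_CREATE    0x400

/* Rough decision table of fib6_add_rt2node() for a request carrying
 * the given flags, when a route with the same metric does or does not
 * already exist.  Returns a description instead of acting. */
static const char *decide(unsigned flags, int match_exists)
{
        int replace = flags & NLM_F_REPLACE;
        int create  = flags & NLM_F_CREATE;

        if (match_exists && (flags & NLM_F_EXCL))
                return "-EEXIST (NLM_F_EXCL and route already there)";
        if (replace)
                return match_exists ? "replace existing route" :
                       create ? "no match: fall back to add" :
                                "-ENOENT (nothing to replace)";
        return create ? "add new route" :
                        "add new route (with missing-NLM_F_CREATE warning)";
}

int main(void)
{
        printf("%s\n", decide(NLM_F_CREATE | NLM_F_EXCL, 1));    /* ip route add     */
        printf("%s\n", decide(NLM_F_REPLACE, 1));                /* ip route change  */
        printf("%s\n", decide(NLM_F_CREATE | NLM_F_REPLACE, 0)); /* ip route replace */
        return 0;
}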
@@ -674,7 +737,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
 {
        if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
-           (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
+           (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
                mod_timer(&net->ipv6.ip6_fib_timer,
                          jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
@@ -696,11 +759,28 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 {
        struct fib6_node *fn, *pn = NULL;
        int err = -ENOMEM;
+       int allow_create = 1;
+       int replace_required = 0;
+
+       if (info->nlh) {
+               if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
+                       allow_create = 0;
+               if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
+                       replace_required = 1;
+       }
+       if (!allow_create && !replace_required)
+               pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
 
        fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
-                       rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));
+                       rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
+                       allow_create, replace_required);
+
+       if (IS_ERR(fn)) {
+               err = PTR_ERR(fn);
+               fn = NULL;
+       }
 
-       if (fn == NULL)
+       if (!fn)
                goto out;
 
        pn = fn;
@@ -709,7 +789,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
        if (rt->rt6i_src.plen) {
                struct fib6_node *sn;
 
-               if (fn->subtree == NULL) {
+               if (!fn->subtree) {
                        struct fib6_node *sfn;
 
                        /*
@@ -724,7 +804,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
                        /* Create subtree root node */
                        sfn = node_alloc();
-                       if (sfn == NULL)
+                       if (!sfn)
                                goto st_failure;
 
                        sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
@@ -736,9 +816,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 
                        sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
                                        sizeof(struct in6_addr), rt->rt6i_src.plen,
-                                       offsetof(struct rt6_info, rt6i_src));
+                                       offsetof(struct rt6_info, rt6i_src),
+                                       allow_create, replace_required);
 
-                       if (sn == NULL) {
+                       if (!sn) {
                                /* If it is failed, discard just allocated
                                   root, and then (in st_failure) stale node
                                   in main tree.
@@ -753,13 +834,18 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
                } else {
                        sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
                                        sizeof(struct in6_addr), rt->rt6i_src.plen,
-                                       offsetof(struct rt6_info, rt6i_src));
+                                       offsetof(struct rt6_info, rt6i_src),
+                                       allow_create, replace_required);
 
-                       if (sn == NULL)
+                       if (IS_ERR(sn)) {
+                               err = PTR_ERR(sn);
+                               sn = NULL;
+                       }
+                       if (!sn)
                                goto st_failure;
                }
 
-               if (fn->leaf == NULL) {
+               if (!fn->leaf) {
                        fn->leaf = rt;
                        atomic_inc(&rt->rt6i_ref);
                }
@@ -768,10 +854,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
 #endif
 
        err = fib6_add_rt2node(fn, rt, info);
-
-       if (err == 0) {
+       if (!err) {
                fib6_start_gc(info->nl_net, rt);
-               if (!(rt->rt6i_flags&RTF_CACHE))
+               if (!(rt->rt6i_flags & RTF_CACHE))
                        fib6_prune_clones(info->nl_net, pn, rt);
        }
 
@@ -819,7 +904,7 @@ st_failure:
  */
 
 struct lookup_args {
-       int             offset;         /* key offset on rt6_info       */
+       int                     offset;         /* key offset on rt6_info       */
        const struct in6_addr   *addr;          /* search key                   */
 };
 
@@ -849,11 +934,10 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
                        fn = next;
                        continue;
                }
-
                break;
        }
 
-       while(fn) {
+       while (fn) {
                if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
                        struct rt6key *key;
 
@@ -900,8 +984,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *da
        };
 
        fn = fib6_lookup_1(root, daddr ? args : args + 1);
-
-       if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
+       if (!fn || fn->fn_flags & RTN_TL_ROOT)
                fn = root;
 
        return fn;
@@ -961,7 +1044,7 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
        }
 #endif
 
-       if (fn && fn->fn_flags&RTN_RTINFO)
+       if (fn && fn->fn_flags & RTN_RTINFO)
                return fn;
 
        return NULL;
@@ -975,14 +1058,13 @@ struct fib6_node * fib6_locate(struct fib6_node *root,
 
 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
 {
-       if (fn->fn_flags&RTN_ROOT)
+       if (fn->fn_flags & RTN_ROOT)
                return net->ipv6.ip6_null_entry;
 
-       while(fn) {
-               if(fn->left)
+       while (fn) {
+               if (fn->left)
                        return fn->left->leaf;
-
-               if(fn->right)
+               if (fn->right)
                        return fn->right->leaf;
 
                fn = FIB6_SUBTREE(fn);
@@ -1020,12 +1102,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
                if (children == 3 || FIB6_SUBTREE(fn)
 #ifdef CONFIG_IPV6_SUBTREES
                    /* Subtree root (i.e. fn) may have one child */
-                   || (children && fn->fn_flags&RTN_ROOT)
+                   || (children && fn->fn_flags & RTN_ROOT)
 #endif
                    ) {
                        fn->leaf = fib6_find_prefix(net, fn);
 #if RT6_DEBUG >= 2
-                       if (fn->leaf==NULL) {
+                       if (!fn->leaf) {
                                WARN_ON(!fn->leaf);
                                fn->leaf = net->ipv6.ip6_null_entry;
                        }
@@ -1058,7 +1140,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 
                read_lock(&fib6_walker_lock);
                FOR_WALKERS(w) {
-                       if (child == NULL) {
+                       if (!child) {
                                if (w->root == fn) {
                                        w->root = w->node = NULL;
                                        RT6_TRACE("W %p adjusted by delroot 1\n", w);
@@ -1087,7 +1169,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
                read_unlock(&fib6_walker_lock);
 
                node_free(fn);
-               if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
+               if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
                        return pn;
 
                rt6_release(pn->leaf);
@@ -1121,7 +1203,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                if (w->state == FWS_C && w->leaf == rt) {
                        RT6_TRACE("walker %p adjusted by delroute\n", w);
                        w->leaf = rt->dst.rt6_next;
-                       if (w->leaf == NULL)
+                       if (!w->leaf)
                                w->state = FWS_U;
                }
        }
@@ -1130,7 +1212,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
        rt->dst.rt6_next = NULL;
 
        /* If it was last route, expunge its radix tree node */
-       if (fn->leaf == NULL) {
+       if (!fn->leaf) {
                fn->fn_flags &= ~RTN_RTINFO;
                net->ipv6.rt6_stats->fib_route_nodes--;
                fn = fib6_repair_tree(net, fn);
@@ -1144,7 +1226,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                 * to still alive ones.
                 */
                while (fn) {
-                       if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
+                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
                                fn->leaf = fib6_find_prefix(net, fn);
                                atomic_inc(&fn->leaf->rt6i_ref);
                                rt6_release(rt);
@@ -1171,17 +1253,17 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
                return -ENOENT;
        }
 #endif
-       if (fn == NULL || rt == net->ipv6.ip6_null_entry)
+       if (!fn || rt == net->ipv6.ip6_null_entry)
                return -ENOENT;
 
        WARN_ON(!(fn->fn_flags & RTN_RTINFO));
 
-       if (!(rt->rt6i_flags&RTF_CACHE)) {
+       if (!(rt->rt6i_flags & RTF_CACHE)) {
                struct fib6_node *pn = fn;
 #ifdef CONFIG_IPV6_SUBTREES
                /* clones of this route might be in another subtree */
                if (rt->rt6i_src.plen) {
-                       while (!(pn->fn_flags&RTN_ROOT))
+                       while (!(pn->fn_flags & RTN_ROOT))
                                pn = pn->parent;
                        pn = pn->parent;
                }
@@ -1232,11 +1314,11 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
 
        for (;;) {
                fn = w->node;
-               if (fn == NULL)
+               if (!fn)
                        return 0;
 
                if (w->prune && fn != w->root &&
-                   fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
+                   fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
                        w->state = FWS_C;
                        w->leaf = fn->leaf;
                }
@@ -1265,7 +1347,7 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
                        w->state = FWS_C;
                        w->leaf = fn->leaf;
                case FWS_C:
-                       if (w->leaf && fn->fn_flags&RTN_RTINFO) {
+                       if (w->leaf && fn->fn_flags & RTN_RTINFO) {
                                int err;
 
                                if (w->count < w->skip) {
@@ -1439,7 +1521,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
         *      only if they are not in use now.
         */
 
-       if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
+       if (rt->rt6i_flags & RTF_EXPIRES && rt->rt6i_expires) {
                if (time_after(now, rt->rt6i_expires)) {
                        RT6_TRACE("expiring %p\n", rt);
                        return -1;
@@ -1451,7 +1533,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                        RT6_TRACE("aging clone %p\n", rt);
                        return -1;
                } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
-                          (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
+                          (!(dst_get_neighbour_noref_raw(&rt->dst)->flags & NTF_ROUTER))) {
                        RT6_TRACE("purging route %p via non-router but gateway\n",
                                  rt);
                        return -1;
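
The ip6_fib.c hunks above move the subtree insertion path from bare NULL checks to the ERR_PTR()/IS_ERR()/PTR_ERR() convention, and fib6_add_1() now also receives allow_create/replace_required flags so a refused insert can carry a reason. A minimal userspace sketch of that error-in-pointer idiom (the helpers mirror <linux/err.h>; node_insert() is an invented stand-in, not a kernel function):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno inside an invalid pointer value,
 * the same trick <linux/err.h> uses. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented stand-in for fib6_add_1(): refuse to create a node (-ENOENT)
 * or refuse to replace an existing one (-EEXIST), depending on flags. */
static void *node_insert(int allow_create, int replace_required)
{
        static int node = 42;

        if (!allow_create)
                return ERR_PTR(-ENOENT);
        if (replace_required)
                return ERR_PTR(-EEXIST);
        return &node;
}

int main(void)
{
        void *n = node_insert(0, 0);

        if (IS_ERR(n))          /* same shape as the fib6_add() hunk above */
                printf("insert refused: %ld\n", PTR_ERR(n));
        else
                printf("inserted node at %p\n", n);
        return 0;
}

With allow_create = 0 the failure prints -2 (ENOENT), which a bare NULL return could not convey.
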
index 4566dbd..b7867a1 100644 (file)
@@ -386,7 +386,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                err = -EINVAL;
                goto done;
        }
-       ipv6_addr_copy(&fl->dst, &freq->flr_dst);
+       fl->dst = freq->flr_dst;
        atomic_set(&fl->users, 1);
        switch (fl->share) {
        case IPV6_FL_S_EXCL:
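
This hunk is the first of many in the series that drop ipv6_addr_copy(&dst, &src) in favour of a plain struct assignment; since struct in6_addr is an ordinary aggregate, dst = src copies the same 16 bytes the helper did. A small standalone C check of that equivalence (the local ipv6_addr_copy() below only imitates the removed kernel inline):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Only imitates the removed kernel inline: a 16-byte copy. */
static void ipv6_addr_copy(struct in6_addr *dst, const struct in6_addr *src)
{
        memcpy(dst, src, sizeof(struct in6_addr));
}

int main(void)
{
        struct in6_addr src = IN6ADDR_LOOPBACK_INIT;    /* ::1 */
        struct in6_addr a, b;

        ipv6_addr_copy(&a, &src);       /* old style */
        b = src;                        /* new style: plain struct assignment */

        printf("copies are %s\n",
               memcmp(&a, &b, sizeof(a)) == 0 ? "identical" : "different");
        return 0;
}

The same substitution accounts for most of the one-line conversions in ip6_output.c, ip6mr.c, mcast.c, ndisc.c, raw.c and route.c further down.
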
index a46c64e..1ca5d45 100644 (file)
@@ -280,6 +280,7 @@ int ip6_mc_input(struct sk_buff *skb)
                        u8 *ptr = skb_network_header(skb) + opt->ra;
                        struct icmp6hdr *icmp6;
                        u8 nexthdr = hdr->nexthdr;
+                       __be16 frag_off;
                        int offset;
 
                        /* Check if the value of Router Alert
@@ -293,7 +294,7 @@ int ip6_mc_input(struct sk_buff *skb)
                                        goto out;
                                }
                                offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
-                                                         &nexthdr);
+                                                         &nexthdr, &frag_off);
                                if (offset < 0)
                                        goto out;
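
ipv6_skip_exthdr() grows a fourth parameter in this hunk: the new __be16 frag_off local receives the Fragment-header offset/flags field, so callers can tell a later fragment apart from a packet whose extension chain was fully walked. A hedged userspace illustration of unpacking such a big-endian field (the 0xfff8/0x0001 masks follow the RFC 2460 fragment header layout; this is not a kernel API):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* RFC 2460 fragment header offset/flags word: upper 13 bits are the
 * fragment offset in 8-byte units, bit 0 is the More Fragments flag. */
static void unpack_frag_off(uint16_t frag_off_be, unsigned int *offset, int *more)
{
        uint16_t host = ntohs(frag_off_be);

        *offset = host & 0xfff8;        /* byte offset, already a multiple of 8 */
        *more   = host & 0x0001;
}

int main(void)
{
        uint16_t wire = htons((1280 & 0xfff8) | 1);     /* later fragment, more coming */
        unsigned int offset;
        int more;

        unpack_frag_off(wire, &offset, &more);
        printf("offset=%u bytes, more_fragments=%d\n", offset, more);
        return 0;
}
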
 
index 84d0bd5..71d2699 100644 (file)
@@ -136,7 +136,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(dst);
+       neigh = dst_get_neighbour_noref(dst);
        if (neigh) {
                int res = neigh_output(neigh, skb);
 
@@ -238,8 +238,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        hdr->nexthdr = proto;
        hdr->hop_limit = hlimit;
 
-       ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
-       ipv6_addr_copy(&hdr->daddr, first_hop);
+       hdr->saddr = fl6->saddr;
+       hdr->daddr = *first_hop;
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
@@ -290,8 +290,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
        hdr->nexthdr = proto;
        hdr->hop_limit = np->hop_limit;
 
-       ipv6_addr_copy(&hdr->saddr, saddr);
-       ipv6_addr_copy(&hdr->daddr, daddr);
+       hdr->saddr = *saddr;
+       hdr->daddr = *daddr;
 
        return 0;
 }
@@ -329,10 +329,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
 {
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        u8 nexthdr = hdr->nexthdr;
+       __be16 frag_off;
        int offset;
 
        if (ipv6_ext_hdr(nexthdr)) {
-               offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
+               offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
                if (offset < 0)
                        return 0;
        } else
@@ -462,7 +463,7 @@ int ip6_forward(struct sk_buff *skb)
           send redirects to source routed frames.
           We don't send redirects to frames decapsulated from IPsec.
         */
-       n = dst_get_neighbour(dst);
+       n = dst_get_neighbour_noref(dst);
        if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
                struct in6_addr *target = NULL;
                struct rt6_info *rt;
@@ -631,6 +632,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        struct ipv6hdr *tmp_hdr;
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
+       int hroom, troom;
        __be32 frag_id = 0;
        int ptr, offset = 0, err=0;
        u8 *prevhdr, nexthdr = 0;
@@ -797,6 +799,8 @@ slow_path:
         */
 
        *prevhdr = NEXTHDR_FRAGMENT;
+       hroom = LL_RESERVED_SPACE(rt->dst.dev);
+       troom = rt->dst.dev->needed_tailroom;
 
        /*
         *      Keep copying data until we run out.
@@ -815,7 +819,8 @@ slow_path:
                 *      Allocate buffer.
                 */
 
-               if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
+               if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
+                                     hroom + troom, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
                        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                                      IPSTATS_MIB_FRAGFAILS);
@@ -828,7 +833,7 @@ slow_path:
                 */
 
                ip6_copy_metadata(frag, skb);
-               skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
+               skb_reserve(frag, hroom);
                skb_put(frag, len + hlen + sizeof(struct frag_hdr));
                skb_reset_network_header(frag);
                fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -978,7 +983,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         * dst entry of the nexthop router
         */
        rcu_read_lock();
-       n = dst_get_neighbour(*dst);
+       n = dst_get_neighbour_noref(*dst);
        if (n && !(n->nud_state & NUD_VALID)) {
                struct inet6_ifaddr *ifp;
                struct flowi6 fl_gw6;
@@ -1059,7 +1064,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (err)
                return ERR_PTR(err);
        if (final_dst)
-               ipv6_addr_copy(&fl6->daddr, final_dst);
+               fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
@@ -1095,7 +1100,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        if (err)
                return ERR_PTR(err);
        if (final_dst)
-               ipv6_addr_copy(&fl6->daddr, final_dst);
+               fl6->daddr = *final_dst;
        if (can_sleep)
                fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
 
@@ -1588,7 +1593,7 @@ int ip6_push_pending_frames(struct sock *sk)
        if (np->pmtudisc < IPV6_PMTUDISC_DO)
                skb->local_df = 1;
 
-       ipv6_addr_copy(final_dst, &fl6->daddr);
+       *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
        if (opt && opt->opt_flen)
                ipv6_push_frag_opts(skb, opt, &proto);
@@ -1604,8 +1609,8 @@ int ip6_push_pending_frames(struct sock *sk)
 
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
-       ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
-       ipv6_addr_copy(&hdr->daddr, final_dst);
+       hdr->saddr = fl6->saddr;
+       hdr->daddr = *final_dst;
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
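
Several hunks in this file (and in mcast.c, ndisc.c and raw.c below) replace the removed LL_ALLOCATED_SPACE(dev) macro with an explicit pair: hroom = LL_RESERVED_SPACE(dev) for link-layer headroom and troom = dev->needed_tailroom for trailer space, allocating len + hroom + troom and then reserving only the headroom. A minimal userspace analogue of that sizing pattern, with plain malloc() standing in for alloc_skb()/skb_reserve() and made-up room sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Made-up room sizes for illustration. */
#define HROOM 16        /* stands in for LL_RESERVED_SPACE(dev) */
#define TROOM  4        /* stands in for dev->needed_tailroom   */

int main(void)
{
        size_t len = 100;       /* payload we want to carry */
        unsigned char *buf = malloc(len + HROOM + TROOM);
        unsigned char *data;

        if (!buf)
                return 1;

        data = buf + HROOM;             /* like skb_reserve(skb, hroom) */
        memset(data, 0xab, len);        /* payload area; TROOM bytes stay free at the end */

        printf("total=%zu headroom=%d payload=%zu tailroom=%d\n",
               len + HROOM + TROOM, HROOM, len, TROOM);
        free(buf);
        return 0;
}
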
index 4e2e9ff..f5f98f5 100644 (file)
@@ -93,7 +93,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
@@ -979,8 +979,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
-       ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
-       ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
+       ipv6h->saddr = fl6->saddr;
+       ipv6h->daddr = fl6->daddr;
        nf_reset(skb);
        pkt_len = skb->len;
        err = ip6_local_out(skb);
@@ -1155,8 +1155,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
 
        /* Set up flowi template */
-       ipv6_addr_copy(&fl6->saddr, &p->laddr);
-       ipv6_addr_copy(&fl6->daddr, &p->raddr);
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
        fl6->flowi6_oif = p->link;
        fl6->flowlabel = 0;
 
@@ -1212,8 +1212,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 static int
 ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
 {
-       ipv6_addr_copy(&t->parms.laddr, &p->laddr);
-       ipv6_addr_copy(&t->parms.raddr, &p->raddr);
+       t->parms.laddr = p->laddr;
+       t->parms.raddr = p->raddr;
        t->parms.flags = p->flags;
        t->parms.hop_limit = p->hop_limit;
        t->parms.encap_limit = p->encap_limit;
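
The pcpu_tstats hunk above only appends an alignment attribute, presumably so that each per-CPU copy of the counters starts on a wider, cache-friendlier boundary. What aligned(4*sizeof(unsigned long)) does to the type can be checked in standalone C (the rx_packets member, above the hunk's context window, is assumed):

#include <stddef.h>
#include <stdio.h>

struct pcpu_tstats {
        unsigned long   rx_packets;
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
} __attribute__((aligned(4 * sizeof(unsigned long))));

int main(void)
{
        /* Each array element (or per-CPU copy) now starts on a
         * 4*sizeof(long) boundary instead of sizeof(long). */
        printf("alignof=%zu sizeof=%zu\n",
               (size_t)__alignof__(struct pcpu_tstats),
               sizeof(struct pcpu_tstats));
        return 0;
}
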
index 449a918..c7e95c8 100644 (file)
@@ -1105,8 +1105,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
                msg->im6_msgtype = MRT6MSG_WHOLEPKT;
                msg->im6_mif = mrt->mroute_reg_vif_num;
                msg->im6_pad = 0;
-               ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
-               ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+               msg->im6_src = ipv6_hdr(pkt)->saddr;
+               msg->im6_dst = ipv6_hdr(pkt)->daddr;
 
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
@@ -1131,8 +1131,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
        msg->im6_msgtype = assert;
        msg->im6_mif = mifi;
        msg->im6_pad = 0;
-       ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
-       ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
+       msg->im6_src = ipv6_hdr(pkt)->saddr;
+       msg->im6_dst = ipv6_hdr(pkt)->daddr;
 
        skb_dst_set(skb, dst_clone(skb_dst(pkt)));
        skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2181,8 +2181,8 @@ int ip6mr_get_route(struct net *net,
                iph->payload_len = 0;
                iph->nexthdr = IPPROTO_NONE;
                iph->hop_limit = 0;
-               ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
-               ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);
+               iph->saddr = rt->rt6i_src.addr;
+               iph->daddr = rt->rt6i_dst.addr;
 
                err = ip6mr_cache_unresolved(mrt, vif, skb2);
                read_unlock(&mrt_lock);
index 26cb08c..18a2719 100644 (file)
@@ -435,7 +435,7 @@ sticky_done:
                        goto e_inval;
 
                np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
-               ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr);
+               np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
                retv = 0;
                break;
        }
@@ -980,8 +980,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
-                                       ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxhlim) {
@@ -992,8 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) :
-                                       ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr));
+                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxohlim) {
index ee7839f..ea34d58 100644 (file)
@@ -155,7 +155,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
                return -ENOMEM;
 
        mc_lst->next = NULL;
-       ipv6_addr_copy(&mc_lst->addr, addr);
+       mc_lst->addr = *addr;
 
        rcu_read_lock();
        if (ifindex == 0) {
@@ -858,7 +858,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
 
        setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
 
-       ipv6_addr_copy(&mc->mca_addr, addr);
+       mc->mca_addr = *addr;
        mc->idev = idev; /* (reference taken) */
        mc->mca_users = 1;
        /* mca_stamp should be updated upon changes */
@@ -1343,13 +1343,15 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
        struct mld2_report *pmr;
        struct in6_addr addr_buf;
        const struct in6_addr *saddr;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
        int err;
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
                     IPV6_TLV_PADN, 0 };
 
        /* we assume size > sizeof(ra) here */
-       size += LL_ALLOCATED_SPACE(dev);
+       size += hlen + tlen;
        /* limit our allocations to order-0 page */
        size = min_t(int, size, SKB_MAX_ORDER(0, 0));
        skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1357,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
        if (!skb)
                return NULL;
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
@@ -1408,18 +1410,11 @@ static void mld_sendpack(struct sk_buff *skb)
                                           csum_partial(skb_transport_header(skb),
                                                        mldlen, 0));
 
-       dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-
-       if (!dst) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
        icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
+       dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
 
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
        err = 0;
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
@@ -1723,6 +1718,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        struct mld_msg *hdr;
        const struct in6_addr *snd_addr, *saddr;
        struct in6_addr addr_buf;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
        int err, len, payload_len, full_len;
        u8 ra[8] = { IPPROTO_ICMPV6, 0,
                     IPV6_TLV_ROUTERALERT, 2, 0, 0,
@@ -1744,7 +1741,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                      IPSTATS_MIB_OUT, full_len);
        rcu_read_unlock();
 
-       skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err);
+       skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
 
        if (skb == NULL) {
                rcu_read_lock();
@@ -1754,7 +1751,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
                return;
        }
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
 
        if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
                /* <draft-ietf-magma-mld-source-05.txt>:
@@ -1772,7 +1769,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg));
        memset(hdr, 0, sizeof(struct mld_msg));
        hdr->mld_type = type;
-       ipv6_addr_copy(&hdr->mld_mca, addr);
+       hdr->mld_mca = *addr;
 
        hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
                                         IPPROTO_ICMPV6,
@@ -1781,17 +1778,10 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
        rcu_read_lock();
        idev = __in6_dev_get(skb->dev);
 
-       dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
-       if (!dst) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-
        icmpv6_flow_init(sk, &fl6, type,
                         &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                         skb->dev->ifindex);
-
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       dst = icmp6_dst_alloc(skb->dev, NULL, &fl6);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto err_out;
@@ -1914,7 +1904,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
  * Add multicast single-source filter to the interface list
  */
 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
-       const struct in6_addr *psfsrc, int delta)
+       const struct in6_addr *psfsrc)
 {
        struct ip6_sf_list *psf, *psf_prev;
 
@@ -2045,7 +2035,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                pmc->mca_sfcount[sfmode]++;
        err = 0;
        for (i=0; i<sfcount; i++) {
-               err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
+               err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
                if (err)
                        break;
        }
index 43242e6..7e1e0fb 100644 (file)
@@ -195,8 +195,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp,
                mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
                mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
                mip6_report_rl.iif = iif;
-               ipv6_addr_copy(&mip6_report_rl.src, src);
-               ipv6_addr_copy(&mip6_report_rl.dst, dst);
+               mip6_report_rl.src = *src;
+               mip6_report_rl.dst = *dst;
                allow = 1;
        }
        spin_unlock_bh(&mip6_report_rl.lock);
index 0cb78d7..f3e50c2 100644 (file)
@@ -126,7 +126,6 @@ static const struct neigh_ops ndisc_direct_ops = {
 
 struct neigh_table nd_tbl = {
        .family =       AF_INET6,
-       .entry_size =   sizeof(struct neighbour) + sizeof(struct in6_addr),
        .key_len =      sizeof(struct in6_addr),
        .hash =         ndisc_hash,
        .constructor =  ndisc_constructor,
@@ -141,7 +140,7 @@ struct neigh_table nd_tbl = {
                .gc_staletime           = 60 * HZ,
                .reachable_time         = ND_REACHABLE_TIME,
                .delay_probe_time       = 5 * HZ,
-               .queue_len              = 3,
+               .queue_len_bytes        = 64*1024,
                .ucast_probes           = 3,
                .mcast_probes           = 3,
                .anycast_delay          = 1 * HZ,
@@ -446,6 +445,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
        struct sock *sk = net->ipv6.ndisc_sk;
        struct sk_buff *skb;
        struct icmp6hdr *hdr;
+       int hlen = LL_RESERVED_SPACE(dev);
+       int tlen = dev->needed_tailroom;
        int len;
        int err;
        u8 *opt;
@@ -459,7 +460,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
 
        skb = sock_alloc_send_skb(sk,
                                  (MAX_HEADER + sizeof(struct ipv6hdr) +
-                                  len + LL_ALLOCATED_SPACE(dev)),
+                                  len + hlen + tlen),
                                  1, &err);
        if (!skb) {
                ND_PRINTK0(KERN_ERR
@@ -468,7 +469,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
                return NULL;
        }
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);
 
        skb->transport_header = skb->tail;
@@ -479,7 +480,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
 
        opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
        if (target) {
-               ipv6_addr_copy((struct in6_addr *)opt, target);
+               *(struct in6_addr *)opt = *target;
                opt += sizeof(*target);
        }
 
@@ -515,14 +516,7 @@ void ndisc_send_skb(struct sk_buff *skb,
        type = icmp6h->icmp6_type;
 
        icmpv6_flow_init(sk, &fl6, type, saddr, daddr, dev->ifindex);
-
-       dst = icmp6_dst_alloc(dev, neigh, daddr);
-       if (!dst) {
-               kfree_skb(skb);
-               return;
-       }
-
-       dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+       dst = icmp6_dst_alloc(dev, neigh, &fl6);
        if (IS_ERR(dst)) {
                kfree_skb(skb);
                return;
@@ -1237,7 +1231,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
        if (rt)
-               neigh = dst_get_neighbour(&rt->dst);
+               neigh = dst_get_neighbour_noref(&rt->dst);
 
        if (rt && lifetime == 0) {
                neigh_clone(neigh);
@@ -1257,7 +1251,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                        return;
                }
 
-               neigh = dst_get_neighbour(&rt->dst);
+               neigh = dst_get_neighbour_noref(&rt->dst);
                if (neigh == NULL) {
                        ND_PRINTK0(KERN_ERR
                                   "ICMPv6 RA: %s() got default router without neighbour.\n",
@@ -1533,6 +1527,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        struct inet6_dev *idev;
        struct flowi6 fl6;
        u8 *opt;
+       int hlen, tlen;
        int rd_len;
        int err;
        u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
@@ -1590,9 +1585,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        rd_len &= ~0x7;
        len += rd_len;
 
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
        buff = sock_alloc_send_skb(sk,
                                   (MAX_HEADER + sizeof(struct ipv6hdr) +
-                                   len + LL_ALLOCATED_SPACE(dev)),
+                                   len + hlen + tlen),
                                   1, &err);
        if (buff == NULL) {
                ND_PRINTK0(KERN_ERR
@@ -1601,7 +1598,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
                goto release;
        }
 
-       skb_reserve(buff, LL_RESERVED_SPACE(dev));
+       skb_reserve(buff, hlen);
        ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
                   IPPROTO_ICMPV6, len);
 
@@ -1617,9 +1614,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
         */
 
        addrp = (struct in6_addr *)(icmph + 1);
-       ipv6_addr_copy(addrp, target);
+       *addrp = *target;
        addrp++;
-       ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr);
+       *addrp = ipv6_hdr(skb)->daddr;
 
        opt = (u8*) (addrp + 1);
 
index e63c397..fb80a23 100644 (file)
@@ -405,6 +405,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
        int status, type, pid, flags;
        unsigned int nlmsglen, skblen;
        struct nlmsghdr *nlh;
+       bool enable_timestamp = false;
 
        skblen = skb->len;
        if (skblen < sizeof(*nlh))
@@ -442,11 +443,13 @@ __ipq_rcv_skb(struct sk_buff *skb)
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
-               net_enable_timestamp();
+               enable_timestamp = true;
                peer_pid = pid;
        }
 
        spin_unlock_bh(&queue_lock);
+       if (enable_timestamp)
+               net_enable_timestamp();
 
        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
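
The __ipq_rcv_skb() hunk records enable_timestamp = true while queue_lock is held and only calls net_enable_timestamp() after spin_unlock_bh(), presumably because that call is too heavy to run inside the BH-disabled critical section. A small pthread sketch of the same decide-under-the-lock, act-after-unlock pattern (slow_setup() is a stand-in, not a kernel API; build with cc -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int peer_pid;

/* Stand-in for net_enable_timestamp(): too heavy to run under queue_lock. */
static void slow_setup(void)
{
        usleep(1000);
        puts("timestamping enabled");
}

static void register_peer(int pid)
{
        bool enable = false;

        pthread_mutex_lock(&queue_lock);
        if (peer_pid != pid) {          /* decide under the lock ...            */
                peer_pid = pid;
                enable = true;
        }
        pthread_mutex_unlock(&queue_lock);

        if (enable)                     /* ... but do the heavy work outside it */
                slow_setup();
}

int main(void)
{
        register_peer(1234);
        register_peer(1234);            /* second call: nothing left to enable  */
        return 0;
}
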
index a5a4c5d..aad2fa4 100644 (file)
@@ -49,6 +49,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        const __u8 tclass = DEFAULT_TOS_VALUE;
        struct dst_entry *dst = NULL;
        u8 proto;
+       __be16 frag_off;
        struct flowi6 fl6;
 
        if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
@@ -58,7 +59,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        }
 
        proto = oip6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto);
+       tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);
 
        if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
                pr_debug("Cannot get TCP header.\n");
@@ -93,8 +94,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.saddr, &oip6h->daddr);
-       ipv6_addr_copy(&fl6.daddr, &oip6h->saddr);
+       fl6.saddr = oip6h->daddr;
+       fl6.daddr = oip6h->saddr;
        fl6.fl6_sport = otcph.dest;
        fl6.fl6_dport = otcph.source;
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
@@ -129,8 +130,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        *(__be32 *)ip6h =  htonl(0x60000000 | (tclass << 20));
        ip6h->hop_limit = ip6_dst_hoplimit(dst);
        ip6h->nexthdr = IPPROTO_TCP;
-       ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
-       ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
+       ip6h->saddr = oip6h->daddr;
+       ip6h->daddr = oip6h->saddr;
 
        tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
        /* Truncate to length (no data) */
index 1008ce9..fdeb6d0 100644 (file)
@@ -142,11 +142,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
        SNMP_MIB_SENTINEL
 };
 
-/* can be called either with percpu mib (pcpumib != NULL),
- * or shared one (smib != NULL)
- */
-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
-                                    atomic_long_t *smib)
+static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib)
 {
        char name[32];
        int i;
@@ -163,14 +159,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpum
                snprintf(name, sizeof(name), "Icmp6%s%s",
                        i & 0x100 ? "Out" : "In", p);
                seq_printf(seq, "%-32s\t%lu\n", name,
-                       pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
+                          atomic_long_read(smib + i));
        }
 
        /* print by number (nonzero only) - ICMPMsgStat format */
        for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
                unsigned long val;
 
-               val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
+               val = atomic_long_read(smib + i);
                if (!val)
                        continue;
                snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -215,8 +211,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
                            snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
        snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
                            NULL, snmp6_icmp6_list);
-       snmp6_seq_show_icmpv6msg(seq,
-                           (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
+       snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs);
        snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
                            NULL, snmp6_udp6_list);
        snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
@@ -246,7 +241,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
                            snmp6_ipstats_list);
        snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
                            snmp6_icmp6_list);
-       snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
+       snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
        return 0;
 }
 
index 331af3b..a4894f4 100644 (file)
@@ -299,9 +299,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        }
 
        inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-       ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
+       np->rcv_saddr = addr->sin6_addr;
        if (!(addr_type & IPV6_ADDR_MULTICAST))
-               ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
+               np->saddr = addr->sin6_addr;
        err = 0;
 out_unlock:
        rcu_read_unlock();
@@ -383,7 +383,8 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
        }
 
        /* Charge it to the socket. */
-       if (ip_queue_rcv_skb(sk, skb) < 0) {
+       skb_dst_drop(skb);
+       if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -494,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        if (sin6) {
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = 0;
-               ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+               sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                sin6->sin6_flowinfo = 0;
                sin6->sin6_scope_id = 0;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -610,6 +611,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
        struct sk_buff *skb;
        int err;
        struct rt6_info *rt = (struct rt6_info *)*dstp;
+       int hlen = LL_RESERVED_SPACE(rt->dst.dev);
+       int tlen = rt->dst.dev->needed_tailroom;
 
        if (length > rt->dst.dev->mtu) {
                ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
@@ -619,11 +622,11 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
                goto out;
 
        skb = sock_alloc_send_skb(sk,
-                                 length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
+                                 length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto error;
-       skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
+       skb_reserve(skb, hlen);
 
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
@@ -843,11 +846,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                goto out;
 
        if (!ipv6_addr_any(daddr))
-               ipv6_addr_copy(&fl6.daddr, daddr);
+               fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.saddr = np->saddr;
 
        final_p = fl6_update_dst(&fl6, opt, &final);
 
index dfb164e..b69fae7 100644 (file)
@@ -153,8 +153,8 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
 
        fq->id = arg->id;
        fq->user = arg->user;
-       ipv6_addr_copy(&fq->saddr, arg->src);
-       ipv6_addr_copy(&fq->daddr, arg->dst);
+       fq->saddr = *arg->src;
+       fq->daddr = *arg->dst;
 }
 EXPORT_SYMBOL(ip6_frag_init);
 
index b582a0a..ad43854 100644 (file)
@@ -247,9 +247,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
 {
        struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
-       if (rt != NULL)
+       if (rt)
                memset(&rt->rt6i_table, 0,
-                       sizeof(*rt) - sizeof(struct dst_entry));
+                      sizeof(*rt) - sizeof(struct dst_entry));
 
        return rt;
 }
@@ -263,7 +263,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
        if (!(rt->dst.flags & DST_HOST))
                dst_destroy_metrics_generic(dst);
 
-       if (idev != NULL) {
+       if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }
@@ -299,10 +299,10 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;
 
-       if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
+       if (dev != loopback_dev && idev && idev->dev == dev) {
                struct inet6_dev *loopback_idev =
                        in6_dev_get(loopback_dev);
-               if (loopback_idev != NULL) {
+               if (loopback_idev) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
@@ -344,7 +344,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
                        if (dev->ifindex == oif)
                                return sprt;
                        if (dev->flags & IFF_LOOPBACK) {
-                               if (sprt->rt6i_idev == NULL ||
+                               if (!sprt->rt6i_idev ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
                                        if (flags & RT6_LOOKUP_F_IFACE && oif)
                                                continue;
@@ -385,7 +385,7 @@ static void rt6_probe(struct rt6_info *rt)
         * to no more than one per minute.
         */
        rcu_read_lock();
-       neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+       neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
        if (!neigh || (neigh->nud_state & NUD_VALID))
                goto out;
        read_lock_bh(&neigh->lock);
@@ -432,7 +432,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
        int m;
 
        rcu_read_lock();
-       neigh = dst_get_neighbour(&rt->dst);
+       neigh = dst_get_neighbour_noref(&rt->dst);
        if (rt->rt6i_flags & RTF_NONEXTHOP ||
            !(rt->rt6i_flags & RTF_GATEWAY))
                m = 1;
@@ -636,7 +636,7 @@ do { \
                                goto restart; \
                } \
        } \
-} while(0)
+} while (0)
 
 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
@@ -727,24 +727,25 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                struct neighbour *neigh;
                int attempts = !in_softirq();
 
-               if (!(rt->rt6i_flags&RTF_GATEWAY)) {
+               if (!(rt->rt6i_flags & RTF_GATEWAY)) {
                        if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
-                       ipv6_addr_copy(&rt->rt6i_gateway, daddr);
+                       rt->rt6i_gateway = *daddr;
                }
 
                rt->rt6i_flags |= RTF_CACHE;
 
 #ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
-                       ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
+                       rt->rt6i_src.addr = *saddr;
                        rt->rt6i_src.plen = 128;
                }
 #endif
 
        retry:
-               neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+               neigh = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway,
+                                            rt->rt6i_dev);
                if (IS_ERR(neigh)) {
                        struct net *net = dev_net(rt->rt6i_dev);
                        int saved_rt_min_interval =
@@ -785,7 +786,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
 
        if (rt) {
                rt->rt6i_flags |= RTF_CACHE;
-               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
+               dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
        }
        return rt;
 }
@@ -819,7 +820,7 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -875,7 +876,7 @@ void ip6_route_input(struct sk_buff *skb)
                .flowi6_iif = skb->dev->ifindex,
                .daddr = iph->daddr,
                .saddr = iph->saddr,
-               .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
+               .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
                .flowi6_mark = skb->mark,
                .flowi6_proto = iph->nexthdr,
        };
@@ -934,7 +935,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
                        in6_dev_hold(rt->rt6i_idev);
                rt->rt6i_expires = 0;
 
-               ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+               rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
                rt->rt6i_metric = 0;
 
@@ -997,7 +998,7 @@ static void ip6_link_failure(struct sk_buff *skb)
 
        rt = (struct rt6_info *) skb_dst(skb);
        if (rt) {
-               if (rt->rt6i_flags&RTF_CACHE) {
+               if (rt->rt6i_flags & RTF_CACHE) {
                        dst_set_expires(&rt->dst, 0);
                        rt->rt6i_flags |= RTF_EXPIRES;
                } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
@@ -1067,34 +1068,38 @@ static DEFINE_SPINLOCK(icmp6_dst_lock);
 
 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
                                  struct neighbour *neigh,
-                                 const struct in6_addr *addr)
+                                 struct flowi6 *fl6)
 {
+       struct dst_entry *dst;
        struct rt6_info *rt;
        struct inet6_dev *idev = in6_dev_get(dev);
        struct net *net = dev_net(dev);
 
-       if (unlikely(idev == NULL))
+       if (unlikely(!idev))
                return NULL;
 
        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
-       if (unlikely(rt == NULL)) {
+       if (unlikely(!rt)) {
                in6_dev_put(idev);
+               dst = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        if (neigh)
                neigh_hold(neigh);
        else {
-               neigh = ndisc_get_neigh(dev, addr);
-               if (IS_ERR(neigh))
-                       neigh = NULL;
+               neigh = __neigh_lookup_errno(&nd_tbl, &fl6->daddr, dev);
+               if (IS_ERR(neigh)) {
+                       dst_free(&rt->dst);
+                       return ERR_CAST(neigh);
+               }
        }
 
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
        dst_set_neighbour(&rt->dst, neigh);
        atomic_set(&rt->dst.__refcnt, 1);
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+       rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
        dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
@@ -1106,8 +1111,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 
        fib6_force_start_gc(net);
 
+       dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
+
 out:
-       return &rt->dst;
+       return dst;
 }
 
 int icmp6_dst_gc(void)
@@ -1237,15 +1244,24 @@ int ip6_route_add(struct fib6_config *cfg)
        if (cfg->fc_metric == 0)
                cfg->fc_metric = IP6_RT_PRIO_USER;
 
-       table = fib6_new_table(net, cfg->fc_table);
-       if (table == NULL) {
-               err = -ENOBUFS;
-               goto out;
+       err = -ENOBUFS;
+       if (cfg->fc_nlinfo.nlh &&
+           !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
+               table = fib6_get_table(net, cfg->fc_table);
+               if (!table) {
+                       printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
+                       table = fib6_new_table(net, cfg->fc_table);
+               }
+       } else {
+               table = fib6_new_table(net, cfg->fc_table);
        }
 
+       if (!table)
+               goto out;
+
        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
-       if (rt == NULL) {
+       if (!rt) {
                err = -ENOMEM;
                goto out;
        }
@@ -1294,8 +1310,9 @@ int ip6_route_add(struct fib6_config *cfg)
           they would result in kernel looping; promote them to reject routes
         */
        if ((cfg->fc_flags & RTF_REJECT) ||
-           (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
-                                             && !(cfg->fc_flags&RTF_LOCAL))) {
+           (dev && (dev->flags & IFF_LOOPBACK) &&
+            !(addr_type & IPV6_ADDR_LOOPBACK) &&
+            !(cfg->fc_flags & RTF_LOCAL))) {
                /* hold loopback dev/idev if we haven't done so. */
                if (dev != net->loopback_dev) {
                        if (dev) {
@@ -1322,7 +1339,7 @@ int ip6_route_add(struct fib6_config *cfg)
                int gwa_type;
 
                gw_addr = &cfg->fc_gateway;
-               ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
+               rt->rt6i_gateway = *gw_addr;
                gwa_type = ipv6_addr_type(gw_addr);
 
                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
@@ -1336,13 +1353,13 @@ int ip6_route_add(struct fib6_config *cfg)
                           some exceptions. --ANK
                         */
                        err = -EINVAL;
-                       if (!(gwa_type&IPV6_ADDR_UNICAST))
+                       if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;
 
                        grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
 
                        err = -EHOSTUNREACH;
-                       if (grt == NULL)
+                       if (!grt)
                                goto out;
                        if (dev) {
                                if (dev != grt->rt6i_dev) {
@@ -1355,7 +1372,7 @@ int ip6_route_add(struct fib6_config *cfg)
                                dev_hold(dev);
                                in6_dev_hold(grt->rt6i_idev);
                        }
-                       if (!(grt->rt6i_flags&RTF_GATEWAY))
+                       if (!(grt->rt6i_flags & RTF_GATEWAY))
                                err = 0;
                        dst_release(&grt->dst);
 
@@ -1363,12 +1380,12 @@ int ip6_route_add(struct fib6_config *cfg)
                                goto out;
                }
                err = -EINVAL;
-               if (dev == NULL || (dev->flags&IFF_LOOPBACK))
+               if (!dev || (dev->flags & IFF_LOOPBACK))
                        goto out;
        }
 
        err = -ENODEV;
-       if (dev == NULL)
+       if (!dev)
                goto out;
 
        if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
@@ -1376,7 +1393,7 @@ int ip6_route_add(struct fib6_config *cfg)
                        err = -EINVAL;
                        goto out;
                }
-               ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
+               rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
                rt->rt6i_prefsrc.plen = 128;
        } else
                rt->rt6i_prefsrc.plen = 0;
@@ -1465,7 +1482,7 @@ static int ip6_route_del(struct fib6_config *cfg)
        int err = -ESRCH;
 
        table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
-       if (table == NULL)
+       if (!table)
                return err;
 
        read_lock_bh(&table->tb6_lock);
@@ -1477,7 +1494,7 @@ static int ip6_route_del(struct fib6_config *cfg)
        if (fn) {
                for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
                        if (cfg->fc_ifindex &&
-                           (rt->rt6i_dev == NULL ||
+                           (!rt->rt6i_dev ||
                             rt->rt6i_dev->ifindex != cfg->fc_ifindex))
                                continue;
                        if (cfg->fc_flags & RTF_GATEWAY &&
@@ -1573,7 +1590,7 @@ static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
                },
        };
 
-       ipv6_addr_copy(&rdfl.gateway, gateway);
+       rdfl.gateway = *gateway;
 
        if (rt6_need_strict(dest))
                flags |= RT6_LOOKUP_F_IFACE;
@@ -1618,18 +1635,18 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        dst_confirm(&rt->dst);
 
        /* Duplicate redirect: silently ignore. */
-       if (neigh == dst_get_neighbour_raw(&rt->dst))
+       if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
                goto out;
 
        nrt = ip6_rt_copy(rt, dest);
-       if (nrt == NULL)
+       if (!nrt)
                goto out;
 
        nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-       ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
+       nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
        dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
        if (ip6_ins_rt(nrt))
@@ -1639,7 +1656,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
        netevent.new = &nrt->dst;
        call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
 
-       if (rt->rt6i_flags&RTF_CACHE) {
+       if (rt->rt6i_flags & RTF_CACHE) {
                ip6_del_rt(rt);
                return;
        }
@@ -1660,7 +1677,7 @@ static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr
        int allfrag = 0;
 again:
        rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
-       if (rt == NULL)
+       if (!rt)
                return;
 
        if (rt6_check_expired(rt)) {
@@ -1710,7 +1727,7 @@ again:
           1. It is connected route. Action: COW
           2. It is gatewayed route or NONEXTHOP route. Action: clone it.
         */
-       if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, daddr, saddr);
        else
                nrt = rt6_alloc_clone(rt, daddr);
@@ -1775,7 +1792,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                rt->dst.output = ort->dst.output;
                rt->dst.flags |= DST_HOST;
 
-               ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
+               rt->rt6i_dst.addr = *dest;
                rt->rt6i_dst.plen = 128;
                dst_copy_metrics(&rt->dst, &ort->dst);
                rt->dst.error = ort->dst.error;
@@ -1785,7 +1802,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                rt->dst.lastuse = jiffies;
                rt->rt6i_expires = 0;
 
-               ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
+               rt->rt6i_gateway = ort->rt6i_gateway;
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
                rt->rt6i_metric = 0;
 
@@ -1808,7 +1825,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
        struct fib6_table *table;
 
        table = fib6_get_table(net, RT6_TABLE_INFO);
-       if (table == NULL)
+       if (!table)
                return NULL;
 
        write_lock_bh(&table->tb6_lock);
@@ -1848,8 +1865,8 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
                .fc_nlinfo.nl_net = net,
        };
 
-       ipv6_addr_copy(&cfg.fc_dst, prefix);
-       ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+       cfg.fc_dst = *prefix;
+       cfg.fc_gateway = *gwaddr;
 
        /* We should treat it as a default route if prefix length is 0. */
        if (!prefixlen)
@@ -1867,7 +1884,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
        struct fib6_table *table;
 
        table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
-       if (table == NULL)
+       if (!table)
                return NULL;
 
        write_lock_bh(&table->tb6_lock);
@@ -1898,7 +1915,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
                .fc_nlinfo.nl_net = dev_net(dev),
        };
 
-       ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
+       cfg.fc_gateway = *gwaddr;
 
        ip6_route_add(&cfg);
 
@@ -1912,7 +1929,7 @@ void rt6_purge_dflt_routers(struct net *net)
 
        /* NOTE: Keep consistent with rt6_get_dflt_router */
        table = fib6_get_table(net, RT6_TABLE_DFLT);
-       if (table == NULL)
+       if (!table)
                return;
 
 restart:
@@ -1944,9 +1961,9 @@ static void rtmsg_to_fib6_config(struct net *net,
 
        cfg->fc_nlinfo.nl_net = net;
 
-       ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
-       ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
-       ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
+       cfg->fc_dst = rtmsg->rtmsg_dst;
+       cfg->fc_src = rtmsg->rtmsg_src;
+       cfg->fc_gateway = rtmsg->rtmsg_gateway;
 }
 
 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
@@ -2045,14 +2062,14 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
 
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr,
-                                   int anycast)
+                                   bool anycast)
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
                                            net->loopback_dev, 0);
        struct neighbour *neigh;
 
-       if (rt == NULL) {
+       if (!rt) {
                if (net_ratelimit())
                        pr_warning("IPv6:  Maximum number of routes reached,"
                                   " consider increasing route/max_size.\n");
@@ -2072,7 +2089,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                rt->rt6i_flags |= RTF_ANYCAST;
        else
                rt->rt6i_flags |= RTF_LOCAL;
-       neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
+       neigh = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, rt->rt6i_dev);
        if (IS_ERR(neigh)) {
                dst_free(&rt->dst);
 
@@ -2080,7 +2097,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        }
        dst_set_neighbour(&rt->dst, neigh);
 
-       ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+       rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
 
@@ -2098,7 +2115,7 @@ int ip6_route_get_saddr(struct net *net,
        struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
        int err = 0;
        if (rt->rt6i_prefsrc.plen)
-               ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
+               *saddr = rt->rt6i_prefsrc.addr;
        else
                err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
                                         daddr, prefs, saddr);
@@ -2118,7 +2135,7 @@ static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
        struct net *net = ((struct arg_dev_net_ip *)arg)->net;
        struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
 
-       if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
+       if (((void *)rt->rt6i_dev == dev || !dev) &&
            rt != net->ipv6.ip6_null_entry &&
            ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
                /* remove prefsrc entry */
@@ -2148,7 +2165,7 @@ static int fib6_ifdown(struct rt6_info *rt, void *arg)
        const struct arg_dev_net *adn = arg;
        const struct net_device *dev = adn->dev;
 
-       if ((rt->rt6i_dev == dev || dev == NULL) &&
+       if ((rt->rt6i_dev == dev || !dev) &&
            rt != adn->net->ipv6.ip6_null_entry) {
                RT6_TRACE("deleted by ifdown %p\n", rt);
                return -1;
@@ -2185,7 +2202,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
        */
 
        idev = __in6_dev_get(arg->dev);
-       if (idev == NULL)
+       if (!idev)
                return 0;
 
        /* For administrative MTU increase, there is no way to discover
@@ -2365,7 +2382,7 @@ static int rt6_fill_node(struct net *net,
        }
 
        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
-       if (nlh == NULL)
+       if (!nlh)
                return -EMSGSIZE;
 
        rtm = nlmsg_data(nlh);
@@ -2379,25 +2396,25 @@ static int rt6_fill_node(struct net *net,
                table = RT6_TABLE_UNSPEC;
        rtm->rtm_table = table;
        NLA_PUT_U32(skb, RTA_TABLE, table);
-       if (rt->rt6i_flags&RTF_REJECT)
+       if (rt->rt6i_flags & RTF_REJECT)
                rtm->rtm_type = RTN_UNREACHABLE;
-       else if (rt->rt6i_flags&RTF_LOCAL)
+       else if (rt->rt6i_flags & RTF_LOCAL)
                rtm->rtm_type = RTN_LOCAL;
-       else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
+       else if (rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
                rtm->rtm_type = RTN_LOCAL;
        else
                rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
-       if (rt->rt6i_flags&RTF_DYNAMIC)
+       if (rt->rt6i_flags & RTF_DYNAMIC)
                rtm->rtm_protocol = RTPROT_REDIRECT;
        else if (rt->rt6i_flags & RTF_ADDRCONF)
                rtm->rtm_protocol = RTPROT_KERNEL;
-       else if (rt->rt6i_flags&RTF_DEFAULT)
+       else if (rt->rt6i_flags & RTF_DEFAULT)
                rtm->rtm_protocol = RTPROT_RA;
 
-       if (rt->rt6i_flags&RTF_CACHE)
+       if (rt->rt6i_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;
 
        if (dst) {
@@ -2437,7 +2454,7 @@ static int rt6_fill_node(struct net *net,
 
        if (rt->rt6i_prefsrc.plen) {
                struct in6_addr saddr_buf;
-               ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
+               saddr_buf = rt->rt6i_prefsrc.addr;
                NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
        }
 
@@ -2445,7 +2462,7 @@ static int rt6_fill_node(struct net *net,
                goto nla_put_failure;
 
        rcu_read_lock();
-       n = dst_get_neighbour(&rt->dst);
+       n = dst_get_neighbour_noref(&rt->dst);
        if (n)
                NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
        rcu_read_unlock();
@@ -2511,14 +2528,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
                if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
                        goto errout;
 
-               ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC]));
+               fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
        }
 
        if (tb[RTA_DST]) {
                if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
                        goto errout;
 
-               ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST]));
+               fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
        }
 
        if (tb[RTA_IIF])
@@ -2537,7 +2554,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
        }
 
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (skb == NULL) {
+       if (!skb) {
                err = -ENOBUFS;
                goto errout;
        }
@@ -2572,10 +2589,10 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
        int err;
 
        err = -ENOBUFS;
-       seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
+       seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
        skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
-       if (skb == NULL)
+       if (!skb)
                goto errout;
 
        err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
@@ -2642,7 +2659,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
        seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
        rcu_read_lock();
-       n = dst_get_neighbour(&rt->dst);
+       n = dst_get_neighbour_noref(&rt->dst);
        if (n) {
                seq_printf(m, "%pi6", n->primary_key);
        } else {
index 96f3623..3b6dac9 100644 (file)
@@ -91,7 +91,7 @@ struct pcpu_tstats {
        unsigned long   rx_bytes;
        unsigned long   tx_packets;
        unsigned long   tx_bytes;
-};
+} __attribute__((aligned(4*sizeof(unsigned long))));
 
 static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
 {
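The only change to struct pcpu_tstats above is the alignment attribute, which raises the type's required alignment to four unsigned longs; the hunk does not show the motivation, but the effect on the type itself is easy to see in a standalone sketch (hypothetical struct names, GCC-style attribute and C11 _Alignof assumed):

#include <stdio.h>

struct tstats_plain {
        unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes;
};

struct tstats_aligned {
        unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes;
} __attribute__((aligned(4 * sizeof(unsigned long))));

int main(void)
{
        printf("plain:   size %zu align %zu\n",
               sizeof(struct tstats_plain), _Alignof(struct tstats_plain));
        printf("aligned: size %zu align %zu\n",
               sizeof(struct tstats_aligned), _Alignof(struct tstats_aligned));
        return 0;
}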
@@ -682,7 +682,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = dst_get_neighbour(skb_dst(skb));
+                       neigh = dst_get_neighbour_noref(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -707,7 +707,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                struct neighbour *neigh = NULL;
 
                if (skb_dst(skb))
-                       neigh = dst_get_neighbour(skb_dst(skb));
+                       neigh = dst_get_neighbour_noref(skb_dst(skb));
 
                if (neigh == NULL) {
                        if (net_ratelimit())
@@ -916,7 +916,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                                goto done;
 #ifdef CONFIG_IPV6_SIT_6RD
                } else {
-                       ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix);
+                       ip6rd.prefix = t->ip6rd.prefix;
                        ip6rd.relay_prefix = t->ip6rd.relay_prefix;
                        ip6rd.prefixlen = t->ip6rd.prefixlen;
                        ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
@@ -1084,7 +1084,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (relay_prefix != ip6rd.relay_prefix)
                                goto done;
 
-                       ipv6_addr_copy(&t->ip6rd.prefix, &prefix);
+                       t->ip6rd.prefix = prefix;
                        t->ip6rd.relay_prefix = relay_prefix;
                        t->ip6rd.prefixlen = ip6rd.prefixlen;
                        t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
index 5a0d664..8e951d8 100644 (file)
@@ -200,8 +200,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        req->mss = mss;
        ireq->rmt_port = th->source;
        ireq->loc_port = th->dest;
-       ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
+       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -237,9 +237,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                struct flowi6 fl6;
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_TCP;
-               ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
+               fl6.daddr = ireq6->rmt_addr;
                final_p = fl6_update_dst(&fl6, np->opt, &final);
-               ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
+               fl6.saddr = ireq6->loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = sk->sk_mark;
                fl6.fl6_dport = inet_rsk(req)->rmt_port;
index 2dea4bb..906c7ca 100644 (file)
@@ -62,6 +62,7 @@
 #include <net/netdma.h>
 #include <net/inet_common.h>
 #include <net/secure_seq.h>
+#include <net/tcp_memcontrol.h>
 
 #include <asm/uaccess.h>
 
@@ -153,7 +154,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
-                       ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
+                       usin->sin6_addr = flowlabel->dst;
                        fl6_sock_release(flowlabel);
                }
        }
@@ -195,7 +196,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                tp->write_seq = 0;
        }
 
-       ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
+       np->daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -244,9 +245,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                saddr = &np->rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-       ipv6_addr_copy(&fl6.saddr,
-                      (saddr ? saddr : &np->saddr));
+       fl6.daddr = np->daddr;
+       fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
@@ -264,11 +264,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               ipv6_addr_copy(&np->rcv_saddr, saddr);
+               np->rcv_saddr = *saddr;
        }
 
        /* set the source address */
-       ipv6_addr_copy(&np->saddr, saddr);
+       np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
        sk->sk_gso_type = SKB_GSO_TCPV6;
@@ -398,8 +398,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                         */
                        memset(&fl6, 0, sizeof(fl6));
                        fl6.flowi6_proto = IPPROTO_TCP;
-                       ipv6_addr_copy(&fl6.daddr, &np->daddr);
-                       ipv6_addr_copy(&fl6.saddr, &np->saddr);
+                       fl6.daddr = np->daddr;
+                       fl6.saddr = np->saddr;
                        fl6.flowi6_oif = sk->sk_bound_dev_if;
                        fl6.flowi6_mark = sk->sk_mark;
                        fl6.fl6_dport = inet->inet_dport;
@@ -489,8 +489,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_TCP;
-       ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
-       ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
+       fl6.daddr = treq->rmt_addr;
+       fl6.saddr = treq->loc_addr;
        fl6.flowlabel = 0;
        fl6.flowi6_oif = treq->iif;
        fl6.flowi6_mark = sk->sk_mark;
@@ -512,7 +512,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
        if (skb) {
                __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 
-               ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
+               fl6.daddr = treq->rmt_addr;
                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -617,8 +617,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
                        tp->md5sig_info->alloced6++;
                }
 
-               ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
-                              peer);
+               tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
                tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
 
@@ -750,8 +749,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 
        bp = &hp->md5_blk.ip6;
        /* 1. TCP pseudo-header (RFC2460) */
-       ipv6_addr_copy(&bp->saddr, saddr);
-       ipv6_addr_copy(&bp->daddr, daddr);
+       bp->saddr = *saddr;
+       bp->daddr = *daddr;
        bp->protocol = cpu_to_be32(IPPROTO_TCP);
        bp->len = cpu_to_be32(nbytes);
 
@@ -1039,8 +1038,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 #endif
 
        memset(&fl6, 0, sizeof(fl6));
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
+       fl6.daddr = ipv6_hdr(skb)->saddr;
+       fl6.saddr = ipv6_hdr(skb)->daddr;
 
        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;
@@ -1250,8 +1249,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_openreq_init(req, &tmp_opt, skb);
 
        treq = inet6_rsk(req);
-       ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
-       ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
+       treq->rmt_addr = ipv6_hdr(skb)->saddr;
+       treq->loc_addr = ipv6_hdr(skb)->daddr;
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, tcp_hdr(skb));
 
@@ -1381,7 +1380,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
+               newnp->rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1445,9 +1444,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
-       ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
-       ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
+       newnp->daddr = treq->rmt_addr;
+       newnp->saddr = treq->loc_addr;
+       newnp->rcv_saddr = treq->loc_addr;
        newsk->sk_bound_dev_if = treq->iif;
 
        /* Now IPv6 options...
@@ -1996,7 +1995,8 @@ static int tcp_v6_init_sock(struct sock *sk)
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
        local_bh_disable();
-       percpu_counter_inc(&tcp_sockets_allocated);
+       sock_update_memcg(sk);
+       sk_sockets_allocated_inc(sk);
        local_bh_enable();
 
        return 0;
@@ -2215,7 +2215,6 @@ struct proto tcpv6_prot = {
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
-       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
@@ -2229,6 +2228,9 @@ struct proto tcpv6_prot = {
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
 #endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+       .proto_cgroup           = tcp_proto_cgroup,
+#endif
 };
 
 static const struct inet6_protocol tcpv6_protocol = {
index 8c25419..4f96b5c 100644 (file)
@@ -238,7 +238,7 @@ exact_match:
        return result;
 }
 
-static struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(struct net *net,
                                      const struct in6_addr *saddr, __be16 sport,
                                      const struct in6_addr *daddr, __be16 dport,
                                      int dif, struct udp_table *udptable)
@@ -305,6 +305,7 @@ begin:
        rcu_read_unlock();
        return result;
 }
+EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 
 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
                                          __be16 sport, __be16 dport,
@@ -418,8 +419,7 @@ try_again:
                        ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
                                               &sin6->sin6_addr);
                else {
-                       ipv6_addr_copy(&sin6->sin6_addr,
-                                      &ipv6_hdr(skb)->saddr);
+                       sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin6->sin6_scope_id = IP6CB(skb)->iif;
                }
@@ -539,7 +539,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
+       skb_dst_drop(skb);
+       rc = sock_queue_rcv_skb(sk, skb);
+       if (rc < 0) {
                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
                        UDP6_INC_STATS_BH(sock_net(sk),
@@ -1114,11 +1116,11 @@ do_udp_sendmsg:
 
        fl6.flowi6_proto = sk->sk_protocol;
        if (!ipv6_addr_any(daddr))
-               ipv6_addr_copy(&fl6.daddr, daddr);
+               fl6.daddr = *daddr;
        else
                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
-               ipv6_addr_copy(&fl6.saddr, &np->saddr);
+               fl6.saddr = np->saddr;
        fl6.fl6_sport = inet->inet_sport;
 
        final_p = fl6_update_dst(&fl6, opt, &final);
@@ -1299,7 +1301,8 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
index 3437d7d..a81ce94 100644 (file)
@@ -72,8 +72,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
                top_iph->nexthdr = IPPROTO_BEETPH;
        }
 
-       ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
-       ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+       top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+       top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
 }
 
@@ -99,8 +99,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
 
        ip6h = ipv6_hdr(skb);
        ip6h->payload_len = htons(skb->len - size);
-       ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
-       ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
+       ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6;
+       ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6;
        err = 0;
 out:
        return err;
index 4d6edff..261e6e6 100644 (file)
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
                dsfield &= ~INET_ECN_MASK;
        ipv6_change_dsfield(top_iph, 0, dsfield);
        top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
-       ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
-       ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
+       top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
+       top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
        return 0;
 }
 
index faae417..4eeff89 100644 (file)
@@ -49,7 +49,7 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
        struct sock *sk = skb->sk;
 
        fl6.flowi6_oif = sk->sk_bound_dev_if;
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+       fl6.daddr = ipv6_hdr(skb)->daddr;
 
        ipv6_local_rxpmtu(sk, &fl6, mtu);
 }
@@ -60,7 +60,7 @@ static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
        struct sock *sk = skb->sk;
 
        fl6.fl6_dport = inet_sk(sk)->inet_dport;
-       ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+       fl6.daddr = ipv6_hdr(skb)->daddr;
 
        ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
 }
index d879f7e..8ea65e0 100644 (file)
@@ -132,8 +132,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
        memset(fl6, 0, sizeof(struct flowi6));
        fl6->flowi6_mark = skb->mark;
 
-       ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr);
-       ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr);
+       fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
+       fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
 
        while (nh + offset + 1 < skb->data ||
               pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
index f2d72b8..3f2f7c4 100644 (file)
@@ -27,8 +27,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
 
        /* Initialize temporary selector matching only
         * to current session. */
-       ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr);
-       ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr);
+       *(struct in6_addr *)&sel->daddr = fl6->daddr;
+       *(struct in6_addr *)&sel->saddr = fl6->saddr;
        sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
        sel->dport_mask = htons(0xffff);
        sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
index 32e3bb0..5c93f29 100644 (file)
@@ -1461,14 +1461,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
        }
 
        /* Allocate a new instance */
-       new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
+       new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
        if (!new) {
                IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
                spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
                return NULL;
        }
-       /* Dup */
-       memcpy(new, orig, sizeof(struct tsap_cb));
        spin_lock_init(&new->lock);
 
        /* We don't need the old instance any more */
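The irttp_dup() hunk folds a kmalloc()/memcpy() pair into kmemdup(), which allocates and copies in one step and leaves a single failure path. A rough userspace stand-in follows; kmemdup here is a local malloc-based helper, not the kernel API, and struct tsap_cb is reduced to a toy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace stand-in for the kernel's kmemdup(src, len, gfp) */
static void *kmemdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}

struct tsap_cb {        /* toy version, just enough to copy */
        int ref;
        char name[16];
};

int main(void)
{
        struct tsap_cb orig = { .ref = 1, .name = "orig" };
        struct tsap_cb *new = kmemdup(&orig, sizeof(orig));

        if (!new)
                return 1;               /* single failure path, as in the patch */
        printf("dup: %s ref=%d\n", new->name, new->ref);
        free(new);
        return 0;
}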
index 1e733e9..11dbb22 100644 (file)
@@ -375,7 +375,7 @@ static int verify_address_len(const void *p)
        const struct sadb_address *sp = p;
        const struct sockaddr *addr = (const struct sockaddr *)(sp + 1);
        const struct sockaddr_in *sin;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        const struct sockaddr_in6 *sin6;
 #endif
        int len;
@@ -387,7 +387,7 @@ static int verify_address_len(const void *p)
                    sp->sadb_address_prefixlen > 32)
                        return -EINVAL;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t));
                if (sp->sadb_address_len != len ||
@@ -469,7 +469,7 @@ static int present_and_same_family(const struct sadb_address *src,
        if (s_addr->sa_family != d_addr->sa_family)
                return 0;
        if (s_addr->sa_family != AF_INET
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
            && s_addr->sa_family != AF_INET6
 #endif
                )
@@ -579,7 +579,7 @@ static inline int pfkey_sockaddr_len(sa_family_t family)
        switch (family) {
        case AF_INET:
                return sizeof(struct sockaddr_in);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                return sizeof(struct sockaddr_in6);
 #endif
@@ -595,7 +595,7 @@ int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
                xaddr->a4 =
                        ((struct sockaddr_in *)sa)->sin_addr.s_addr;
                return AF_INET;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(xaddr->a6,
                       &((struct sockaddr_in6 *)sa)->sin6_addr,
@@ -639,7 +639,7 @@ static struct  xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct
        case AF_INET:
                xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
                break;
@@ -705,14 +705,14 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
                return 32;
            }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
            {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
                sin6->sin6_family = AF_INET6;
                sin6->sin6_port = port;
                sin6->sin6_flowinfo = 0;
-               ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
+               sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
                sin6->sin6_scope_id = 0;
                return 128;
            }
@@ -1311,7 +1311,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
                xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr;
                xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr;
                xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr;
@@ -3146,7 +3146,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                        return NULL;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_IPSEC_POLICY) {
                        *dir = -EOPNOTSUPP;
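Every af_key.c hunk above replaces the open-coded defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) test with IS_ENABLED(CONFIG_IPV6), which covers both built-in and modular IPv6 with one macro. Below is a rough re-creation of the mechanism, assuming the usual Kconfig convention that enabled options are defined to 1; the kernel's real macro lives in <linux/kconfig.h> and may differ in detail:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_IPV6_MODULE 1    /* pretend IPv6 is built as a module */

int main(void)
{
        printf("IS_ENABLED(CONFIG_IPV6) = %d\n", IS_ENABLED(CONFIG_IPV6));
        printf("IS_ENABLED(CONFIG_FOO)  = %d\n", IS_ENABLED(CONFIG_FOO));
        return 0;
}

Because the result is an ordinary integer constant expression, it also works inside plain if () tests, not only in preprocessor conditionals.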
index 7d3b438..96ddb72 100644 (file)
@@ -247,15 +247,3 @@ config MAC80211_DEBUG_COUNTERS
          and show them in debugfs.
 
          If unsure, say N.
-
-config MAC80211_DRIVER_API_TRACER
-       bool "Driver API tracer"
-       depends on MAC80211_DEBUG_MENU
-       depends on EVENT_TRACING
-       help
-         Say Y here to make mac80211 register with the ftrace
-         framework for the driver API -- you can then see which
-         driver methods it is calling and which API functions
-         drivers are calling by looking at the trace.
-
-         If unsure, say Y.
index fdb54e6..d540c3b 100644 (file)
@@ -24,7 +24,8 @@ mac80211-y := \
        util.o \
        wme.o \
        event.o \
-       chan.o
+       chan.o \
+       driver-trace.o
 
 mac80211-$(CONFIG_MAC80211_LEDS) += led.o
 mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -41,7 +42,6 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
 
 mac80211-$(CONFIG_PM) += pm.o
 
-mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o
 CFLAGS_driver-trace.o := -I$(src)
 
 # objects for PID algorithm
index 93b2434..96debba 100644 (file)
@@ -73,8 +73,11 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
        RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
-       printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
-              sta->sta.addr, tid);
+       printk(KERN_DEBUG
+              "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
+              sta->sta.addr, tid,
+              initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
+              (int)reason);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
        if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
@@ -85,7 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
        /* check if this is a self generated aggregation halt */
        if (initiator == WLAN_BACK_RECIPIENT && tx)
                ieee80211_send_delba(sta->sdata, sta->sta.addr,
-                                    tid, 0, reason);
+                                    tid, WLAN_BACK_RECIPIENT, reason);
 
        del_timer_sync(&tid_rx->session_timer);
        del_timer_sync(&tid_rx->reorder_timer);
@@ -109,7 +112,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
        int i;
 
        rcu_read_lock();
-       sta = sta_info_get(sdata, addr);
+       sta = sta_info_get_bss(sdata, addr);
        if (!sta) {
                rcu_read_unlock();
                return;
@@ -177,10 +180,13 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
        memcpy(mgmt->da, da, ETH_ALEN);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+           sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
index 2e4b961..7380287 100644 (file)
@@ -78,10 +78,13 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
        memcpy(mgmt->da, da, ETH_ALEN);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+           sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
@@ -185,6 +188,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
        del_timer_sync(&tid_tx->addba_resp_timer);
+       del_timer_sync(&tid_tx->session_timer);
 
        /*
         * After this packets are no longer handed right through
@@ -396,6 +400,28 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                                     tid_tx->timeout);
 }
 
+/*
+ * After accepting the AddBA Response we activated a timer,
+ * resetting it after each frame that we send.
+ */
+static void sta_tx_agg_session_timer_expired(unsigned long data)
+{
+       /* not an elegant detour, but there is no choice as the timer passes
+        * only one argument, and various sta_info are needed here, so init
+        * flow in sta_info_create gives the TID as data, while the timer_to_id
+        * array gives the sta through container_of */
+       u8 *ptid = (u8 *)data;
+       u8 *timer_to_id = ptid - *ptid;
+       struct sta_info *sta = container_of(timer_to_id, struct sta_info,
+                                        timer_to_tid[0]);
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+       printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
+#endif
+
+       ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+}
+
 int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                                  u16 timeout)
 {
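sta_tx_agg_session_timer_expired() reuses the same trick as the existing ADDBA response timer: each slot of timer_to_tid[] stores its own index, so subtracting the stored TID from the slot's address yields the start of the array, and container_of() then yields the enclosing sta_info. A standalone sketch of that pointer arithmetic (local container_of, toy struct mirroring only the fields needed):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define NUM_TIDS 16

struct sta_info {
        char name[8];
        unsigned char timer_to_tid[NUM_TIDS];   /* slot i holds the value i */
};

/* what the kernel timer would invoke with &sta->timer_to_tid[tid] as data */
static void session_timer_expired(unsigned long data)
{
        unsigned char *ptid = (unsigned char *)data;
        unsigned char *timer_to_tid = ptid - *ptid;         /* back to slot 0 */
        struct sta_info *sta = container_of(timer_to_tid, struct sta_info,
                                            timer_to_tid[0]);

        printf("session timer expired: sta %s, tid %u\n",
               sta->name, (unsigned)*ptid);
}

int main(void)
{
        struct sta_info sta = { .name = "sta0" };
        unsigned int tid;

        for (tid = 0; tid < NUM_TIDS; tid++)
                sta.timer_to_tid[tid] = tid;

        session_timer_expired((unsigned long)&sta.timer_to_tid[7]);
        return 0;
}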
@@ -420,15 +446,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
               pubsta->addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-       /*
-        * The aggregation code is not prepared to handle
-        * anything but STA/AP due to the BSSID handling.
-        * IBSS could work in the code but isn't supported
-        * by drivers or the standard.
-        */
        if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+           sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-           sdata->vif.type != NL80211_IFTYPE_AP)
+           sdata->vif.type != NL80211_IFTYPE_AP &&
+           sdata->vif.type != NL80211_IFTYPE_ADHOC)
                return -EINVAL;
 
        if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
@@ -439,6 +461,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                return -EINVAL;
        }
 
+       /*
+        * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
+        * member of an IBSS, and has no other existing Block Ack agreement
+        * with the recipient STA, then the initiating STA shall transmit a
+        * Probe Request frame to the recipient STA and shall not transmit an
+        * ADDBA Request frame unless it receives a Probe Response frame
+        * from the recipient within dot11ADDBAFailureTimeout.
+        *
+        * The probe request mechanism for ADDBA is currently not implemented,
+        * but we only build up Block Ack session with HT STAs. This information
+        * is set when we receive a bss info from a probe response or a beacon.
+        */
+       if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+           !sta->sta.ht_cap.ht_supported) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+               printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
+                      " does not advertise HT support\n", pubsta->addr);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+               return -EINVAL;
+       }
+
        spin_lock_bh(&sta->lock);
 
        /* we have tried too many times, receiver does not want A-MPDU */
@@ -470,11 +513,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 
        tid_tx->timeout = timeout;
 
-       /* Tx timer */
+       /* response timer */
        tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
        tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
        init_timer(&tid_tx->addba_resp_timer);
 
+       /* tx timer */
+       tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
+       tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+       init_timer(&tid_tx->session_timer);
+
        /* assign a dialog token */
        sta->ampdu_mlme.dialog_token_allocator++;
        tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
@@ -547,7 +595,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
        }
 
        mutex_lock(&local->sta_mtx);
-       sta = sta_info_get(sdata, ra);
+       sta = sta_info_get_bss(sdata, ra);
        if (!sta) {
                mutex_unlock(&local->sta_mtx);
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -676,7 +724,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 
        mutex_lock(&local->sta_mtx);
 
-       sta = sta_info_get(sdata, ra);
+       sta = sta_info_get_bss(sdata, ra);
        if (!sta) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -814,6 +862,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                        ieee80211_agg_tx_operational(local, sta, tid);
 
                sta->ampdu_mlme.addba_req_num[tid] = 0;
+
+               if (tid_tx->timeout)
+                       mod_timer(&tid_tx->session_timer,
+                                 TU_TO_EXP_TIME(tid_tx->timeout));
+
        } else {
                ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
                                                true);
index d06c65f..393b2a4 100644 (file)
@@ -102,6 +102,16 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
        return 0;
 }
 
+static int ieee80211_set_noack_map(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 u16 noack_map)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+       sdata->noack_map = noack_map;
+       return 0;
+}
+
 static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                             u8 key_idx, bool pairwise, const u8 *mac_addr,
                             struct key_params *params)
@@ -411,7 +421,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                                BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
                                BIT(NL80211_STA_FLAG_WME) |
                                BIT(NL80211_STA_FLAG_MFP) |
-                               BIT(NL80211_STA_FLAG_AUTHENTICATED);
+                               BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+                               BIT(NL80211_STA_FLAG_TDLS_PEER);
        if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
        if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
@@ -422,6 +433,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
        if (test_sta_flag(sta, WLAN_STA_AUTH))
                sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
 }
 
 
@@ -488,6 +501,31 @@ static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
                (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
 }
 
+static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
+                                   u8 *resp, size_t resp_len)
+{
+       struct sk_buff *new, *old;
+
+       if (!resp || !resp_len)
+               return -EINVAL;
+
+       old = rtnl_dereference(sdata->u.ap.probe_resp);
+
+       new = dev_alloc_skb(resp_len);
+       if (!new)
+               return -ENOMEM;
+
+       memcpy(skb_put(new, resp_len), resp, resp_len);
+
+       rcu_assign_pointer(sdata->u.ap.probe_resp, new);
+       synchronize_rcu();
+
+       if (old)
+               dev_kfree_skb(old);
+
+       return 0;
+}
+
 /*
  * This handles both adding a beacon and setting new beacon info
  */
@@ -498,6 +536,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
        int new_head_len, new_tail_len;
        int size;
        int err = -EINVAL;
+       u32 changed = 0;
 
        old = rtnl_dereference(sdata->u.ap.beacon);
 
@@ -581,11 +620,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
 
        kfree(old);
 
+       err = ieee80211_set_probe_resp(sdata, params->probe_resp,
+                                      params->probe_resp_len);
+       if (!err)
+               changed |= BSS_CHANGED_AP_PROBE_RESP;
+
        ieee80211_config_ap_ssid(sdata, params);
+       changed |= BSS_CHANGED_BEACON_ENABLED |
+                  BSS_CHANGED_BEACON |
+                  BSS_CHANGED_SSID;
 
-       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
-                                               BSS_CHANGED_BEACON |
-                                               BSS_CHANGED_SSID);
+       ieee80211_bss_info_change_notify(sdata, changed);
        return 0;
 }
 
@@ -594,6 +639,8 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
 {
        struct ieee80211_sub_if_data *sdata;
        struct beacon_data *old;
+       struct ieee80211_sub_if_data *vlan;
+       int ret;
 
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
@@ -601,7 +648,24 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
        if (old)
                return -EALREADY;
 
-       return ieee80211_config_beacon(sdata, params);
+       ret = ieee80211_config_beacon(sdata, params);
+       if (ret)
+               return ret;
+
+       /*
+        * Apply control port protocol, this allows us to
+        * not encrypt dynamic WEP control frames.
+        */
+       sdata->control_port_protocol = params->crypto.control_port_ethertype;
+       sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt;
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
+               vlan->control_port_protocol =
+                       params->crypto.control_port_ethertype;
+               vlan->control_port_no_encrypt =
+                       params->crypto.control_port_no_encrypt;
+       }
+
+       return 0;
 }
 
 static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -778,7 +842,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
        }
 
        if (params->ht_capa)
-               ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+               ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                                  params->ht_capa,
                                                  &sta->sta.ht_cap);
 
@@ -847,7 +911,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 
        sta_apply_parameters(local, sta, params);
 
-       rate_control_rate_init(sta);
+       /*
+        * for TDLS, rate control should be initialized only when supported
+        * rates are known.
+        */
+       if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+               rate_control_rate_init(sta);
 
        layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
                sdata->vif.type == NL80211_IFTYPE_AP;
@@ -931,6 +1000,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
 
        sta_apply_parameters(local, sta, params);
 
+       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
+               rate_control_rate_init(sta);
+
        rcu_read_unlock();
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -1123,6 +1195,8 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
 {
        u8 *new_ie;
        const u8 *old_ie;
+       struct ieee80211_sub_if_data *sdata = container_of(ifmsh,
+                                       struct ieee80211_sub_if_data, u.mesh);
 
        /* allocate information elements */
        new_ie = NULL;
@@ -1149,6 +1223,10 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
        if (setup->is_secure)
                ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
 
+       /* mcast rate setting in Mesh Node */
+       memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
+                                               sizeof(setup->mcast_rate));
+
        return 0;
 }
 
@@ -1194,6 +1272,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
        if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask))
                conf->dot11MeshHWMPpreqMinInterval =
                        nconf->dot11MeshHWMPpreqMinInterval;
+       if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, mask))
+               conf->dot11MeshHWMPperrMinInterval =
+                       nconf->dot11MeshHWMPperrMinInterval;
        if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
                           mask))
                conf->dot11MeshHWMPnetDiameterTraversalTime =
@@ -1394,7 +1475,7 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
            (old_oper_type != local->_oper_channel_type))
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-       if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+       if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
            old_vif_oper_type != sdata->vif.bss_conf.channel_type)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
 
@@ -1917,7 +1998,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
                             enum nl80211_channel_type channel_type,
                             bool channel_type_valid, unsigned int wait,
                             const u8 *buf, size_t len, bool no_cck,
-                            u64 *cookie)
+                            bool dont_wait_for_ack, u64 *cookie)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
@@ -1925,10 +2006,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
        struct sta_info *sta;
        struct ieee80211_work *wk;
        const struct ieee80211_mgmt *mgmt = (void *)buf;
-       u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
-                   IEEE80211_TX_CTL_REQ_TX_STATUS;
+       u32 flags;
        bool is_offchan = false;
 
+       if (dont_wait_for_ack)
+               flags = IEEE80211_TX_CTL_NO_ACK;
+       else
+               flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+                       IEEE80211_TX_CTL_REQ_TX_STATUS;
+
        /* Check that we are on the requested channel for transmission */
        if (chan != local->tmp_channel &&
            chan != local->oper_channel)
@@ -2488,6 +2574,82 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 }
 
+static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
+                                 const u8 *peer, u64 *cookie)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_qos_hdr *nullfunc;
+       struct sk_buff *skb;
+       int size = sizeof(*nullfunc);
+       __le16 fc;
+       bool qos;
+       struct ieee80211_tx_info *info;
+       struct sta_info *sta;
+
+       rcu_read_lock();
+       sta = sta_info_get(sdata, peer);
+       if (sta) {
+               qos = test_sta_flag(sta, WLAN_STA_WME);
+               rcu_read_unlock();
+       } else {
+               rcu_read_unlock();
+               return -ENOLINK;
+       }
+
+       if (qos) {
+               fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                IEEE80211_STYPE_QOS_NULLFUNC |
+                                IEEE80211_FCTL_FROMDS);
+       } else {
+               size -= 2;
+               fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+                                IEEE80211_STYPE_NULLFUNC |
+                                IEEE80211_FCTL_FROMDS);
+       }
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb->dev = dev;
+
+       skb_reserve(skb, local->hw.extra_tx_headroom);
+
+       nullfunc = (void *) skb_put(skb, size);
+       nullfunc->frame_control = fc;
+       nullfunc->duration_id = 0;
+       memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+       memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+       memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+       nullfunc->seq_ctrl = 0;
+
+       info = IEEE80211_SKB_CB(skb);
+
+       info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
+                      IEEE80211_TX_INTFL_NL80211_FRAME_TX;
+
+       skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+       skb->priority = 7;
+       if (qos)
+               nullfunc->qos_ctrl = cpu_to_le16(7);
+
+       local_bh_disable();
+       ieee80211_xmit(sdata, skb);
+       local_bh_enable();
+
+       *cookie = (unsigned long) skb;
+       return 0;
+}
+
+static struct ieee80211_channel *
+ieee80211_wiphy_get_channel(struct wiphy *wiphy)
+{
+       struct ieee80211_local *local = wiphy_priv(wiphy);
+
+       return local->oper_channel;
+}
+
 struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
@@ -2553,4 +2715,7 @@ struct cfg80211_ops mac80211_config_ops = {
        .set_rekey_data = ieee80211_set_rekey_data,
        .tdls_oper = ieee80211_tdls_oper,
        .tdls_mgmt = ieee80211_tdls_mgmt,
+       .probe_client = ieee80211_probe_client,
+       .get_channel = ieee80211_wiphy_get_channel,
+       .set_noack_map = ieee80211_set_noack_map,
 };
index 883996b..90baea5 100644 (file)
@@ -97,40 +97,6 @@ static const struct file_operations reset_ops = {
        .llseek = noop_llseek,
 };
 
-static ssize_t noack_read(struct file *file, char __user *user_buf,
-                         size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-
-       return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
-                                     local->wifi_wme_noack_test);
-}
-
-static ssize_t noack_write(struct file *file,
-                          const char __user *user_buf,
-                          size_t count, loff_t *ppos)
-{
-       struct ieee80211_local *local = file->private_data;
-       char buf[10];
-       size_t len;
-
-       len = min(count, sizeof(buf) - 1);
-       if (copy_from_user(buf, user_buf, len))
-               return -EFAULT;
-       buf[len] = '\0';
-
-       local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);
-
-       return count;
-}
-
-static const struct file_operations noack_ops = {
-       .read = noack_read,
-       .write = noack_write,
-       .open = mac80211_open_file_generic,
-       .llseek = default_llseek,
-};
-
 static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
 {
@@ -190,7 +156,7 @@ static ssize_t uapsd_max_sp_len_write(struct file *file,
                return -EFAULT;
        buf[len] = '\0';
 
-       ret = strict_strtoul(buf, 0, &val);
+       ret = kstrtoul(buf, 0, &val);
 
        if (ret)
                return -EINVAL;
@@ -398,7 +364,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
        DEBUGFS_ADD(wep_iv);
        DEBUGFS_ADD(queues);
        DEBUGFS_ADD_MODE(reset, 0200);
-       DEBUGFS_ADD(noack);
        DEBUGFS_ADD(uapsd_queues);
        DEBUGFS_ADD(uapsd_max_sp_len);
        DEBUGFS_ADD(channel_type);
index 9352819..8df2891 100644 (file)
@@ -405,6 +405,8 @@ IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
                u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
                u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
+               u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
                u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
 IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
@@ -534,6 +536,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
        MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
        MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
        MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval);
+       MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval);
        MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime);
        MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
        MESHPARAMS_ADD(path_refresh_time);
index 3110cbd..2406b3e 100644 (file)
@@ -63,10 +63,10 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
        test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
 
        int res = scnprintf(buf, sizeof(buf),
-                           "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+                           "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                            TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
                            TEST(PS_DRIVER), TEST(AUTHORIZED),
-                           TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+                           TEST(SHORT_PREAMBLE),
                            TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
                            TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
                            TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
index 5f165d7..e8960ae 100644 (file)
@@ -5,11 +5,34 @@
 #include "ieee80211_i.h"
 #include "driver-trace.h"
 
+static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
+{
+       WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER));
+}
+
+static inline struct ieee80211_sub_if_data *
+get_bss_sdata(struct ieee80211_sub_if_data *sdata)
+{
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
+                                    u.ap);
+
+       return sdata;
+}
+
 static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
 {
        local->ops->tx(&local->hw, skb);
 }
 
+static inline void drv_tx_frags(struct ieee80211_local *local,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta,
+                               struct sk_buff_head *skbs)
+{
+       local->ops->tx_frags(&local->hw, vif, sta, skbs);
+}
+
 static inline int drv_start(struct ieee80211_local *local)
 {
        int ret;
@@ -69,15 +92,23 @@ static inline int drv_resume(struct ieee80211_local *local)
 #endif
 
 static inline int drv_add_interface(struct ieee80211_local *local,
-                                   struct ieee80211_vif *vif)
+                                   struct ieee80211_sub_if_data *sdata)
 {
        int ret;
 
        might_sleep();
 
-       trace_drv_add_interface(local, vif_to_sdata(vif));
-       ret = local->ops->add_interface(&local->hw, vif);
+       if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+                   sdata->vif.type == NL80211_IFTYPE_MONITOR))
+               return -EINVAL;
+
+       trace_drv_add_interface(local, sdata);
+       ret = local->ops->add_interface(&local->hw, &sdata->vif);
        trace_drv_return_int(local, ret);
+
+       if (ret == 0)
+               sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+
        return ret;
 }
 
@@ -89,6 +120,8 @@ static inline int drv_change_interface(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_change_interface(local, sdata, type, p2p);
        ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
        trace_drv_return_int(local, ret);
@@ -96,12 +129,15 @@ static inline int drv_change_interface(struct ieee80211_local *local,
 }
 
 static inline void drv_remove_interface(struct ieee80211_local *local,
-                                       struct ieee80211_vif *vif)
+                                       struct ieee80211_sub_if_data *sdata)
 {
        might_sleep();
 
-       trace_drv_remove_interface(local, vif_to_sdata(vif));
-       local->ops->remove_interface(&local->hw, vif);
+       check_sdata_in_driver(sdata);
+
+       trace_drv_remove_interface(local, sdata);
+       local->ops->remove_interface(&local->hw, &sdata->vif);
+       sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
        trace_drv_return_void(local);
 }
 
@@ -124,6 +160,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
 {
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_bss_info_changed(local, sdata, info, changed);
        if (local->ops->bss_info_changed)
                local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
@@ -139,6 +177,8 @@ static inline int drv_tx_sync(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_tx_sync(local, sdata, bssid, type);
        if (local->ops->tx_sync)
                ret = local->ops->tx_sync(&local->hw, &sdata->vif,
@@ -154,6 +194,8 @@ static inline void drv_finish_tx_sync(struct ieee80211_local *local,
 {
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_finish_tx_sync(local, sdata, bssid, type);
        if (local->ops->finish_tx_sync)
                local->ops->finish_tx_sync(&local->hw, &sdata->vif,
@@ -211,6 +253,8 @@ static inline int drv_set_key(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_set_key(local, cmd, sdata, sta, key);
        ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
        trace_drv_return_int(local, ret);
@@ -228,6 +272,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
        if (sta)
                ista = &sta->sta;
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
        if (local->ops->update_tkip_key)
                local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
@@ -243,6 +289,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_hw_scan(local, sdata);
        ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
        trace_drv_return_int(local, ret);
@@ -254,6 +302,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
 {
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_cancel_hw_scan(local, sdata);
        local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
        trace_drv_return_void(local);
@@ -269,6 +319,8 @@ drv_sched_scan_start(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_sched_scan_start(local, sdata);
        ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
                                              req, ies);
@@ -281,6 +333,8 @@ static inline void drv_sched_scan_stop(struct ieee80211_local *local,
 {
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_sched_scan_stop(local, sdata);
        local->ops->sched_scan_stop(&local->hw, &sdata->vif);
        trace_drv_return_void(local);
@@ -377,6 +431,9 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
                                  enum sta_notify_cmd cmd,
                                  struct ieee80211_sta *sta)
 {
+       sdata = get_bss_sdata(sdata);
+       check_sdata_in_driver(sdata);
+
        trace_drv_sta_notify(local, sdata, cmd, sta);
        if (local->ops->sta_notify)
                local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
@@ -391,6 +448,9 @@ static inline int drv_sta_add(struct ieee80211_local *local,
 
        might_sleep();
 
+       sdata = get_bss_sdata(sdata);
+       check_sdata_in_driver(sdata);
+
        trace_drv_sta_add(local, sdata, sta);
        if (local->ops->sta_add)
                ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
@@ -406,6 +466,9 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
 {
        might_sleep();
 
+       sdata = get_bss_sdata(sdata);
+       check_sdata_in_driver(sdata);
+
        trace_drv_sta_remove(local, sdata, sta);
        if (local->ops->sta_remove)
                local->ops->sta_remove(&local->hw, &sdata->vif, sta);
@@ -421,6 +484,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_conf_tx(local, sdata, queue, params);
        if (local->ops->conf_tx)
                ret = local->ops->conf_tx(&local->hw, &sdata->vif,
@@ -436,6 +501,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_get_tsf(local, sdata);
        if (local->ops->get_tsf)
                ret = local->ops->get_tsf(&local->hw, &sdata->vif);
@@ -449,6 +516,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local,
 {
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_set_tsf(local, sdata, tsf);
        if (local->ops->set_tsf)
                local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
@@ -460,6 +529,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local,
 {
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_reset_tsf(local, sdata);
        if (local->ops->reset_tsf)
                local->ops->reset_tsf(&local->hw, &sdata->vif);
@@ -489,6 +560,9 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
 
        might_sleep();
 
+       sdata = get_bss_sdata(sdata);
+       check_sdata_in_driver(sdata);
+
        trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
 
        if (local->ops->ampdu_action)
@@ -644,6 +718,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
 
        might_sleep();
 
+       check_sdata_in_driver(sdata);
+
        trace_drv_set_bitrate_mask(local, sdata, mask);
        if (local->ops->set_bitrate_mask)
                ret = local->ops->set_bitrate_mask(&local->hw,
@@ -657,6 +733,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local,
                                      struct ieee80211_sub_if_data *sdata,
                                      struct cfg80211_gtk_rekey_data *data)
 {
+       check_sdata_in_driver(sdata);
+
        trace_drv_set_rekey_data(local, sdata, data);
        if (local->ops->set_rekey_data)
                local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
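
The driver-ops changes above all follow one pattern: drv_add_interface() sets IEEE80211_SDATA_IN_DRIVER only when the driver accepted the interface, drv_remove_interface() clears it again, and every other per-interface callback first calls check_sdata_in_driver() (after get_bss_sdata() has mapped an AP_VLAN onto its owning AP). A minimal user-space sketch of that bookkeeping; the struct and names below are invented and only model the flag, not the real mac80211 types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the sub-interface; only the "was this
 * interface handed to the driver?" flag from the patch is modelled. */
struct vif {
	const char *name;
	bool in_driver;			/* mirrors IEEE80211_SDATA_IN_DRIVER */
};

static void check_in_driver(const struct vif *v)
{
	/* like check_sdata_in_driver(): warn loudly, but keep going */
	if (!v->in_driver)
		fprintf(stderr, "WARN: %s used before add_interface\n", v->name);
}

static int drv_add_interface(struct vif *v, int driver_ret)
{
	if (driver_ret == 0)
		v->in_driver = true;	/* mark only on successful add */
	return driver_ret;
}

static void drv_remove_interface(struct vif *v)
{
	check_in_driver(v);
	v->in_driver = false;		/* driver no longer knows this vif */
}

static void drv_bss_info_changed(struct vif *v)
{
	check_in_driver(v);		/* every per-vif op checks first */
}

int main(void)
{
	struct vif v = { .name = "wlan0", .in_driver = false };

	drv_bss_info_changed(&v);	/* warns: not added yet */
	drv_add_interface(&v, 0);
	drv_bss_info_changed(&v);	/* silent now */
	drv_remove_interface(&v);
	return 0;
}
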
index 2af4fca..6e9df8f 100644 (file)
@@ -5,17 +5,6 @@
 #include <net/mac80211.h>
 #include "ieee80211_i.h"
 
-#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(...)
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(evt_class, name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211
 
index f0fb737..0fd9c2a 100644 (file)
 #include "ieee80211_i.h"
 #include "rate.h"
 
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
+bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
+{
+       const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+       if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
+           !(sdata->u.mgd.ht_capa.cap_info & flg))
+               return true;
+       return false;
+}
+
+static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee80211_sta_ht_cap *ht_cap,
+                                 u16 flag)
+{
+       __le16 le_flag = cpu_to_le16(flag);
+       if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
+               if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+                       ht_cap->cap &= ~flag;
+       }
+}
+
+void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+                                    struct ieee80211_sta_ht_cap *ht_cap)
+{
+       u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
+       u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+       int i;
+
+       if (sdata->vif.type != NL80211_IFTYPE_STATION) {
+               /* AP interfaces call this code when adding new stations,
+                * so just silently ignore non station interfaces.
+                */
+               return;
+       }
+
+       /* NOTE: If you add more over-rides here, update register_hw
+        * ht_capa_mod_mask logic in main.c as well.
+        * And, if this method can ever change ht_cap.ht_supported, fix
+        * the check in ieee80211_add_ht_ie.
+        */
+
+       /* check for HT over-rides, MCS rates first. */
+       for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+               u8 m = smask[i];
+               ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */
+               /* Add back rates that are supported */
+               ht_cap->mcs.rx_mask[i] |= (m & scaps[i]);
+       }
+
+       /* Force removal of HT-40 capabilities? */
+       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+
+       /* Allow user to disable the max-AMSDU bit. */
+       __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+
+       /* Allow user to decrease AMPDU factor */
+       if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+           IEEE80211_HT_AMPDU_PARM_FACTOR) {
+               u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
+                       & IEEE80211_HT_AMPDU_PARM_FACTOR;
+               if (n < ht_cap->ampdu_factor)
+                       ht_cap->ampdu_factor = n;
+       }
+
+       /* Allow the user to increase AMPDU density. */
+       if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+           IEEE80211_HT_AMPDU_PARM_DENSITY) {
+               u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+                       IEEE80211_HT_AMPDU_PARM_DENSITY)
+                       >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
+               if (n > ht_cap->ampdu_density)
+                       ht_cap->ampdu_density = n;
+       }
+}
+
+
+void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+                                      struct ieee80211_supported_band *sband,
                                       struct ieee80211_ht_cap *ht_cap_ie,
                                       struct ieee80211_sta_ht_cap *ht_cap)
 {
@@ -103,6 +180,12 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
        /* handle MCS rate 32 too */
        if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
                ht_cap->mcs.rx_mask[32/8] |= 1;
+
+       /*
+        * If the user has specified capability over-rides, take care
+        * of that here.
+        */
+       ieee80211_apply_htcap_overrides(sdata, ht_cap);
 }
 
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx)
@@ -196,10 +279,13 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
        memcpy(mgmt->da, da, ETH_ALEN);
        memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
        if (sdata->vif.type == NL80211_IFTYPE_AP ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+           sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
        else if (sdata->vif.type == NL80211_IFTYPE_STATION)
                memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
 
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
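
The new ieee80211_apply_htcap_overrides() lets a masked capability bit only be cleared, never set, replaces masked MCS bits with the user's requested values, and only lets the A-MPDU factor shrink and the density grow. A standalone user-space sketch of the masking rules; the bit values and struct below are invented, not the real IEEE80211_HT_CAP_* definitions:

#include <stdint.h>
#include <stdio.h>

#define CAP_SUP_WIDTH_20_40	0x0002	/* illustrative values only */
#define CAP_SGI_40		0x0040
#define MCS_MASK_LEN		10

struct override {
	uint16_t capa;				/* requested bit values     */
	uint16_t capa_mask;			/* which bits are valid     */
	uint8_t  mcs[MCS_MASK_LEN];		/* requested MCS rx mask    */
	uint8_t  mcs_mask[MCS_MASK_LEN];	/* which MCS bits are valid */
};

/* For masked MCS bits the user's requested value wins and unmasked bits
 * keep the hardware value; capability bits can only be cleared, never set. */
static void apply_overrides(const struct override *o,
			    uint16_t *cap, uint8_t mcs[MCS_MASK_LEN])
{
	int i;

	for (i = 0; i < MCS_MASK_LEN; i++) {
		mcs[i] &= ~o->mcs_mask[i];		/* drop masked bits    */
		mcs[i] |= o->mcs_mask[i] & o->mcs[i];	/* take user's values  */
	}

	if ((o->capa_mask & CAP_SUP_WIDTH_20_40) &&
	    !(o->capa & CAP_SUP_WIDTH_20_40))
		*cap &= ~CAP_SUP_WIDTH_20_40;		/* user disabled HT40  */
	if ((o->capa_mask & CAP_SGI_40) && !(o->capa & CAP_SGI_40))
		*cap &= ~CAP_SGI_40;
}

int main(void)
{
	struct override o = { .capa = 0, .capa_mask = CAP_SUP_WIDTH_20_40 };
	uint16_t cap = CAP_SUP_WIDTH_20_40 | CAP_SGI_40;
	uint8_t mcs[MCS_MASK_LEN] = { 0xff, 0xff };

	apply_overrides(&o, &cap, mcs);
	printf("cap = 0x%04x\n", cap);	/* HT40 cleared, SGI-40 kept */
	return 0;
}
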
index ede9a8b..3f830ac 100644 (file)
@@ -77,6 +77,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        struct cfg80211_bss *bss;
        u32 bss_change;
        u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
+       enum nl80211_channel_type channel_type;
 
        lockdep_assert_held(&ifibss->mtx);
 
@@ -97,6 +98,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        /* if merging, indicate to driver that we leave the old IBSS */
        if (sdata->vif.bss_conf.ibss_joined) {
                sdata->vif.bss_conf.ibss_joined = false;
+               netif_carrier_off(sdata->dev);
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
        }
 
@@ -104,8 +106,16 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-       local->oper_channel = chan;
-       WARN_ON(!ieee80211_set_channel_type(local, sdata, NL80211_CHAN_NO_HT));
+       channel_type = ifibss->channel_type;
+       if (channel_type > NL80211_CHAN_HT20 &&
+           !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+               channel_type = NL80211_CHAN_HT20;
+       if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
+               /* can only fail due to HT40+/- mismatch */
+               channel_type = NL80211_CHAN_HT20;
+               WARN_ON(!ieee80211_set_channel_type(local, sdata,
+                                                   NL80211_CHAN_HT20));
+       }
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
        sband = local->hw.wiphy->bands[chan->band];
@@ -171,6 +181,19 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                memcpy(skb_put(skb, ifibss->ie_len),
                       ifibss->ie, ifibss->ie_len);
 
+       /* add HT capability and information IEs */
+       if (channel_type && sband->ht_cap.ht_supported) {
+               pos = skb_put(skb, 4 +
+                                  sizeof(struct ieee80211_ht_cap) +
+                                  sizeof(struct ieee80211_ht_info));
+               pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+                                               sband->ht_cap.cap);
+               pos = ieee80211_ie_build_ht_info(pos,
+                                                &sband->ht_cap,
+                                                chan,
+                                                channel_type);
+       }
+
        if (local->hw.queues >= 4) {
                pos = skb_put(skb, 9);
                *pos++ = WLAN_EID_VENDOR_SPECIFIC;
@@ -194,6 +217,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        bss_change |= BSS_CHANGED_BEACON;
        bss_change |= BSS_CHANGED_BEACON_ENABLED;
        bss_change |= BSS_CHANGED_BASIC_RATES;
+       bss_change |= BSS_CHANGED_HT;
        bss_change |= BSS_CHANGED_IBSS;
        sdata->vif.bss_conf.ibss_joined = true;
        ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -207,6 +231,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
                                        mgmt, skb->len, 0, GFP_KERNEL);
        cfg80211_put_bss(bss);
+       netif_carrier_on(sdata->dev);
        cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
 }
 
@@ -266,6 +291,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        u64 beacon_timestamp, rx_timestamp;
        u32 supp_rates = 0;
        enum ieee80211_band band = rx_status->band;
+       struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+       bool rates_updated = false;
 
        if (elems->ds_params && elems->ds_params_len == 1)
                freq = ieee80211_channel_to_frequency(elems->ds_params[0],
@@ -305,7 +332,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                                prev_rates,
                                                sta->sta.supp_rates[band]);
 #endif
-                                       rate_control_rate_init(sta);
+                                       rates_updated = true;
                                }
                        } else
                                sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
@@ -316,6 +343,39 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                if (sta && elems->wmm_info)
                        set_sta_flag(sta, WLAN_STA_WME);
 
+               if (sta && elems->ht_info_elem && elems->ht_cap_elem &&
+                   sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
+                       /* we both use HT */
+                       struct ieee80211_sta_ht_cap sta_ht_cap_new;
+                       enum nl80211_channel_type channel_type =
+                               ieee80211_ht_info_to_channel_type(
+                                                       elems->ht_info_elem);
+
+                       ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+                                                         elems->ht_cap_elem,
+                                                         &sta_ht_cap_new);
+
+                       /*
+                        * fall back to HT20 if we don't use HT40 ourselves,
+                        * or if the peer uses the other extension channel
+                        */
+                       if ((channel_type == NL80211_CHAN_HT40MINUS ||
+                            channel_type == NL80211_CHAN_HT40PLUS) &&
+                           channel_type != sdata->u.ibss.channel_type)
+                               sta_ht_cap_new.cap &=
+                                       ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+                       if (memcmp(&sta->sta.ht_cap, &sta_ht_cap_new,
+                                  sizeof(sta_ht_cap_new))) {
+                               memcpy(&sta->sta.ht_cap, &sta_ht_cap_new,
+                                      sizeof(sta_ht_cap_new));
+                               rates_updated = true;
+                       }
+               }
+
+               if (sta && rates_updated)
+                       rate_control_rate_init(sta);
+
                rcu_read_unlock();
        }
 
@@ -894,12 +954,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
                        struct cfg80211_ibss_params *params)
 {
        struct sk_buff *skb;
+       u32 changed = 0;
 
        skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
-                           36 /* bitrates */ +
-                           34 /* SSID */ +
-                           3  /* DS params */ +
-                           4  /* IBSS params */ +
+                           sizeof(struct ieee80211_hdr_3addr) +
+                           12 /* struct ieee80211_mgmt.u.beacon */ +
+                           2 + IEEE80211_MAX_SSID_LEN /* max SSID */ +
+                           2 + 8 /* max Supported Rates */ +
+                           3 /* max DS params */ +
+                           4 /* IBSS params */ +
+                           2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+                           2 + sizeof(struct ieee80211_ht_cap) +
+                           2 + sizeof(struct ieee80211_ht_info) +
                            params->ie_len);
        if (!skb)
                return -ENOMEM;
@@ -920,13 +986,15 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
        sdata->u.ibss.channel = params->channel;
+       sdata->u.ibss.channel_type = params->channel_type;
        sdata->u.ibss.fixed_channel = params->channel_fixed;
 
        /* fix ourselves to that channel now already */
        if (params->channel_fixed) {
                sdata->local->oper_channel = params->channel;
-               WARN_ON(!ieee80211_set_channel_type(sdata->local, sdata,
-                                                   NL80211_CHAN_NO_HT));
+               if (!ieee80211_set_channel_type(sdata->local, sdata,
+                                              params->channel_type))
+                       return -EINVAL;
        }
 
        if (params->ie) {
@@ -949,6 +1017,23 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        ieee80211_recalc_idle(sdata->local);
        mutex_unlock(&sdata->local->mtx);
 
+       /*
+        * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
+        * reserved, but an HT STA shall protect HT transmissions as though
+        * the HT Protection field were set to non-HT mixed mode.
+        *
+        * In an IBSS, the RIFS Mode field of the HT Operation element is
+        * also reserved, but an HT STA shall operate as though this field
+        * were set to 1.
+        */
+
+       sdata->vif.bss_conf.ht_operation_mode |=
+                 IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED
+               | IEEE80211_HT_PARAM_RIFS_MODE;
+
+       changed |= BSS_CHANGED_HT;
+       ieee80211_bss_info_change_notify(sdata, changed);
+
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 
        return 0;
@@ -990,6 +1075,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        }
 
        sta_info_flush(sdata->local, sdata);
+       netif_carrier_off(sdata->dev);
 
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
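
With HT joins now allowed, __ieee80211_sta_join_ibss() no longer forces NL80211_CHAN_NO_HT: a requested HT40 channel type is demoted to HT20 when cfg80211_can_beacon_sec_chan() rejects the secondary channel, and demoted again if ieee80211_set_channel_type() fails because of an HT40+/- mismatch. A small sketch of that demotion order, with both checks replaced by invented stand-in predicates:

#include <stdbool.h>
#include <stdio.h>

enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40MINUS, CHAN_HT40PLUS };

/* Stand-ins for the regulatory and channel-type checks (made up here). */
static bool sec_chan_allowed(enum chan_type t)
{
	return t != CHAN_HT40PLUS;	/* pretend HT40+ is not permitted */
}

static bool set_channel_type(enum chan_type t)
{
	(void)t;
	return true;			/* pretend no HT40+/- conflict exists */
}

static enum chan_type ibss_pick_channel_type(enum chan_type wanted)
{
	enum chan_type t = wanted;

	/* demote HT40 when the secondary channel may not be used */
	if (t > CHAN_HT20 && !sec_chan_allowed(t))
		t = CHAN_HT20;

	/* demote again if another vif already uses the other extension */
	if (!set_channel_type(t))
		t = CHAN_HT20;

	return t;
}

int main(void)
{
	printf("%d\n", ibss_pick_channel_type(CHAN_HT40PLUS));	 /* -> HT20 (1)  */
	printf("%d\n", ibss_pick_channel_type(CHAN_HT40MINUS)); /* -> HT40- (2) */
	return 0;
}
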
index ea10a51..96fe754 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/etherdevice.h>
 #include <linux/leds.h>
+#include <linux/idr.h>
 #include <net/ieee80211_radiotap.h>
 #include <net/cfg80211.h>
 #include <net/mac80211.h>
@@ -141,6 +142,7 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
 
 struct ieee80211_tx_data {
        struct sk_buff *skb;
+       struct sk_buff_head skbs;
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
        struct sta_info *sta;
@@ -184,12 +186,15 @@ enum ieee80211_packet_rx_flags {
  * enum ieee80211_rx_flags - RX data flags
  *
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
+ * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
+ *     to cfg80211_report_obss_beacon().
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
  */
 enum ieee80211_rx_flags {
        IEEE80211_RX_CMNTR              = BIT(0),
+       IEEE80211_RX_BEACON_REPORTED    = BIT(1),
 };
 
 struct ieee80211_rx_data {
@@ -228,6 +233,7 @@ struct beacon_data {
 
 struct ieee80211_if_ap {
        struct beacon_data __rcu *beacon;
+       struct sk_buff __rcu *probe_resp;
 
        struct list_head vlans;
 
@@ -443,6 +449,9 @@ struct ieee80211_if_managed {
         */
        int rssi_min_thold, rssi_max_thold;
        int last_ave_beacon_signal;
+
+       struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
+       struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
 };
 
 struct ieee80211_if_ibss {
@@ -465,6 +474,7 @@ struct ieee80211_if_ibss {
        u8 ssid_len, ie_len;
        u8 *ie;
        struct ieee80211_channel *channel;
+       enum nl80211_channel_type channel_type;
 
        unsigned long ibss_join_req;
        /* probe response/beacon for IBSS */
@@ -505,7 +515,9 @@ struct ieee80211_if_mesh {
        atomic_t mpaths;
        /* Timestamp of last SN update */
        unsigned long last_sn_update;
-       /* Timestamp of last SN sent */
+       /* Time when it's ok to send next PERR */
+       unsigned long next_perr;
+       /* Timestamp of last PREQ sent */
        unsigned long last_preq;
        struct mesh_rmc *rmc;
        spinlock_t mesh_preq_queue_lock;
@@ -543,6 +555,7 @@ struct ieee80211_if_mesh {
  *     associated stations and deliver multicast frames both
  *     back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
+ * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
  */
 enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_ALLMULTI                = BIT(0),
@@ -550,6 +563,7 @@ enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_OPERATING_GMODE         = BIT(2),
        IEEE80211_SDATA_DONT_BRIDGE_PACKETS     = BIT(3),
        IEEE80211_SDATA_DISCONNECT_RESUME       = BIT(4),
+       IEEE80211_SDATA_IN_DRIVER               = BIT(5),
 };
 
 /**
@@ -600,6 +614,9 @@ struct ieee80211_sub_if_data {
        struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
        unsigned int fragment_next;
 
+       /* TID bitmap for NoAck policy */
+       u16 noack_map;
+
        struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
        struct ieee80211_key __rcu *default_unicast_key;
        struct ieee80211_key __rcu *default_multicast_key;
@@ -722,17 +739,16 @@ enum {
  *     operating channel
  * @SCAN_SET_CHANNEL: Set the next channel to be scanned
  * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
- * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP
- *     about us leaving the channel and stop all associated STA interfaces
- * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the
- *     AP about us being back and restart all associated STA interfaces
+ * @SCAN_SUSPEND: Suspend the scan and go back to the operating channel
+ *     to send out data
+ * @SCAN_RESUME: Resume the scan and scan the next channel
  */
 enum mac80211_scan_state {
        SCAN_DECISION,
        SCAN_SET_CHANNEL,
        SCAN_SEND_PROBE,
-       SCAN_LEAVE_OPER_CHANNEL,
-       SCAN_ENTER_OPER_CHANNEL,
+       SCAN_SUSPEND,
+       SCAN_RESUME,
 };
 
 struct ieee80211_local {
@@ -951,7 +967,6 @@ struct ieee80211_local {
        int total_ps_buffered; /* total number of all buffered unicast and
                                * multicast packets for power saving stations
                                */
-       int wifi_wme_noack_test;
        unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
 
        /*
@@ -1012,6 +1027,9 @@ struct ieee80211_local {
        u32 hw_roc_cookie;
        bool hw_roc_for_tx;
 
+       struct idr ack_status_frames;
+       spinlock_t ack_status_lock;
+
        /* dummy netdev for use w/ NAPI */
        struct net_device napi_dev;
 
@@ -1030,6 +1048,69 @@ struct ieee80211_ra_tid {
        u16 tid;
 };
 
+/* Parsed Information Elements */
+struct ieee802_11_elems {
+       u8 *ie_start;
+       size_t total_len;
+
+       /* pointers to IEs */
+       u8 *ssid;
+       u8 *supp_rates;
+       u8 *fh_params;
+       u8 *ds_params;
+       u8 *cf_params;
+       struct ieee80211_tim_ie *tim;
+       u8 *ibss_params;
+       u8 *challenge;
+       u8 *wpa;
+       u8 *rsn;
+       u8 *erp_info;
+       u8 *ext_supp_rates;
+       u8 *wmm_info;
+       u8 *wmm_param;
+       struct ieee80211_ht_cap *ht_cap_elem;
+       struct ieee80211_ht_info *ht_info_elem;
+       struct ieee80211_meshconf_ie *mesh_config;
+       u8 *mesh_id;
+       u8 *peering;
+       u8 *preq;
+       u8 *prep;
+       u8 *perr;
+       struct ieee80211_rann_ie *rann;
+       u8 *ch_switch_elem;
+       u8 *country_elem;
+       u8 *pwr_constr_elem;
+       u8 *quiet_elem; /* first quiet element */
+       u8 *timeout_int;
+
+       /* length of them, respectively */
+       u8 ssid_len;
+       u8 supp_rates_len;
+       u8 fh_params_len;
+       u8 ds_params_len;
+       u8 cf_params_len;
+       u8 tim_len;
+       u8 ibss_params_len;
+       u8 challenge_len;
+       u8 wpa_len;
+       u8 rsn_len;
+       u8 erp_info_len;
+       u8 ext_supp_rates_len;
+       u8 wmm_info_len;
+       u8 wmm_param_len;
+       u8 mesh_id_len;
+       u8 peering_len;
+       u8 preq_len;
+       u8 prep_len;
+       u8 perr_len;
+       u8 ch_switch_elem_len;
+       u8 country_elem_len;
+       u8 pwr_constr_elem_len;
+       u8 quiet_elem_len;
+       u8 num_of_quiet_elem;   /* can be more than one */
+       u8 timeout_int_len;
+};
+
 static inline struct ieee80211_local *hw_to_local(
        struct ieee80211_hw *hw)
 {
@@ -1140,13 +1221,11 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 
 /* off-channel helpers */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
 void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
                                        bool tell_ap);
 void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
                                    bool offchannel_ps_enable);
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing,
                                 bool offchannel_ps_disable);
 void ieee80211_hw_roc_setup(struct ieee80211_local *local);
 
@@ -1179,7 +1258,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                                       struct net_device *dev);
 
 /* HT */
-void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
+bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
+void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
+                                    struct ieee80211_sta_ht_cap *ht_cap);
+void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
+                                      struct ieee80211_supported_band *sband,
                                       struct ieee80211_ht_cap *ht_cap_ie,
                                       struct ieee80211_sta_ht_cap *ht_cap);
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
@@ -1334,6 +1417,12 @@ void ieee80211_recalc_smps(struct ieee80211_local *local);
 size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
                          const u8 *ids, int n_ids, size_t offset);
 size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+                             u16 cap);
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+                               struct ieee80211_sta_ht_cap *ht_cap,
+                               struct ieee80211_channel *channel,
+                               enum nl80211_channel_type channel_type);
 
 /* internal work items */
 void ieee80211_work_init(struct ieee80211_local *local);
@@ -1362,6 +1451,8 @@ ieee80211_get_channel_mode(struct ieee80211_local *local,
 bool ieee80211_set_channel_type(struct ieee80211_local *local,
                                struct ieee80211_sub_if_data *sdata,
                                enum nl80211_channel_type chantype);
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
index 30d7355..3d3bb5e 100644 (file)
@@ -188,11 +188,22 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
                        return -ENOLINK;
                break;
-       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_AP_VLAN: {
+               struct ieee80211_sub_if_data *master;
+
                if (!sdata->bss)
                        return -ENOLINK;
+
                list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+
+               master = container_of(sdata->bss,
+                                     struct ieee80211_sub_if_data, u.ap);
+               sdata->control_port_protocol =
+                       master->control_port_protocol;
+               sdata->control_port_no_encrypt =
+                       master->control_port_no_encrypt;
                break;
+               }
        case NL80211_IFTYPE_AP:
                sdata->bss = &sdata->u.ap;
                break;
@@ -265,7 +276,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                break;
        default:
                if (coming_up) {
-                       res = drv_add_interface(local, &sdata->vif);
+                       res = drv_add_interface(local, sdata);
                        if (res)
                                goto err_stop;
                }
@@ -282,10 +293,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
                changed |= ieee80211_reset_erp_info(sdata);
                ieee80211_bss_info_change_notify(sdata, changed);
 
-               if (sdata->vif.type == NL80211_IFTYPE_STATION)
+               if (sdata->vif.type == NL80211_IFTYPE_STATION ||
+                   sdata->vif.type == NL80211_IFTYPE_ADHOC)
                        netif_carrier_off(dev);
                else
                        netif_carrier_on(dev);
+
+               /*
+                * set default queue parameters so drivers don't
+                * need to initialise the hardware if the hardware
+                * doesn't start up with sane defaults
+                */
+               ieee80211_set_wmm_default(sdata);
        }
 
        set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -329,15 +348,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
        if (coming_up)
                local->open_count++;
 
-       if (hw_reconf_flags) {
+       if (hw_reconf_flags)
                ieee80211_hw_config(local, hw_reconf_flags);
-               /*
-                * set default queue parameters so drivers don't
-                * need to initialise the hardware if the hardware
-                * doesn't start up with sane defaults
-                */
-               ieee80211_set_wmm_default(sdata);
-       }
 
        ieee80211_recalc_ps(local, -1);
 
@@ -345,7 +357,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
 
        return 0;
  err_del_interface:
-       drv_remove_interface(local, &sdata->vif);
+       drv_remove_interface(local, sdata);
  err_stop:
        if (!local->open_count)
                drv_stop(local);
@@ -450,15 +462,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_sub_if_data *vlan, *tmpsdata;
                struct beacon_data *old_beacon =
                        rtnl_dereference(sdata->u.ap.beacon);
+               struct sk_buff *old_probe_resp =
+                       rtnl_dereference(sdata->u.ap.probe_resp);
 
                /* sdata_running will return false, so this will disable */
                ieee80211_bss_info_change_notify(sdata,
                                                 BSS_CHANGED_BEACON_ENABLED);
 
-               /* remove beacon */
+               /* remove beacon and probe response */
                RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+               RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
                synchronize_rcu();
                kfree(old_beacon);
+               kfree_skb(old_probe_resp);
 
                /* down all dependent devices, that is VLANs */
                list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
@@ -520,7 +536,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                ieee80211_free_keys(sdata);
 
                if (going_down)
-                       drv_remove_interface(local, &sdata->vif);
+                       drv_remove_interface(local, sdata);
        }
 
        sdata->bss = NULL;
@@ -656,7 +672,6 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_hdr *hdr;
        struct ieee80211_radiotap_header *rtap = (void *)skb->data;
-       u8 *p;
 
        if (local->hw.queues < 4)
                return 0;
@@ -667,19 +682,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
 
        hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
 
-       if (!ieee80211_is_data(hdr->frame_control)) {
-               skb->priority = 7;
-               return ieee802_1d_to_ac[skb->priority];
-       }
-       if (!ieee80211_is_data_qos(hdr->frame_control)) {
-               skb->priority = 0;
-               return ieee802_1d_to_ac[skb->priority];
-       }
-
-       p = ieee80211_get_qos_ctl(hdr);
-       skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
-
-       return ieee80211_downgrade_queue(local, skb);
+       return ieee80211_select_queue_80211(local, skb, hdr);
 }
 
 static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -850,6 +853,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
        sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
        sdata->control_port_no_encrypt = false;
 
+       sdata->noack_map = 0;
+
        /* only monitor differs */
        sdata->dev->type = ARPHRD_ETHER;
 
index fb02ea5..87a8974 100644 (file)
@@ -134,9 +134,13 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
                key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
                if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+                     (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+                     (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                        sdata->crypto_tx_tailroom_needed_cnt--;
 
+               WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
+                       (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
+
                return 0;
        }
 
@@ -179,7 +183,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
        sdata = key->sdata;
 
        if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
-             (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
+             (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+             (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
                increment_tailroom_need_count(sdata);
 
        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
index cae4435..60198ac 100644 (file)
@@ -92,50 +92,9 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
        ieee80211_configure_filter(local);
 }
 
-/*
- * Returns true if we are logically configured to be on
- * the operating channel AND the hardware-conf is currently
- * configured on the operating channel.  Compares channel-type
- * as well.
- */
-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
-{
-       struct ieee80211_channel *chan, *scan_chan;
-       enum nl80211_channel_type channel_type;
-
-       /* This logic needs to match logic in ieee80211_hw_config */
-       if (local->scan_channel) {
-               chan = local->scan_channel;
-               /* If scanning on oper channel, use whatever channel-type
-                * is currently in use.
-                */
-               if (chan == local->oper_channel)
-                       channel_type = local->_oper_channel_type;
-               else
-                       channel_type = NL80211_CHAN_NO_HT;
-       } else if (local->tmp_channel) {
-               chan = scan_chan = local->tmp_channel;
-               channel_type = local->tmp_channel_type;
-       } else {
-               chan = local->oper_channel;
-               channel_type = local->_oper_channel_type;
-       }
-
-       if (chan != local->oper_channel ||
-           channel_type != local->_oper_channel_type)
-               return false;
-
-       /* Check current hardware-config against oper_channel. */
-       if ((local->oper_channel != local->hw.conf.channel) ||
-           (local->_oper_channel_type != local->hw.conf.channel_type))
-               return false;
-
-       return true;
-}
-
 int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 {
-       struct ieee80211_channel *chan, *scan_chan;
+       struct ieee80211_channel *chan;
        int ret = 0;
        int power;
        enum nl80211_channel_type channel_type;
@@ -143,14 +102,12 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
 
        might_sleep();
 
-       scan_chan = local->scan_channel;
-
        /* If this off-channel logic ever changes,  ieee80211_on_oper_channel
         * may need to change as well.
         */
        offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
-       if (scan_chan) {
-               chan = scan_chan;
+       if (local->scan_channel) {
+               chan = local->scan_channel;
                /* If scanning on oper channel, use whatever channel-type
                 * is currently in use.
                 */
@@ -159,7 +116,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
                else
                        channel_type = NL80211_CHAN_NO_HT;
        } else if (local->tmp_channel) {
-               chan = scan_chan = local->tmp_channel;
+               chan = local->tmp_channel;
                channel_type = local->tmp_channel_type;
        } else {
                chan = local->oper_channel;
@@ -560,6 +517,19 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
        },
 };
 
+static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
+       .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR |
+                            IEEE80211_HT_AMPDU_PARM_DENSITY,
+
+       .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                               IEEE80211_HT_CAP_MAX_AMSDU |
+                               IEEE80211_HT_CAP_SGI_40),
+       .mcs = {
+               .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
+                            0xff, 0xff, 0xff, 0xff, 0xff, },
+       },
+};
+
 struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
                                        const struct ieee80211_ops *ops)
 {
@@ -595,7 +565,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        wiphy->flags |= WIPHY_FLAG_NETNS_OK |
                        WIPHY_FLAG_4ADDR_AP |
-                       WIPHY_FLAG_4ADDR_STATION;
+                       WIPHY_FLAG_4ADDR_STATION |
+                       WIPHY_FLAG_REPORTS_OBSS |
+                       WIPHY_FLAG_OFFCHAN_TX |
+                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+       wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
+                         NL80211_FEATURE_HT_IBSS;
 
        if (!ops->set_key)
                wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -608,7 +584,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
        local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
 
-       BUG_ON(!ops->tx);
+       BUG_ON(!ops->tx && !ops->tx_frags);
        BUG_ON(!ops->start);
        BUG_ON(!ops->stop);
        BUG_ON(!ops->config);
@@ -628,6 +604,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        local->user_power_level = -1;
        local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
        local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
+       wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
 
        INIT_LIST_HEAD(&local->interfaces);
 
@@ -670,6 +647,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
        INIT_WORK(&local->sched_scan_stopped_work,
                  ieee80211_sched_scan_stopped_work);
 
+       spin_lock_init(&local->ack_status_lock);
+       idr_init(&local->ack_status_frames);
+       /* preallocate at least one entry */
+       idr_pre_get(&local->ack_status_frames, GFP_KERNEL);
+
        sta_info_init(local);
 
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
@@ -1051,6 +1033,13 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL(ieee80211_unregister_hw);
 
+static int ieee80211_free_ack_frame(int id, void *p, void *data)
+{
+       WARN_ONCE(1, "Have pending ack frames!\n");
+       kfree_skb(p);
+       return 0;
+}
+
 void ieee80211_free_hw(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
@@ -1061,6 +1050,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
        if (local->wiphy_ciphers_allocated)
                kfree(local->hw.wiphy->cipher_suites);
 
+       idr_for_each(&local->ack_status_frames,
+                    ieee80211_free_ack_frame, NULL);
+       idr_destroy(&local->ack_status_frames);
+
        wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
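
The new ack_status_frames idr keeps each skb for which a TX-status report was requested under a small integer id, preallocates one entry at ieee80211_alloc_hw() time, and at ieee80211_free_hw() walks the table, warning about and freeing anything still pending. A user-space sketch of that bookkeeping with a plain fixed-size slot array standing in for the idr (names and sizes invented):

#include <stdio.h>
#include <stdlib.h>

#define MAX_PENDING 8

/* Toy stand-in for the idr: each slot holds a pending "frame". */
static void *pending[MAX_PENDING];

static int pending_add(void *frame)
{
	int id;

	for (id = 0; id < MAX_PENDING; id++) {
		if (!pending[id]) {
			pending[id] = frame;
			return id;	/* id is later echoed in the TX status */
		}
	}
	return -1;			/* table full */
}

static void *pending_take(int id)
{
	void *frame = pending[id];

	pending[id] = NULL;		/* status arrived, slot is free again */
	return frame;
}

static void pending_destroy(void)
{
	int id;

	for (id = 0; id < MAX_PENDING; id++) {
		if (pending[id]) {	/* like the WARN_ONCE at teardown */
			fprintf(stderr, "WARN: pending ack frame %d\n", id);
			free(pending[id]);
		}
	}
}

int main(void)
{
	int id = pending_add(malloc(16));

	free(pending_take(id));		/* normal path: status reported */
	pending_add(malloc(16));	/* left pending on purpose ...   */
	pending_destroy();		/* ... warned about and freed here */
	return 0;
}
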
index a7078fd..c707c8b 100644 (file)
@@ -76,6 +76,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
 bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct ieee80211_local *local = sdata->local;
 
        /*
         * As support for each feature is added, check for matching
@@ -87,15 +88,23 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
         *   - MDA enabled
         * - Power management control on fc
         */
-       if (ifmsh->mesh_id_len == ie->mesh_id_len &&
-               memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
-               (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
-               (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
-               (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
-               (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
-               (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))
-               return true;
-
+       if (!(ifmsh->mesh_id_len == ie->mesh_id_len &&
+            memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
+            (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+            (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
+            (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
+            (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
+            (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
+               goto mismatch;
+
+       /* disallow peering with mismatched channel types for now */
+       if (ie->ht_info_elem &&
+           (local->_oper_channel_type !=
+            ieee80211_ht_info_to_channel_type(ie->ht_info_elem)))
+               goto mismatch;
+
+       return true;
+mismatch:
        return false;
 }
 
@@ -341,6 +350,49 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
        return 0;
 }
 
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+                      struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_supported_band *sband;
+       u8 *pos;
+
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
+       if (!sband->ht_cap.ht_supported ||
+           local->_oper_channel_type == NL80211_CHAN_NO_HT)
+               return 0;
+
+       if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
+               return -ENOMEM;
+
+       pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
+       ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap);
+
+       return 0;
+}
+
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+                       struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_channel *channel = local->oper_channel;
+       enum nl80211_channel_type channel_type = local->_oper_channel_type;
+       struct ieee80211_supported_band *sband =
+                               local->hw.wiphy->bands[channel->band];
+       struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+       u8 *pos;
+
+       if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
+               return 0;
+
+       if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info))
+               return -ENOMEM;
+
+       pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info));
+       ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type);
+
+       return 0;
+}
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
        struct ieee80211_sub_if_data *sdata =
@@ -697,6 +749,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        atomic_set(&ifmsh->mpaths, 0);
        mesh_rmc_init(sdata);
        ifmsh->last_preq = jiffies;
+       ifmsh->next_perr = jiffies;
        /* Allocate all mesh structures when creating the first mesh interface. */
        if (!mesh_allocated)
                ieee80211s_init();
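
mesh_add_ht_cap_ie() and mesh_add_ht_info_ie() follow the usual IE-builder shape: return early when HT is not in use, return -ENOMEM when the skb lacks tailroom for 2 + sizeof(element) bytes, and only then skb_put() and build the element. The same shape in a standalone sketch, with a plain byte buffer standing in for the skb (buffer and element sizes below are illustrative):

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct buf {
	unsigned char data[64];
	size_t len;			/* bytes used so far */
};

/* Append one information element (tag, length, body) if it fits,
 * mirroring the tailroom check before skb_put() in the patch. */
static int buf_add_ie(struct buf *b, unsigned char eid,
		      const unsigned char *body, size_t body_len)
{
	if (sizeof(b->data) - b->len < 2 + body_len)
		return -ENOMEM;		/* no tailroom: caller gives up */

	b->data[b->len++] = eid;
	b->data[b->len++] = (unsigned char)body_len;
	memcpy(b->data + b->len, body, body_len);
	b->len += body_len;
	return 0;
}

int main(void)
{
	struct buf b = { .len = 0 };
	unsigned char ht_cap[26] = { 0 };	/* sized like an HT cap element */

	if (buf_add_ie(&b, 45 /* HT Capabilities element ID */, ht_cap,
		       sizeof(ht_cap)))
		fprintf(stderr, "no room for HT capability IE\n");
	printf("buffer now holds %zu bytes\n", b.len);
	return 0;
}
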
index 8c00e2d..bd14bd2 100644 (file)
@@ -31,6 +31,8 @@
  * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
  *     modified
  * @MESH_PATH_RESOLVED: the mesh path has been resolved
+ * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination
+ * already queued up, waiting for the discovery process to start.
  *
  * MESH_PATH_RESOLVED is used by the mesh path timer to
  * decide when to stop or cancel the mesh path discovery.
@@ -41,6 +43,7 @@ enum mesh_path_flags {
        MESH_PATH_SN_VALID =    BIT(2),
        MESH_PATH_FIXED =       BIT(3),
        MESH_PATH_RESOLVED =    BIT(4),
+       MESH_PATH_REQ_QUEUED =  BIT(5),
 };
 
 /**
@@ -212,6 +215,10 @@ int mesh_add_vendor_ies(struct sk_buff *skb,
                        struct ieee80211_sub_if_data *sdata);
 int mesh_add_ds_params_ie(struct sk_buff *skb,
                          struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_cap_ie(struct sk_buff *skb,
+                      struct ieee80211_sub_if_data *sdata);
+int mesh_add_ht_info_ie(struct sk_buff *skb,
+                       struct ieee80211_sub_if_data *sdata);
 void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
@@ -226,6 +233,8 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
 /* Mesh paths */
 int mesh_nexthop_lookup(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
+int mesh_nexthop_resolve(struct sk_buff *skb,
+                        struct ieee80211_sub_if_data *sdata);
 void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
 struct mesh_path *mesh_path_lookup(u8 *dst,
                struct ieee80211_sub_if_data *sdata);
index 174040a..73abb75 100644 (file)
@@ -113,20 +113,20 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
                struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+       struct sk_buff *skb;
        struct ieee80211_mgmt *mgmt;
-       u8 *pos;
-       int ie_len;
+       u8 *pos, ie_len;
+       int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+                     sizeof(mgmt->u.action.u.mesh_action);
 
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                           hdr_len +
+                           2 + 37); /* max HWMP IE */
        if (!skb)
                return -1;
        skb_reserve(skb, local->hw.extra_tx_headroom);
-       /* 25 is the size of the common mgmt part (24) plus the size of the
-        * common action part (1)
-        */
-       mgmt = (struct ieee80211_mgmt *)
-               skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
-       memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+       mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+       memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
 
@@ -240,20 +240,24 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
                       struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
+       struct sk_buff *skb;
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_mgmt *mgmt;
-       u8 *pos;
-       int ie_len;
+       u8 *pos, ie_len;
+       int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
+                     sizeof(mgmt->u.action.u.mesh_action);
 
+       if (time_before(jiffies, ifmsh->next_perr))
+               return -EAGAIN;
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                           hdr_len +
+                           2 + 15 /* PERR IE */);
        if (!skb)
                return -1;
        skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
-       /* 25 is the size of the common mgmt part (24) plus the size of the
-        * common action part (1)
-        */
-       mgmt = (struct ieee80211_mgmt *)
-               skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
-       memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
+       mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+       memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
 
@@ -290,6 +294,8 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 
        /* see note in function header */
        prepare_frame_for_deferred_tx(sdata, skb);
+       ifmsh->next_perr = TU_TO_EXP_TIME(
+                                  ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
        ieee80211_add_pending_skb(local, skb);
        return 0;
 }
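
mesh_path_error_tx() is now rate-limited: it returns -EAGAIN while jiffies is still before ifmsh->next_perr, and after queueing a PERR it pushes next_perr forward by dot11MeshHWMPperrMinInterval. A sketch of that deadline pattern with a plain counter standing in for jiffies (the kernel uses time_before() so wraparound is handled; the plain comparison below is not wraparound-safe):

#include <stdio.h>

/* Toy clock standing in for jiffies, and a per-interface PERR deadline. */
static unsigned long now;
static unsigned long next_perr;
#define PERR_MIN_INTERVAL 100	/* stand-in for dot11MeshHWMPperrMinInterval */

static int perr_tx(void)
{
	if (now < next_perr)
		return -1;		/* like -EAGAIN: too soon, drop it */

	/* ... build and queue the PERR frame here ... */
	next_perr = now + PERR_MIN_INTERVAL;	/* push the deadline forward */
	return 0;
}

int main(void)
{
	now = 0;
	printf("%d\n", perr_tx());	/* 0: first PERR goes out */
	now = 50;
	printf("%d\n", perr_tx());	/* -1: still inside the interval */
	now = 150;
	printf("%d\n", perr_tx());	/* 0: interval has elapsed */
	return 0;
}
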
@@ -393,15 +399,13 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
                orig_metric = PREQ_IE_METRIC(hwmp_ie);
                break;
        case MPATH_PREP:
-               /* Originator here refers to the MP that was the destination in
-                * the Path Request. The draft refers to that MP as the
-                * destination address, even though usually it is the origin of
-                * the PREP frame. We divert from the nomenclature in the draft
+               /* Originator here refers to the MP that was the target in the
+                * Path Request. We divert from the nomenclature in the draft
                 * so that we can easily use a single function to gather path
                 * information from both PREQ and PREP frames.
                 */
-               orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
-               orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
+               orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
+               orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
                orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
                orig_metric = PREP_IE_METRIC(hwmp_ie);
                break;
@@ -562,9 +566,9 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                ttl = ifmsh->mshcfg.element_ttl;
                if (ttl != 0) {
                        mhwmp_dbg("replying to the PREQ");
-                       mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
-                               cpu_to_le32(target_sn), 0, orig_addr,
-                               cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
+                       mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
+                               cpu_to_le32(orig_sn), 0, target_addr,
+                               cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
                                cpu_to_le32(lifetime), cpu_to_le32(metric),
                                0, sdata);
                } else
@@ -618,14 +622,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
 
        mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
 
-       /* Note that we divert from the draft nomenclature and denominate
-        * destination to what the draft refers to as origininator. So in this
-        * function destnation refers to the final destination of the PREP,
-        * which corresponds with the originator of the PREQ which this PREP
-        * replies
-        */
-       target_addr = PREP_IE_TARGET_ADDR(prep_elem);
-       if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
+       orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+       if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
                /* destination, no forwarding required */
                return;
 
@@ -636,7 +634,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
        }
 
        rcu_read_lock();
-       mpath = mesh_path_lookup(target_addr, sdata);
+       mpath = mesh_path_lookup(orig_addr, sdata);
        if (mpath)
                spin_lock_bh(&mpath->state_lock);
        else
@@ -651,7 +649,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
        flags = PREP_IE_FLAGS(prep_elem);
        lifetime = PREP_IE_LIFETIME(prep_elem);
        hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
-       orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
+       target_addr = PREP_IE_TARGET_ADDR(prep_elem);
        target_sn = PREP_IE_TARGET_SN(prep_elem);
        orig_sn = PREP_IE_ORIG_SN(prep_elem);
 
@@ -867,9 +865,20 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
                return;
        }
 
+       spin_lock(&mpath->state_lock);
+       if (mpath->flags & MESH_PATH_REQ_QUEUED) {
+               spin_unlock(&mpath->state_lock);
+               spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+               kfree(preq_node);
+               return;
+       }
+
        memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
        preq_node->flags = flags;
 
+       mpath->flags |= MESH_PATH_REQ_QUEUED;
+       spin_unlock(&mpath->state_lock);
+
        list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
        ++ifmsh->preq_queue_len;
        spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -921,6 +930,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
                goto enddiscovery;
 
        spin_lock_bh(&mpath->state_lock);
+       mpath->flags &= ~MESH_PATH_REQ_QUEUED;
        if (preq_node->flags & PREQ_Q_F_START) {
                if (mpath->flags & MESH_PATH_RESOLVING) {
                        spin_unlock_bh(&mpath->state_lock);
@@ -972,71 +982,97 @@ enddiscovery:
        kfree(preq_node);
 }
 
-/**
- * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame
+/* mesh_nexthop_resolve - lookup next hop for given skb and start path
+ * discovery if no forwarding information is found.
  *
  * @skb: 802.11 frame to be sent
  * @sdata: network subif the frame will be sent through
  *
- * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
- * found, the function will start a path discovery and queue the frame so it is
- * sent when the path is resolved. This means the caller must not free the skb
- * in this case.
+ * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
+ * skb is freed here if no mpath could be allocated.
  */
-int mesh_nexthop_lookup(struct sk_buff *skb,
-                       struct ieee80211_sub_if_data *sdata)
+int mesh_nexthop_resolve(struct sk_buff *skb,
+                        struct ieee80211_sub_if_data *sdata)
 {
-       struct sk_buff *skb_to_free = NULL;
-       struct mesh_path *mpath;
-       struct sta_info *next_hop;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mesh_path *mpath;
+       struct sk_buff *skb_to_free = NULL;
        u8 *target_addr = hdr->addr3;
        int err = 0;
 
        rcu_read_lock();
-       mpath = mesh_path_lookup(target_addr, sdata);
+       err = mesh_nexthop_lookup(skb, sdata);
+       if (!err)
+               goto endlookup;
 
+       /* no nexthop found, start resolving */
+       mpath = mesh_path_lookup(target_addr, sdata);
        if (!mpath) {
                mesh_path_add(target_addr, sdata);
                mpath = mesh_path_lookup(target_addr, sdata);
                if (!mpath) {
-                       sdata->u.mesh.mshstats.dropped_frames_no_route++;
+                       mesh_path_discard_frame(skb, sdata);
                        err = -ENOSPC;
                        goto endlookup;
                }
        }
 
-       if (mpath->flags & MESH_PATH_ACTIVE) {
-               if (time_after(jiffies,
-                              mpath->exp_time -
-                              msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
-                   !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
-                   !(mpath->flags & MESH_PATH_RESOLVING) &&
-                   !(mpath->flags & MESH_PATH_FIXED)) {
-                       mesh_queue_preq(mpath,
-                                       PREQ_Q_F_START | PREQ_Q_F_REFRESH);
-               }
-               next_hop = rcu_dereference(mpath->next_hop);
-               if (next_hop)
-                       memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
-               else
-                       err = -ENOENT;
-       } else {
-               struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-               if (!(mpath->flags & MESH_PATH_RESOLVING)) {
-                       /* Start discovery only if it is not running yet */
-                       mesh_queue_preq(mpath, PREQ_Q_F_START);
-               }
+       if (!(mpath->flags & MESH_PATH_RESOLVING))
+               mesh_queue_preq(mpath, PREQ_Q_F_START);
+
+       if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
+               skb_to_free = skb_dequeue(&mpath->frame_queue);
 
-               if (skb_queue_len(&mpath->frame_queue) >=
-                               MESH_FRAME_QUEUE_LEN)
-                       skb_to_free = skb_dequeue(&mpath->frame_queue);
+       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+       ieee80211_set_qos_hdr(sdata, skb);
+       skb_queue_tail(&mpath->frame_queue, skb);
+       err = -ENOENT;
+       if (skb_to_free)
+               mesh_path_discard_frame(skb_to_free, sdata);
+
+endlookup:
+       rcu_read_unlock();
+       return err;
+}
+/**
+ * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
+ * this function is considered "using" the associated mpath, so a preemptive
+ * path refresh is queued if this mpath expires soon.
+ *
+ * @skb: 802.11 frame to be sent
+ * @sdata: network subif the frame will be sent through
+ *
+ * Returns: 0 if the next hop was found. Nonzero otherwise.
+ */
+int mesh_nexthop_lookup(struct sk_buff *skb,
+                       struct ieee80211_sub_if_data *sdata)
+{
+       struct mesh_path *mpath;
+       struct sta_info *next_hop;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       u8 *target_addr = hdr->addr3;
+       int err = -ENOENT;
+
+       rcu_read_lock();
+       mpath = mesh_path_lookup(target_addr, sdata);
+
+       if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
+               goto endlookup;
+
+       if (time_after(jiffies,
+                      mpath->exp_time -
+                      msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
+           !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
+           !(mpath->flags & MESH_PATH_RESOLVING) &&
+           !(mpath->flags & MESH_PATH_FIXED))
+               mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 
-               info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-               skb_queue_tail(&mpath->frame_queue, skb);
-               if (skb_to_free)
-                       mesh_path_discard_frame(skb_to_free, sdata);
-               err = -ENOENT;
+       next_hop = rcu_dereference(mpath->next_hop);
+       if (next_hop) {
+               memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               err = 0;
        }
 
 endlookup:
@@ -1061,6 +1097,7 @@ void mesh_path_timer(unsigned long data)
        } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
                ++mpath->discovery_retries;
                mpath->discovery_timeout *= 2;
+               mpath->flags &= ~MESH_PATH_REQ_QUEUED;
                spin_unlock_bh(&mpath->state_lock);
                mesh_queue_preq(mpath, 0);
        } else {
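
For orientation, the hunks above split the old next-hop lookup into mesh_nexthop_resolve() (which may queue the frame and kick off path discovery) and mesh_nexthop_lookup() (a pure lookup). The following is a minimal, purely illustrative sketch of a transmit-path caller; the function name example_mesh_tx() is hypothetical, and only mesh_nexthop_resolve() and ieee80211_tx_skb() are taken from the tree:

/*
 * Illustrative only (not part of this patch): per the new kerneldoc,
 * mesh_nexthop_resolve() returns 0 when the next hop was filled in,
 * -ENOENT when the frame was queued pending path discovery, and it
 * discards the skb itself (-ENOSPC) when no mpath could be allocated.
 * In every nonzero case the caller must neither free nor transmit
 * the frame.
 */
static void example_mesh_tx(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb)
{
        if (mesh_nexthop_resolve(skb, sdata))
                return;

        /* next hop resolved into addr1/addr2; hand the frame down */
        ieee80211_tx_skb(sdata, skb);
}
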
index 7f54c50..edf167e 100644 (file)
@@ -69,8 +69,6 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
                lockdep_is_held(&pathtbl_resize_lock));
 }
 
-static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
-
 /*
  * CAREFUL -- "tbl" must not be an expression,
  * in particular not an rcu_dereference(), since
@@ -213,7 +211,6 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;
-       struct ieee80211_sub_if_data *sdata = mpath->sdata;
 
        rcu_assign_pointer(mpath->next_hop, sta);
 
@@ -224,8 +221,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
-               skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
-               ieee80211_set_qos_hdr(sdata, skb);
+               memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
                __skb_queue_tail(&tmpq, skb);
        }
 
@@ -269,6 +265,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
+       memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
 }
 
@@ -423,21 +420,18 @@ static void mesh_gate_node_reclaim(struct rcu_head *rp)
 }
 
 /**
- * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
- * @mesh_tbl: table which contains known_gates list
- * @mpath: mpath to known mesh gate
- *
- * Returns: 0 on success
- *
+ * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
+ * @mpath: gate path to add to table
  */
-static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+int mesh_path_add_gate(struct mesh_path *mpath)
 {
+       struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;
 
        rcu_read_lock();
-       tbl = rcu_dereference(tbl);
+       tbl = rcu_dereference(mesh_paths);
 
        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
@@ -481,8 +475,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
        struct mpath_node *gate;
        struct hlist_node *p, *q;
 
-       tbl = rcu_dereference(tbl);
-
        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
@@ -501,16 +493,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
 }
 
 /**
- *
- * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
- * @mpath: gate path to add to table
- */
-int mesh_path_add_gate(struct mesh_path *mpath)
-{
-       return mesh_gate_add(mesh_paths, mpath);
-}
-
-/**
  * mesh_gate_num - number of gates known to this interface
  * @sdata: subif data
  */
@@ -991,38 +973,11 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
  * @skb: frame to discard
  * @sdata: network subif the frame was to be sent through
  *
- * If the frame was being forwarded from another MP, a PERR frame will be sent
- * to the precursor.  The precursor's address (i.e. the previous hop) was saved
- * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
- * the destination is successfully resolved.
- *
  * Locking: the function must be called within an rcu_read_lock region
  */
 void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct mesh_path *mpath;
-       u32 sn = 0;
-       __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
-
-       if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
-               u8 *ra, *da;
-
-               da = hdr->addr3;
-               ra = hdr->addr1;
-               rcu_read_lock();
-               mpath = mesh_path_lookup(da, sdata);
-               if (mpath) {
-                       spin_lock_bh(&mpath->state_lock);
-                       sn = ++mpath->sn;
-                       spin_unlock_bh(&mpath->state_lock);
-               }
-               rcu_read_unlock();
-               mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
-                                  cpu_to_le32(sn), reason, ra, sdata);
-       }
-
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
index 7e57f5d..7314372 100644 (file)
@@ -80,11 +80,15 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
  *       on it in the lifecycle management section!
  */
 static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
-                                        u8 *hw_addr, u32 rates)
+                                        u8 *hw_addr, u32 rates,
+                                        struct ieee802_11_elems *elems)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_supported_band *sband;
        struct sta_info *sta;
 
+       sband = local->hw.wiphy->bands[local->oper_channel->band];
+
        if (local->num_sta >= MESH_MAX_PLINKS)
                return NULL;
 
@@ -96,6 +100,10 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
        set_sta_flag(sta, WLAN_STA_AUTHORIZED);
        set_sta_flag(sta, WLAN_STA_WME);
        sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
+       if (elems->ht_cap_elem)
+               ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
+                                                 elems->ht_cap_elem,
+                                                 &sta->sta.ht_cap);
        rate_control_rate_init(sta);
 
        return sta;
@@ -153,23 +161,31 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                enum ieee80211_self_protected_actioncode action,
                u8 *da, __le16 llid, __le16 plid, __le16 reason) {
        struct ieee80211_local *local = sdata->local;
-       struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
-                       sdata->u.mesh.ie_len);
+       struct sk_buff *skb;
        struct ieee80211_mgmt *mgmt;
        bool include_plid = false;
-       int ie_len = 4;
        u16 peering_proto = 0;
-       u8 *pos;
-
+       u8 *pos, ie_len = 4;
+       int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
+                     sizeof(mgmt->u.action.u.self_prot);
+
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+                           hdr_len +
+                           2 + /* capability info */
+                           2 + /* AID */
+                           2 + 8 + /* supported rates */
+                           2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+                           2 + sdata->u.mesh.mesh_id_len +
+                           2 + sizeof(struct ieee80211_meshconf_ie) +
+                           2 + sizeof(struct ieee80211_ht_cap) +
+                           2 + sizeof(struct ieee80211_ht_info) +
+                           2 + 8 + /* peering IE */
+                           sdata->u.mesh.ie_len);
        if (!skb)
                return -1;
        skb_reserve(skb, local->hw.extra_tx_headroom);
-       /* 25 is the size of the common mgmt part (24) plus the size of the
-        * common action part (1)
-        */
-       mgmt = (struct ieee80211_mgmt *)
-               skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
-       memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
+       mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+       memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                          IEEE80211_STYPE_ACTION);
        memcpy(mgmt->da, da, ETH_ALEN);
@@ -235,6 +251,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                memcpy(pos, &reason, 2);
                pos += 2;
        }
+
+       if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+               if (mesh_add_ht_cap_ie(skb, sdata) ||
+                   mesh_add_ht_info_ie(skb, sdata))
+                       return -1;
+       }
+
        if (mesh_add_vendor_ies(skb, sdata))
                return -1;
 
@@ -261,7 +284,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates,
                                        elems->ie_start, elems->total_len,
                                        GFP_KERNEL);
                else
-                       sta = mesh_plink_alloc(sdata, hw_addr, rates);
+                       sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
                if (!sta)
                        return;
                if (sta_info_insert_rcu(sta)) {
@@ -552,7 +575,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
                }
 
                rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
-               sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
+               sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
                if (!sta) {
                        mpl_dbg("Mesh plink error: plink table full\n");
                        return;
index b1b1bb3..09019d1 100644 (file)
@@ -209,6 +209,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
                channel_type = NL80211_CHAN_HT20;
 
                if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
+                   !ieee80111_cfg_override_disables_ht40(sdata) &&
                    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
                    (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
                        switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
@@ -1120,6 +1121,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        /* on the next assoc, re-program HT parameters */
        sdata->ht_opmode_valid = false;
+       memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
+       memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 
        local->power_constr_level = 0;
 
@@ -1359,9 +1362,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
        ieee80211_set_disassoc(sdata, true, true);
        mutex_unlock(&ifmgd->mtx);
 
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
        /*
         * must be outside lock due to cfg80211,
         * but that's not a problem.
@@ -1370,6 +1370,10 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
                                       IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
                                       NULL, true);
+
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(local);
+       mutex_unlock(&local->mtx);
 }
 
 void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1468,6 +1472,47 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
        return RX_MGMT_CFG80211_DISASSOC;
 }
 
+static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
+                               u8 *supp_rates, unsigned int supp_rates_len,
+                               u32 *rates, u32 *basic_rates,
+                               bool *have_higher_than_11mbit,
+                               int *min_rate, int *min_rate_index)
+{
+       int i, j;
+
+       for (i = 0; i < supp_rates_len; i++) {
+               int rate = (supp_rates[i] & 0x7f) * 5;
+               bool is_basic = !!(supp_rates[i] & 0x80);
+
+               if (rate > 110)
+                       *have_higher_than_11mbit = true;
+
+               /*
+                * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009
+                * 7.3.2.2 as a magic value instead of a rate. Hence, skip it.
+                *
+                * Note: Even though the membership selector and the basic
+                *       rate flag share the same bit, they are not exactly
+                *       the same.
+                */
+               if (!!(supp_rates[i] & 0x80) &&
+                   (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+                       continue;
+
+               for (j = 0; j < sband->n_bitrates; j++) {
+                       if (sband->bitrates[j].bitrate == rate) {
+                               *rates |= BIT(j);
+                               if (is_basic)
+                                       *basic_rates |= BIT(j);
+                               if (rate < *min_rate) {
+                                       *min_rate = rate;
+                                       *min_rate_index = j;
+                               }
+                               break;
+                       }
+               }
+       }
+}
 
 static bool ieee80211_assoc_success(struct ieee80211_work *wk,
                                    struct ieee80211_mgmt *mgmt, size_t len)
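
The new ieee80211_get_rates() helper above centralizes the supported-rates parsing that was previously duplicated for the Supported Rates and Extended Supported Rates elements. As a worked illustration of the octet arithmetic it performs (a standalone sketch, not part of the patch; the 0x80 basic-rate bit, the 500 kbit/s rate units and the selector value 127 follow the 802.11 encoding):

#include <stdbool.h>
#include <stdio.h>

#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127

int main(void)
{
        /* 0x96 = basic bit | 22 -> 22 * 5 = 110 (100 kbit/s) = 11 Mbit/s */
        /* 0xff = basic bit | 127 -> HT PHY membership selector, skipped  */
        unsigned char octets[] = { 0x96, 0xff };
        unsigned int i;

        for (i = 0; i < sizeof(octets); i++) {
                int rate = (octets[i] & 0x7f) * 5; /* units of 100 kbit/s */
                bool is_basic = octets[i] & 0x80;

                if (is_basic &&
                    (octets[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) {
                        printf("0x%02x: HT PHY membership selector, skipped\n",
                               octets[i]);
                        continue;
                }
                printf("0x%02x: %d.%d Mbit/s%s\n", octets[i],
                       rate / 10, rate % 10, is_basic ? " (basic)" : "");
        }
        return 0;
}
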
@@ -1484,7 +1529,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
        struct ieee802_11_elems elems;
        struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
        u32 changed = 0;
-       int i, j, err;
+       int err;
        bool have_higher_than_11mbit = false;
        u16 ap_ht_cap_flags;
        int min_rate = INT_MAX, min_rate_index = -1;
@@ -1534,7 +1579,6 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
 
        set_sta_flag(sta, WLAN_STA_AUTH);
        set_sta_flag(sta, WLAN_STA_ASSOC);
-       set_sta_flag(sta, WLAN_STA_ASSOC_AP);
        if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
                set_sta_flag(sta, WLAN_STA_AUTHORIZED);
 
@@ -1542,47 +1586,14 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
        basic_rates = 0;
        sband = local->hw.wiphy->bands[wk->chan->band];
 
-       for (i = 0; i < elems.supp_rates_len; i++) {
-               int rate = (elems.supp_rates[i] & 0x7f) * 5;
-               bool is_basic = !!(elems.supp_rates[i] & 0x80);
-
-               if (rate > 110)
-                       have_higher_than_11mbit = true;
-
-               for (j = 0; j < sband->n_bitrates; j++) {
-                       if (sband->bitrates[j].bitrate == rate) {
-                               rates |= BIT(j);
-                               if (is_basic)
-                                       basic_rates |= BIT(j);
-                               if (rate < min_rate) {
-                                       min_rate = rate;
-                                       min_rate_index = j;
-                               }
-                               break;
-                       }
-               }
-       }
+       ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len,
+                           &rates, &basic_rates, &have_higher_than_11mbit,
+                           &min_rate, &min_rate_index);
 
-       for (i = 0; i < elems.ext_supp_rates_len; i++) {
-               int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
-               bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
-
-               if (rate > 110)
-                       have_higher_than_11mbit = true;
-
-               for (j = 0; j < sband->n_bitrates; j++) {
-                       if (sband->bitrates[j].bitrate == rate) {
-                               rates |= BIT(j);
-                               if (is_basic)
-                                       basic_rates |= BIT(j);
-                               if (rate < min_rate) {
-                                       min_rate = rate;
-                                       min_rate_index = j;
-                               }
-                               break;
-                       }
-               }
-       }
+       ieee80211_get_rates(sband, elems.ext_supp_rates,
+                           elems.ext_supp_rates_len, &rates, &basic_rates,
+                           &have_higher_than_11mbit,
+                           &min_rate, &min_rate_index);
 
        /*
         * some buggy APs don't advertise basic_rates. use the lowest
@@ -1605,7 +1616,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
                sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
 
        if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
-               ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+               ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                elems.ht_cap_elem, &sta->sta.ht_cap);
 
        ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -1974,7 +1985,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 
                sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
 
-               ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
+               ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                elems.ht_cap_elem, &sta->sta.ht_cap);
 
                ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -2128,9 +2139,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, true, true);
        mutex_unlock(&ifmgd->mtx);
-       mutex_lock(&local->mtx);
-       ieee80211_recalc_idle(local);
-       mutex_unlock(&local->mtx);
        /*
         * must be outside lock due to cfg80211,
         * but that's not a problem.
@@ -2138,6 +2146,11 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
        ieee80211_send_deauth_disassoc(sdata, bssid,
                        IEEE80211_STYPE_DEAUTH, reason,
                        NULL, true);
+
+       mutex_lock(&local->mtx);
+       ieee80211_recalc_idle(local);
+       mutex_unlock(&local->mtx);
+
        mutex_lock(&ifmgd->mtx);
 }
 
@@ -2632,6 +2645,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                        ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
 
 
+       if (req->flags & ASSOC_REQ_DISABLE_HT)
+               ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+
+       memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
+       memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
+              sizeof(ifmgd->ht_capa_mask));
+
        if (req->ie && req->ie_len) {
                memcpy(wk->ie, req->ie, req->ie_len);
                wk->ie_len = req->ie_len;
index 3d41441..e4330d8 100644 (file)
@@ -156,7 +156,6 @@ void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
 }
 
 void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool enable_beaconing,
                                 bool offchannel_ps_disable)
 {
        struct ieee80211_sub_if_data *sdata;
@@ -188,11 +187,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        netif_tx_wake_all_queues(sdata->dev);
                }
 
-               /* Check to see if we should re-enable beaconing */
-               if (enable_beaconing &&
-                   (sdata->vif.type == NL80211_IFTYPE_AP ||
-                    sdata->vif.type == NL80211_IFTYPE_ADHOC ||
-                    sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
+               if (sdata->vif.type == NL80211_IFTYPE_AP ||
+                   sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+                   sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
                        ieee80211_bss_info_change_notify(
                                sdata, BSS_CHANGED_BEACON_ENABLED);
        }
@@ -212,8 +209,6 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
                return;
        }
 
-       ieee80211_recalc_idle(local);
-
        if (local->hw_roc_skb) {
                sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
                ieee80211_tx_skb(sdata, local->hw_roc_skb);
@@ -227,6 +222,8 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
                                          GFP_KERNEL);
        }
 
+       ieee80211_recalc_idle(local);
+
        mutex_unlock(&local->mtx);
 }
 
index 9ee7164..596efaf 100644 (file)
@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                ieee80211_bss_info_change_notify(sdata,
                        BSS_CHANGED_BEACON_ENABLED);
 
-               drv_remove_interface(local, &sdata->vif);
+               drv_remove_interface(local, sdata);
        }
 
        /* stop hardware - this must stop RX */
index 58a8955..b39dda5 100644 (file)
@@ -334,8 +334,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 
 
 static void
-calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
-                    struct minstrel_rate *d, struct ieee80211_rate *rate)
+calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
+                   struct ieee80211_rate *rate)
 {
        int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
 
@@ -402,8 +402,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
 
                mr->rix = i;
                mr->bitrate = sband->bitrates[i].bitrate / 5;
-               calc_rate_durations(mi, local, mr,
-                               &sband->bitrates[i]);
+               calc_rate_durations(local, mr, &sband->bitrates[i]);
 
                /* calculate maximum number of retransmissions before
                 * fallback (based on maximum segment size) */
index cdb2853..ff5f7b8 100644 (file)
 /* Transmit duration for the raw data part of an average sized packet */
 #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
 
+/*
+ * Define group sort order: HT40 -> SGI -> #streams
+ */
+#define GROUP_IDX(_streams, _sgi, _ht40)       \
+       MINSTREL_MAX_STREAMS * 2 * _ht40 +      \
+       MINSTREL_MAX_STREAMS * _sgi +           \
+       _streams - 1
+
 /* MCS rate information for an MCS group */
-#define MCS_GROUP(_streams, _sgi, _ht40) {                             \
+#define MCS_GROUP(_streams, _sgi, _ht40)                               \
+       [GROUP_IDX(_streams, _sgi, _ht40)] = {                          \
        .streams = _streams,                                            \
        .flags =                                                        \
                (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |                 \
@@ -58,6 +67,9 @@
  * To enable sufficiently targeted rate sampling, MCS rates are divided into
  * groups, based on the number of streams and flags (HT40, SGI) that they
  * use.
+ *
+ * Sort order has to be fixed for the GROUP_IDX macro to be applicable:
+ * HT40 -> SGI -> #streams
  */
 const struct mcs_group minstrel_mcs_groups[] = {
        MCS_GROUP(1, 0, 0),
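
To make the fixed sort order concrete, here is a small worked expansion of the index formula (illustrative only; it assumes MINSTREL_MAX_STREAMS is 3, as defined in rc80211_minstrel_ht.h):

/*
 * With MINSTREL_MAX_STREAMS == 3 the formula expands to
 *   GROUP_IDX(streams, sgi, ht40) = 6 * ht40 + 3 * sgi + streams - 1
 * which yields twelve groups ordered HT40 -> SGI -> #streams:
 *   GROUP_IDX(1, 0, 0) = 0    20 MHz, long GI,  1 stream
 *   GROUP_IDX(3, 0, 0) = 2    20 MHz, long GI,  3 streams
 *   GROUP_IDX(1, 1, 0) = 3    20 MHz, short GI, 1 stream
 *   GROUP_IDX(1, 0, 1) = 6    40 MHz, long GI,  1 stream
 *   GROUP_IDX(2, 1, 1) = 10   40 MHz, short GI, 2 streams
 *   GROUP_IDX(3, 1, 1) = 11   40 MHz, short GI, 3 streams
 */
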
@@ -102,21 +114,9 @@ minstrel_ewma(int old, int new, int weight)
 static int
 minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
 {
-       int streams = (rate->idx / MCS_GROUP_RATES) + 1;
-       u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
-               if (minstrel_mcs_groups[i].streams != streams)
-                       continue;
-               if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
-                       continue;
-
-               return i;
-       }
-
-       WARN_ON(1);
-       return 0;
+       return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
+                        !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
+                        !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
 }
 
 static inline struct minstrel_rate_stats *
@@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
  * Recalculate success probabilities and counters for a rate using EWMA
  */
 static void
-minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
+minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
 {
        if (unlikely(mr->attempts > 0)) {
                mr->sample_skipped = 0;
@@ -156,8 +156,7 @@ minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr
  * the expected number of retransmissions and their expected length
  */
 static void
-minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
-                    int group, int rate)
+minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
 {
        struct minstrel_rate_stats *mr;
        unsigned int usecs;
@@ -226,8 +225,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
                        mr = &mg->rates[i];
                        mr->retry_updated = false;
                        index = MCS_GROUP_RATES * group + i;
-                       minstrel_calc_rate_ewma(mp, mr);
-                       minstrel_ht_calc_tp(mp, mi, group, i);
+                       minstrel_calc_rate_ewma(mr);
+                       minstrel_ht_calc_tp(mi, group, i);
 
                        if (!mr->cur_tp)
                                continue;
@@ -300,10 +299,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 static bool
 minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
 {
-       if (!rate->count)
+       if (rate->idx < 0)
                return false;
 
-       if (rate->idx < 0)
+       if (!rate->count)
                return false;
 
        return !!(rate->flags & IEEE80211_TX_RC_MCS);
@@ -357,7 +356,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
 }
 
 static void
-minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
+minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -455,7 +454,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
        if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
                minstrel_ht_update_stats(mp, mi);
                if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
-                       minstrel_aggr_check(mp, sta, skb);
+                       minstrel_aggr_check(sta, skb);
        }
 }
 
@@ -515,7 +514,6 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 static void
 minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                      struct ieee80211_tx_rate *rate, int index,
-                     struct ieee80211_tx_rate_control *txrc,
                      bool sample, bool rtscts)
 {
        const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@@ -628,11 +626,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        if (sample_idx >= 0) {
                sample = true;
                minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
-                       txrc, true, false);
+                       true, false);
                info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
        } else {
                minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
-                       txrc, false, false);
+                       false, false);
        }
 
        if (mp->hw->max_rates >= 3) {
@@ -643,13 +641,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                 */
                if (sample_idx >= 0)
                        minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
-                               txrc, false, false);
+                               false, false);
                else
                        minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
-                               txrc, false, true);
+                               false, true);
 
                minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
-                                    txrc, false, !sample);
+                                    false, !sample);
 
                ar[3].count = 0;
                ar[3].idx = -1;
@@ -660,7 +658,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                 * max_tp_rate -> max_prob_rate by default.
                 */
                minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
-                                    txrc, false, !sample);
+                                    false, !sample);
 
                ar[2].count = 0;
                ar[2].idx = -1;
index fb123e2..7d22641 100644 (file)
@@ -748,10 +748,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
        struct ieee80211_local *local = rx->local;
        struct ieee80211_hw *hw = &local->hw;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct sta_info *sta = rx->sta;
        struct tid_ampdu_rx *tid_agg_rx;
        u16 sc;
-       int tid;
+       u8 tid, ack_policy;
 
        if (!ieee80211_is_data_qos(hdr->frame_control))
                goto dont_reorder;
@@ -764,6 +765,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
        if (!sta)
                goto dont_reorder;
 
+       ack_policy = *ieee80211_get_qos_ctl(hdr) &
+                    IEEE80211_QOS_CTL_ACK_POLICY_MASK;
        tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
        tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -774,6 +777,15 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
        if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
                goto dont_reorder;
 
+       /* not part of a BA session */
+       if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+           ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
+               goto dont_reorder;
+
+       /* not actually part of this BA session */
+       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+               goto dont_reorder;
+
        /* new, potentially un-ordered, ampdu frame - process it */
 
        /* reset session timer */
@@ -858,6 +870,13 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
                            rx->sdata->control_port_protocol)
                                return RX_CONTINUE;
                }
+
+               if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+                   cfg80211_rx_spurious_frame(rx->sdata->dev,
+                                              hdr->addr2,
+                                              GFP_ATOMIC))
+                       return RX_DROP_UNUSABLE;
+
                return RX_DROP_MONITOR;
        }
 
@@ -1327,15 +1346,20 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 
                /*
                 * If we receive a 4-addr nullfunc frame from a STA
-                * that was not moved to a 4-addr STA vlan yet, drop
-                * the frame to the monitor interface, to make sure
-                * that hostapd sees it
+                * that was not moved to a 4-addr STA vlan yet, send
+                * the event to userspace and, for older hostapd, drop
+                * the frame to the monitor interface.
                 */
                if (ieee80211_has_a4(hdr->frame_control) &&
                    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
                     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-                     !rx->sdata->u.vlan.sta)))
+                     !rx->sdata->u.vlan.sta))) {
+                       if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
+                               cfg80211_rx_unexpected_4addr_frame(
+                                       rx->sdata->dev, sta->sta.addr,
+                                       GFP_ATOMIC);
                        return RX_DROP_MONITOR;
+               }
                /*
                 * Update counter and free packet here to avoid
                 * counting this as a dropped packet.
@@ -1871,13 +1895,16 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 static ieee80211_rx_result
 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 {
-       struct ieee80211_hdr *hdr;
+       struct ieee80211_hdr *fwd_hdr, *hdr;
+       struct ieee80211_tx_info *info;
        struct ieee80211s_hdr *mesh_hdr;
-       unsigned int hdrlen;
        struct sk_buff *skb = rx->skb, *fwd_skb;
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
+       u16 q, hdrlen;
 
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -1893,15 +1920,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                return RX_CONTINUE;
 
        if (!mesh_hdr->ttl)
-               /* illegal frame */
                return RX_DROP_MONITOR;
 
-       if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
-               IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                               dropped_frames_congestion);
-               return RX_DROP_MONITOR;
-       }
-
        if (mesh_hdr->flags & MESH_FLAGS_AE) {
                struct mesh_path *mppath;
                char *proxied_addr;
@@ -1933,60 +1953,50 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
            compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
                return RX_CONTINUE;
 
-       mesh_hdr->ttl--;
+       q = ieee80211_select_queue_80211(local, skb, hdr);
+       if (ieee80211_queue_stopped(&local->hw, q)) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+               return RX_DROP_MONITOR;
+       }
+       skb_set_queue_mapping(skb, q);
 
-       if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
-               if (!mesh_hdr->ttl)
-                       IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
-                                                    dropped_frames_ttl);
-               else {
-                       struct ieee80211_hdr *fwd_hdr;
-                       struct ieee80211_tx_info *info;
-
-                       fwd_skb = skb_copy(skb, GFP_ATOMIC);
-
-                       if (!fwd_skb && net_ratelimit())
-                               printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
-                                                  sdata->name);
-                       if (!fwd_skb)
-                               goto out;
-
-                       fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
-                       memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
-                       info = IEEE80211_SKB_CB(fwd_skb);
-                       memset(info, 0, sizeof(*info));
-                       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-                       info->control.vif = &rx->sdata->vif;
-                       if (is_multicast_ether_addr(fwd_hdr->addr1)) {
-                               IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                                               fwded_mcast);
-                               skb_set_queue_mapping(fwd_skb,
-                                       ieee80211_select_queue(sdata, fwd_skb));
-                               ieee80211_set_qos_hdr(sdata, fwd_skb);
-                       } else {
-                               int err;
-                               /*
-                                * Save TA to addr1 to send TA a path error if a
-                                * suitable next hop is not found
-                                */
-                               memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
-                                               ETH_ALEN);
-                               err = mesh_nexthop_lookup(fwd_skb, sdata);
-                               /* Failed to immediately resolve next hop:
-                                * fwded frame was dropped or will be added
-                                * later to the pending skb queue.  */
-                               if (err)
-                                       return RX_DROP_MONITOR;
-
-                               IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                                               fwded_unicast);
-                       }
-                       IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
-                                                    fwded_frames);
-                       ieee80211_add_pending_skb(local, fwd_skb);
-               }
+       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+               goto out;
+
+       if (!--mesh_hdr->ttl) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+               return RX_DROP_MONITOR;
+       }
+
+       fwd_skb = skb_copy(skb, GFP_ATOMIC);
+       if (!fwd_skb) {
+               if (net_ratelimit())
+                       printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
+                                       sdata->name);
+               goto out;
+       }
+
+       fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
+       info = IEEE80211_SKB_CB(fwd_skb);
+       memset(info, 0, sizeof(*info));
+       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+       info->control.vif = &rx->sdata->vif;
+       info->control.jiffies = jiffies;
+       if (is_multicast_ether_addr(fwd_hdr->addr1)) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
+               memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
+       } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) {
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
+       } else {
+               /* unable to resolve next hop */
+               mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
+                                   0, reason, fwd_hdr->addr2, sdata);
+               IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+               return RX_DROP_MONITOR;
        }
 
+       IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
+       ieee80211_add_pending_skb(local, fwd_skb);
  out:
        if (is_multicast_ether_addr(hdr->addr1) ||
            sdata->dev->flags & IFF_PROMISC)
@@ -2014,12 +2024,17 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
                return RX_DROP_MONITOR;
 
        /*
-        * Allow the cooked monitor interface of an AP to see 4-addr frames so
-        * that a 4-addr station can be detected and moved into a separate VLAN
+        * Send unexpected-4addr-frame event to hostapd. For older versions,
+        * also drop the frame to cooked monitor interfaces.
         */
        if (ieee80211_has_a4(hdr->frame_control) &&
-           sdata->vif.type == NL80211_IFTYPE_AP)
+           sdata->vif.type == NL80211_IFTYPE_AP) {
+               if (rx->sta &&
+                   !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
+                       cfg80211_rx_unexpected_4addr_frame(
+                               rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
                return RX_DROP_MONITOR;
+       }
 
        err = __ieee80211_data_to_8023(rx, &port_control);
        if (unlikely(err))
@@ -2174,6 +2189,18 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
        if (!ieee80211_is_mgmt(mgmt->frame_control))
                return RX_DROP_MONITOR;
 
+       if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
+           ieee80211_is_beacon(mgmt->frame_control) &&
+           !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
+               struct ieee80211_rx_status *status;
+
+               status = IEEE80211_SKB_RXCB(rx->skb);
+               cfg80211_report_obss_beacon(rx->local->hw.wiphy,
+                                           rx->skb->data, rx->skb->len,
+                                           status->freq, GFP_ATOMIC);
+               rx->flags |= IEEE80211_RX_BEACON_REPORTED;
+       }
+
        if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
                return RX_DROP_MONITOR;
 
@@ -2207,15 +2234,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
        switch (mgmt->u.action.category) {
        case WLAN_CATEGORY_BACK:
-               /*
-                * The aggregation code is not prepared to handle
-                * anything but STA/AP due to the BSSID handling;
-                * IBSS could work in the code but isn't supported
-                * by drivers or the standard.
-                */
                if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+                   sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
                    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
-                   sdata->vif.type != NL80211_IFTYPE_AP)
+                   sdata->vif.type != NL80211_IFTYPE_AP &&
+                   sdata->vif.type != NL80211_IFTYPE_ADHOC)
                        break;
 
                /* verify action_code is present */
@@ -2493,6 +2516,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
                goto out_free_skb;
        rx->flags |= IEEE80211_RX_CMNTR;
 
+       /* If there are no cooked monitor interfaces, just free the SKB */
+       if (!local->cooked_mntrs)
+               goto out_free_skb;
+
        if (skb_headroom(skb) < sizeof(*rthdr) &&
            pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
                goto out_free_skb;
@@ -2770,10 +2797,17 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
                                return 0;
                } else if (!ieee80211_bssid_match(bssid,
                                        sdata->vif.addr)) {
+                       /*
+                        * Accept public action frames even when the
+                        * BSSID doesn't match, this is used for P2P
+                        * and location updates. Note that mac80211
+                        * itself never looks at these frames.
+                        */
+                       if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+                           ieee80211_is_public_action(hdr, skb->len))
+                               return 1;
                        if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
-                           !ieee80211_is_beacon(hdr->frame_control) &&
-                           !(ieee80211_is_action(hdr->frame_control) &&
-                             sdata->vif.p2p))
+                           !ieee80211_is_beacon(hdr->frame_control))
                                return 0;
                        status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
                }
index 105436d..2c5041c 100644 (file)
@@ -213,12 +213,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        if (bss)
                ieee80211_rx_bss_put(sdata->local, bss);
 
-       /* If we are on-operating-channel, and this packet is for the
-        * current channel, pass the pkt on up the stack so that
-        * the rest of the stack can make use of it.
-        */
-       if (ieee80211_cfg_on_oper_channel(sdata->local)
-           && (channel == sdata->local->oper_channel))
+       if (channel == sdata->local->oper_channel)
                return RX_CONTINUE;
 
        dev_kfree_skb(skb);
@@ -264,8 +259,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
                                       bool was_hw_scan)
 {
        struct ieee80211_local *local = hw_to_local(hw);
-       bool on_oper_chan;
-       bool enable_beacons = false;
 
        lockdep_assert_held(&local->mtx);
 
@@ -298,25 +291,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
        local->scanning = 0;
        local->scan_channel = NULL;
 
-       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
-       if (was_hw_scan || !on_oper_chan)
-               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-       else
-               /* Set power back to normal operating levels. */
-               ieee80211_hw_config(local, 0);
+       /* Set power back to normal operating levels. */
+       ieee80211_hw_config(local, 0);
 
        if (!was_hw_scan) {
-               bool on_oper_chan2;
                ieee80211_configure_filter(local);
                drv_sw_scan_complete(local);
-               on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
-               /* We should always be on-channel at this point. */
-               WARN_ON(!on_oper_chan2);
-               if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
-                       enable_beacons = true;
-
-               ieee80211_offchannel_return(local, enable_beacons, true);
+               ieee80211_offchannel_return(local, true);
        }
 
        ieee80211_recalc_idle(local);
@@ -361,11 +342,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
        local->next_scan_state = SCAN_DECISION;
        local->scan_channel_idx = 0;
 
-       /* We always want to use off-channel PS, even if we
-        * are not really leaving oper-channel.  Don't
-        * tell the AP though, as long as we are on-channel.
-        */
-       ieee80211_offchannel_enable_all_ps(local, false);
+       ieee80211_offchannel_stop_vifs(local, true);
 
        ieee80211_configure_filter(local);
 
@@ -373,8 +350,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
        ieee80211_hw_config(local, 0);
 
        ieee80211_queue_delayed_work(&local->hw,
-                                    &local->scan_work,
-                                    IEEE80211_CHANNEL_TIME);
+                                    &local->scan_work, 0);
 
        return 0;
 }
@@ -510,96 +486,39 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
 
        next_chan = local->scan_req->channels[local->scan_channel_idx];
 
-       if (ieee80211_cfg_on_oper_channel(local)) {
-               /* We're currently on operating channel. */
-               if (next_chan == local->oper_channel)
-                       /* We don't need to move off of operating channel. */
-                       local->next_scan_state = SCAN_SET_CHANNEL;
-               else
-                       /*
-                        * We do need to leave operating channel, as next
-                        * scan is somewhere else.
-                        */
-                       local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
-       } else {
-               /*
-                * we're currently scanning a different channel, let's
-                * see if we can scan another channel without interfering
-                * with the current traffic situation.
-                *
-                * Since we don't know if the AP has pending frames for us
-                * we can only check for our tx queues and use the current
-                * pm_qos requirements for rx. Hence, if no tx traffic occurs
-                * at all we will scan as many channels in a row as the pm_qos
-                * latency allows us to. Additionally we also check for the
-                * currently negotiated listen interval to prevent losing
-                * frames unnecessarily.
-                *
-                * Otherwise switch back to the operating channel.
-                */
-
-               bad_latency = time_after(jiffies +
-                               ieee80211_scan_get_channel_time(next_chan),
-                               local->leave_oper_channel_time +
-                               usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
-
-               listen_int_exceeded = time_after(jiffies +
-                               ieee80211_scan_get_channel_time(next_chan),
-                               local->leave_oper_channel_time +
-                               usecs_to_jiffies(min_beacon_int * 1024) *
-                               local->hw.conf.listen_interval);
-
-               if (associated && ( !tx_empty || bad_latency ||
-                   listen_int_exceeded))
-                       local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
-               else
-                       local->next_scan_state = SCAN_SET_CHANNEL;
-       }
-
-       *next_delay = 0;
-}
-
-static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
-                                                   unsigned long *next_delay)
-{
-       /* PS will already be in off-channel mode,
-        * we do that once at the beginning of scanning.
-        */
-       ieee80211_offchannel_stop_vifs(local, false);
-
        /*
-        * What if the nullfunc frames didn't arrive?
+        * we're currently scanning a different channel, let's
+        * see if we can scan another channel without interfering
+        * with the current traffic situation.
+        *
+        * Since we don't know if the AP has pending frames for us
+        * we can only check for our tx queues and use the current
+        * pm_qos requirements for rx. Hence, if no tx traffic occurs
+        * at all we will scan as many channels in a row as the pm_qos
+        * latency allows us to. Additionally we also check for the
+        * currently negotiated listen interval to prevent losing
+        * frames unnecessarily.
+        *
+        * Otherwise switch back to the operating channel.
         */
-       drv_flush(local, false);
-       if (local->ops->flush)
-               *next_delay = 0;
-       else
-               *next_delay = HZ / 10;
 
-       /* remember when we left the operating channel */
-       local->leave_oper_channel_time = jiffies;
+       bad_latency = time_after(jiffies +
+                       ieee80211_scan_get_channel_time(next_chan),
+                       local->leave_oper_channel_time +
+                       usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
 
-       /* advance to the next channel to be scanned */
-       local->next_scan_state = SCAN_SET_CHANNEL;
-}
-
-static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
-                                                   unsigned long *next_delay)
-{
-       /* switch back to the operating channel */
-       local->scan_channel = NULL;
-       if (!ieee80211_cfg_on_oper_channel(local))
-               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+       listen_int_exceeded = time_after(jiffies +
+                       ieee80211_scan_get_channel_time(next_chan),
+                       local->leave_oper_channel_time +
+                       usecs_to_jiffies(min_beacon_int * 1024) *
+                       local->hw.conf.listen_interval);
 
-       /*
-        * Re-enable vifs and beaconing.  Leave PS
-        * in off-channel state..will put that back
-        * on-channel at the end of scanning.
-        */
-       ieee80211_offchannel_return(local, true, false);
+       if (associated && (!tx_empty || bad_latency || listen_int_exceeded))
+               local->next_scan_state = SCAN_SUSPEND;
+       else
+               local->next_scan_state = SCAN_SET_CHANNEL;
 
-       *next_delay = HZ / 5;
-       local->next_scan_state = SCAN_DECISION;
+       *next_delay = 0;
 }
 
 static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
@@ -613,10 +532,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
 
        local->scan_channel = chan;
 
-       /* Only call hw-config if we really need to change channels. */
-       if (chan != local->hw.conf.channel)
-               if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
-                       skip = 1;
+       if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+               skip = 1;
 
        /* advance state machine to next channel/band */
        local->scan_channel_idx++;
@@ -673,6 +590,44 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
        local->next_scan_state = SCAN_DECISION;
 }
 
+static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
+                                        unsigned long *next_delay)
+{
+       /* switch back to the operating channel */
+       local->scan_channel = NULL;
+       ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+       /*
+        * Re-enable vifs and beaconing.  Leave PS in
+        * off-channel state; we will put that back
+        * on-channel at the end of scanning.
+        */
+       ieee80211_offchannel_return(local, false);
+
+       *next_delay = HZ / 5;
+       /* afterwards, resume scan & go to next channel */
+       local->next_scan_state = SCAN_RESUME;
+}
+
+static void ieee80211_scan_state_resume(struct ieee80211_local *local,
+                                       unsigned long *next_delay)
+{
+       /* PS already is in off-channel mode */
+       ieee80211_offchannel_stop_vifs(local, false);
+
+       if (local->ops->flush) {
+               drv_flush(local, false);
+               *next_delay = 0;
+       } else
+               *next_delay = HZ / 10;
+
+       /* remember when we left the operating channel */
+       local->leave_oper_channel_time = jiffies;
+
+       /* advance to the next channel to be scanned */
+       local->next_scan_state = SCAN_DECISION;
+}
+
 void ieee80211_scan_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
@@ -743,11 +698,11 @@ void ieee80211_scan_work(struct work_struct *work)
                case SCAN_SEND_PROBE:
                        ieee80211_scan_state_send_probe(local, &next_delay);
                        break;
-               case SCAN_LEAVE_OPER_CHANNEL:
-                       ieee80211_scan_state_leave_oper_channel(local, &next_delay);
+               case SCAN_SUSPEND:
+                       ieee80211_scan_state_suspend(local, &next_delay);
                        break;
-               case SCAN_ENTER_OPER_CHANNEL:
-                       ieee80211_scan_state_enter_oper_channel(local, &next_delay);
+               case SCAN_RESUME:
+                       ieee80211_scan_state_resume(local, &next_delay);
                        break;
                }
        } while (next_delay == 0);
index 8eaa746..f982352 100644 (file)
@@ -351,10 +351,6 @@ static int sta_info_finish_insert(struct sta_info *sta,
 
        if (!sta->dummy || dummy_reinsert) {
                /* notify driver */
-               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-                       sdata = container_of(sdata->bss,
-                                            struct ieee80211_sub_if_data,
-                                            u.ap);
                err = drv_sta_add(local, sdata, &sta->sta);
                if (err) {
                        if (!async)
index 8c8ce05..1a14fab 100644 (file)
@@ -30,7 +30,6 @@
  *     when virtual port control is not in use.
  * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
  *     frames.
- * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
  * @WLAN_STA_WME: Station is a QoS-STA.
  * @WLAN_STA_WDS: Station is one of our WDS peers.
  * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
@@ -52,6 +51,7 @@
  *     unblocks the station.
  * @WLAN_STA_SP: Station is in a service period, so don't try to
  *     reply to other uAPSD trigger frames or PS-Poll.
+ * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
  */
 enum ieee80211_sta_info_flags {
        WLAN_STA_AUTH,
@@ -59,7 +59,6 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_PS_STA,
        WLAN_STA_AUTHORIZED,
        WLAN_STA_SHORT_PREAMBLE,
-       WLAN_STA_ASSOC_AP,
        WLAN_STA_WME,
        WLAN_STA_WDS,
        WLAN_STA_CLEAR_PS_FILT,
@@ -71,6 +70,7 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_TDLS_PEER_AUTH,
        WLAN_STA_UAPSD,
        WLAN_STA_SP,
+       WLAN_STA_4ADDR_EVENT,
 };
 
 #define STA_TID_NUM 16
@@ -88,6 +88,7 @@ enum ieee80211_sta_info_flags {
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
  * @rcu_head: rcu head for freeing structure
+ * @session_timer: check if we keep Tx-ing on the TID (by timeout value)
  * @addba_resp_timer: timer for peer's response to addba request
  * @pending: pending frames queue -- use sta's spinlock to protect
  * @dialog_token: dialog token for aggregation session
@@ -110,6 +111,7 @@ enum ieee80211_sta_info_flags {
  */
 struct tid_ampdu_tx {
        struct rcu_head rcu_head;
+       struct timer_list session_timer;
        struct timer_list addba_resp_timer;
        struct sk_buff_head pending;
        unsigned long state;
@@ -390,6 +392,12 @@ static inline int test_and_clear_sta_flag(struct sta_info *sta,
        return test_and_clear_bit(flag, &sta->_flags);
 }
 
+static inline int test_and_set_sta_flag(struct sta_info *sta,
+                                       enum ieee80211_sta_info_flags flag)
+{
+       return test_and_set_bit(flag, &sta->_flags);
+}
+
 void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
                             struct tid_ampdu_tx *tid_tx);
 
@@ -491,7 +499,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
  */
 int sta_info_insert(struct sta_info *sta);
 int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
-int sta_info_insert_atomic(struct sta_info *sta);
 int sta_info_reinsert(struct sta_info *sta);
 
 int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
index 16518f3..46222ce 100644 (file)
@@ -517,27 +517,54 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
-               struct ieee80211_work *wk;
                u64 cookie = (unsigned long)skb;
 
-               rcu_read_lock();
-               list_for_each_entry_rcu(wk, &local->work_list, list) {
-                       if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
-                               continue;
-                       if (wk->offchan_tx.frame != skb)
-                               continue;
-                       wk->offchan_tx.status = true;
-                       break;
-               }
-               rcu_read_unlock();
-               if (local->hw_roc_skb_for_status == skb) {
-                       cookie = local->hw_roc_cookie ^ 2;
-                       local->hw_roc_skb_for_status = NULL;
+               if (ieee80211_is_nullfunc(hdr->frame_control) ||
+                   ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+                       bool acked = info->flags & IEEE80211_TX_STAT_ACK;
+                       cfg80211_probe_status(skb->dev, hdr->addr1,
+                                             cookie, acked, GFP_ATOMIC);
+               } else {
+                       struct ieee80211_work *wk;
+
+                       rcu_read_lock();
+                       list_for_each_entry_rcu(wk, &local->work_list, list) {
+                               if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
+                                       continue;
+                               if (wk->offchan_tx.frame != skb)
+                                       continue;
+                               wk->offchan_tx.status = true;
+                               break;
+                       }
+                       rcu_read_unlock();
+                       if (local->hw_roc_skb_for_status == skb) {
+                               cookie = local->hw_roc_cookie ^ 2;
+                               local->hw_roc_skb_for_status = NULL;
+                       }
+
+                       cfg80211_mgmt_tx_status(
+                               skb->dev, cookie, skb->data, skb->len,
+                               !!(info->flags & IEEE80211_TX_STAT_ACK),
+                               GFP_ATOMIC);
                }
+       }
 
-               cfg80211_mgmt_tx_status(
-                       skb->dev, cookie, skb->data, skb->len,
-                       !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
+       if (unlikely(info->ack_frame_id)) {
+               struct sk_buff *ack_skb;
+               unsigned long flags;
+
+               spin_lock_irqsave(&local->ack_status_lock, flags);
+               ack_skb = idr_find(&local->ack_status_frames,
+                                  info->ack_frame_id);
+               if (ack_skb)
+                       idr_remove(&local->ack_status_frames,
+                                  info->ack_frame_id);
+               spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+               /* consumes ack_skb */
+               if (ack_skb)
+                       skb_complete_wifi_ack(ack_skb,
+                               info->flags & IEEE80211_TX_STAT_ACK);
        }
 
        /* this was a transmitted frame, but now we want to reuse it */
@@ -610,3 +637,29 @@ void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
                                    num_packets, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(ieee80211_report_low_ack);
+
+void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       if (unlikely(info->ack_frame_id)) {
+               struct sk_buff *ack_skb;
+               unsigned long flags;
+
+               spin_lock_irqsave(&local->ack_status_lock, flags);
+               ack_skb = idr_find(&local->ack_status_frames,
+                                  info->ack_frame_id);
+               if (ack_skb)
+                       idr_remove(&local->ack_status_frames,
+                                  info->ack_frame_id);
+               spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+               /* consumes ack_skb */
+               if (ack_skb)
+                       dev_kfree_skb_any(ack_skb);
+       }
+
+       dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ieee80211_free_txskb);
index 1f8b120..e74652d 100644 (file)
@@ -36,7 +36,8 @@
 
 /* misc utils */
 
-static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
+static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
+                                struct sk_buff *skb, int group_addr,
                                 int next_frag_len)
 {
        int rate, mrate, erp, dur, i;
@@ -44,7 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
        struct ieee80211_local *local = tx->local;
        struct ieee80211_supported_band *sband;
        struct ieee80211_hdr *hdr;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
        /* assume HW handles this */
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
@@ -76,7 +77,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
         *   at the highest possible rate belonging to the PHY rates in the
         *   BSSBasicRateSet
         */
-       hdr = (struct ieee80211_hdr *)tx->skb->data;
+       hdr = (struct ieee80211_hdr *)skb->data;
        if (ieee80211_is_ctl(hdr->frame_control)) {
                /* TODO: These control frames are not currently sent by
                 * mac80211, but should they be implemented, this function
@@ -150,11 +151,15 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
                rate = mrate;
        }
 
-       /* Time needed to transmit ACK
-        * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
-        * to closest integer */
-
-       dur = ieee80211_frame_duration(local, 10, rate, erp,
+       /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
+       if (ieee80211_is_data_qos(hdr->frame_control) &&
+           *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
+               dur = 0;
+       else
+               /* Time needed to transmit ACK
+                * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
+                * to closest integer */
+               dur = ieee80211_frame_duration(local, 10, rate, erp,
                                tx->sdata->vif.bss_conf.use_short_preamble);
 
        if (next_frag_len) {
@@ -572,8 +577,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                switch (tx->key->conf.cipher) {
                case WLAN_CIPHER_SUITE_WEP40:
                case WLAN_CIPHER_SUITE_WEP104:
-                       if (ieee80211_is_auth(hdr->frame_control))
-                               break;
                case WLAN_CIPHER_SUITE_TKIP:
                        if (!ieee80211_is_data_present(hdr->frame_control))
                                tx->key = NULL;
@@ -637,6 +640,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        else
                txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+                   tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
                    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
 
        /* set up RTS protection if desired */
@@ -844,11 +848,13 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
        return TX_CONTINUE;
 }
 
-static int ieee80211_fragment(struct ieee80211_local *local,
+static int ieee80211_fragment(struct ieee80211_tx_data *tx,
                              struct sk_buff *skb, int hdrlen,
                              int frag_threshold)
 {
-       struct sk_buff *tail = skb, *tmp;
+       struct ieee80211_local *local = tx->local;
+       struct ieee80211_tx_info *info;
+       struct sk_buff *tmp;
        int per_fragm = frag_threshold - hdrlen - FCS_LEN;
        int pos = hdrlen + per_fragm;
        int rem = skb->len - hdrlen - per_fragm;
@@ -856,6 +862,8 @@ static int ieee80211_fragment(struct ieee80211_local *local,
        if (WARN_ON(rem < 0))
                return -EINVAL;
 
+       /* first fragment was already added to queue by caller */
+
        while (rem) {
                int fraglen = per_fragm;
 
@@ -868,12 +876,21 @@ static int ieee80211_fragment(struct ieee80211_local *local,
                                    IEEE80211_ENCRYPT_TAILROOM);
                if (!tmp)
                        return -ENOMEM;
-               tail->next = tmp;
-               tail = tmp;
+
+               __skb_queue_tail(&tx->skbs, tmp);
+
                skb_reserve(tmp, local->tx_headroom +
                                 IEEE80211_ENCRYPT_HEADROOM);
                /* copy control information */
                memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
+
+               info = IEEE80211_SKB_CB(tmp);
+               info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
+                                IEEE80211_TX_CTL_FIRST_FRAGMENT);
+
+               if (rem)
+                       info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
+
                skb_copy_queue_mapping(tmp, skb);
                tmp->priority = skb->priority;
                tmp->dev = skb->dev;
@@ -885,6 +902,7 @@ static int ieee80211_fragment(struct ieee80211_local *local,
                pos += fraglen;
        }
 
+       /* adjust first fragment's length */
        skb->len = hdrlen + per_fragm;
        return 0;
 }
@@ -899,6 +917,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
        int hdrlen;
        int fragnum;
 
+       /* no matter what happens, tx->skb moves to tx->skbs */
+       __skb_queue_tail(&tx->skbs, skb);
+       tx->skb = NULL;
+
        if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
                return TX_CONTINUE;
 
@@ -927,21 +949,21 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
         * of the fragments then we will simply pretend to accept the skb
         * but store it away as pending.
         */
-       if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
+       if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
                return TX_DROP;
 
        /* update duration/seq/flags of fragments */
        fragnum = 0;
-       do {
+
+       skb_queue_walk(&tx->skbs, skb) {
                int next_len;
                const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
 
                hdr = (void *)skb->data;
                info = IEEE80211_SKB_CB(skb);
 
-               if (skb->next) {
+               if (!skb_queue_is_last(&tx->skbs, skb)) {
                        hdr->frame_control |= morefrags;
-                       next_len = skb->next->len;
                        /*
                         * No multi-rate retries for fragmented frames, that
                         * would completely throw off the NAV at other STAs.
@@ -956,10 +978,9 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
                        hdr->frame_control &= ~morefrags;
                        next_len = 0;
                }
-               hdr->duration_id = ieee80211_duration(tx, 0, next_len);
                hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
                fragnum++;
-       } while ((skb = skb->next));
+       }
 
        return TX_CONTINUE;
 }
@@ -967,16 +988,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
+       struct sk_buff *skb;
 
        if (!tx->sta)
                return TX_CONTINUE;
 
        tx->sta->tx_packets++;
-       do {
+       skb_queue_walk(&tx->skbs, skb) {
                tx->sta->tx_fragments++;
                tx->sta->tx_bytes += skb->len;
-       } while ((skb = skb->next));
+       }
 
        return TX_CONTINUE;
 }
@@ -1015,21 +1036,25 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
+       struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        int next_len;
        bool group_addr;
 
-       do {
+       skb_queue_walk(&tx->skbs, skb) {
                hdr = (void *) skb->data;
                if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
                        break; /* must not overwrite AID */
-               next_len = skb->next ? skb->next->len : 0;
+               if (!skb_queue_is_last(&tx->skbs, skb)) {
+                       struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
+                       next_len = next->len;
+               } else
+                       next_len = 0;
                group_addr = is_multicast_ether_addr(hdr->addr1);
 
                hdr->duration_id =
-                       ieee80211_duration(tx, group_addr, next_len);
-       } while ((skb = skb->next));
+                       ieee80211_duration(tx, skb, group_addr, next_len);
+       }
 
        return TX_CONTINUE;
 }
@@ -1043,9 +1068,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                                  int tid)
 {
        bool queued = false;
+       bool reset_agg_timer = false;
 
        if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                info->flags |= IEEE80211_TX_CTL_AMPDU;
+               reset_agg_timer = true;
        } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /*
                 * nothing -- this aggregation session is being started
@@ -1077,6 +1104,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                        /* do nothing, let packet pass through */
                } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                        info->flags |= IEEE80211_TX_CTL_AMPDU;
+                       reset_agg_timer = true;
                } else {
                        queued = true;
                        info->control.vif = &tx->sdata->vif;
@@ -1086,6 +1114,11 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                spin_unlock(&tx->sta->lock);
        }
 
+       /* reset session timer */
+       if (reset_agg_timer && tid_tx->timeout)
+               mod_timer(&tid_tx->session_timer,
+                         TU_TO_EXP_TIME(tid_tx->timeout));
+
        return queued;
 }
 
@@ -1108,6 +1141,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        tx->local = local;
        tx->sdata = sdata;
        tx->channel = local->hw.conf.channel;
+       __skb_queue_head_init(&tx->skbs);
 
        /*
         * If this flag is set to true anywhere, and we get here,
@@ -1152,16 +1186,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        if (is_multicast_ether_addr(hdr->addr1)) {
                tx->flags &= ~IEEE80211_TX_UNICAST;
                info->flags |= IEEE80211_TX_CTL_NO_ACK;
-       } else {
+       } else
                tx->flags |= IEEE80211_TX_UNICAST;
-               if (unlikely(local->wifi_wme_noack_test))
-                       info->flags |= IEEE80211_TX_CTL_NO_ACK;
-               /*
-                * Flags are initialized to 0. Hence, no need to
-                * explicitly unset IEEE80211_TX_CTL_NO_ACK since
-                * it might already be set for injected frames.
-                */
-       }
 
        if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
                if (!(tx->flags & IEEE80211_TX_UNICAST) ||
@@ -1180,22 +1206,18 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
        return TX_CONTINUE;
 }
 
-/*
- * Returns false if the frame couldn't be transmitted but was queued instead.
- */
-static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
-                          struct sta_info *sta, bool txpending)
+static bool ieee80211_tx_frags(struct ieee80211_local *local,
+                              struct ieee80211_vif *vif,
+                              struct ieee80211_sta *sta,
+                              struct sk_buff_head *skbs,
+                              bool txpending)
 {
-       struct sk_buff *skb = *skbp, *next;
+       struct sk_buff *skb, *tmp;
        struct ieee80211_tx_info *info;
-       struct ieee80211_sub_if_data *sdata;
        unsigned long flags;
-       int len;
-       bool fragm = false;
 
-       while (skb) {
+       skb_queue_walk_safe(skbs, skb, tmp) {
                int q = skb_get_queue_mapping(skb);
-               __le16 fc;
 
                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
                if (local->queue_stop_reasons[q] ||
@@ -1205,24 +1227,11 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
                         * transmission from the tx-pending tasklet when the
                         * queue is woken again.
                         */
-
-                       do {
-                               next = skb->next;
-                               skb->next = NULL;
-                               /*
-                                * NB: If txpending is true, next must already
-                                * be NULL since we must've gone through this
-                                * loop before already; therefore we can just
-                                * queue the frame to the head without worrying
-                                * about reordering of fragments.
-                                */
-                               if (unlikely(txpending))
-                                       __skb_queue_head(&local->pending[q],
-                                                        skb);
-                               else
-                                       __skb_queue_tail(&local->pending[q],
-                                                        skb);
-                       } while ((skb = next));
+                       if (txpending)
+                               skb_queue_splice_init(skbs, &local->pending[q]);
+                       else
+                               skb_queue_splice_tail_init(skbs,
+                                                          &local->pending[q]);
 
                        spin_unlock_irqrestore(&local->queue_stop_reason_lock,
                                               flags);
@@ -1231,47 +1240,72 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
                info = IEEE80211_SKB_CB(skb);
+               info->control.vif = vif;
+               info->control.sta = sta;
 
-               if (fragm)
-                       info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
-                                        IEEE80211_TX_CTL_FIRST_FRAGMENT);
-
-               next = skb->next;
-               len = skb->len;
+               __skb_unlink(skb, skbs);
+               drv_tx(local, skb);
+       }
 
-               if (next)
-                       info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
+       return true;
+}
 
-               sdata = vif_to_sdata(info->control.vif);
+/*
+ * Returns false if the frame couldn't be transmitted but was queued instead.
+ */
+static bool __ieee80211_tx(struct ieee80211_local *local,
+                          struct sk_buff_head *skbs, int led_len,
+                          struct sta_info *sta, bool txpending)
+{
+       struct ieee80211_tx_info *info;
+       struct ieee80211_sub_if_data *sdata;
+       struct ieee80211_vif *vif;
+       struct ieee80211_sta *pubsta;
+       struct sk_buff *skb;
+       bool result = true;
+       __le16 fc;
 
-               switch (sdata->vif.type) {
-               case NL80211_IFTYPE_MONITOR:
-                       info->control.vif = NULL;
-                       break;
-               case NL80211_IFTYPE_AP_VLAN:
-                       info->control.vif = &container_of(sdata->bss,
-                               struct ieee80211_sub_if_data, u.ap)->vif;
-                       break;
-               default:
-                       /* keep */
-                       break;
-               }
+       if (WARN_ON(skb_queue_empty(skbs)))
+               return true;
 
-               if (sta && sta->uploaded)
-                       info->control.sta = &sta->sta;
-               else
-                       info->control.sta = NULL;
+       skb = skb_peek(skbs);
+       fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
+       info = IEEE80211_SKB_CB(skb);
+       sdata = vif_to_sdata(info->control.vif);
+       if (sta && !sta->uploaded)
+               sta = NULL;
 
-               fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
-               drv_tx(local, skb);
+       if (sta)
+               pubsta = &sta->sta;
+       else
+               pubsta = NULL;
 
-               ieee80211_tpt_led_trig_tx(local, fc, len);
-               *skbp = skb = next;
-               ieee80211_led_tx(local, 1);
-               fragm = true;
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_MONITOR:
+               sdata = NULL;
+               vif = NULL;
+               break;
+       case NL80211_IFTYPE_AP_VLAN:
+               sdata = container_of(sdata->bss,
+                                    struct ieee80211_sub_if_data, u.ap);
+               /* fall through */
+       default:
+               vif = &sdata->vif;
+               break;
        }
 
-       return true;
+       if (local->ops->tx_frags)
+               drv_tx_frags(local, vif, pubsta, skbs);
+       else
+               result = ieee80211_tx_frags(local, vif, pubsta, skbs,
+                                           txpending);
+
+       ieee80211_tpt_led_trig_tx(local, fc, led_len);
+       ieee80211_led_tx(local, 1);
+
+       WARN_ON_ONCE(!skb_queue_empty(skbs));
+
+       return result;
 }
 
 /*
@@ -1280,8 +1314,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
  */
 static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
        ieee80211_tx_result res = TX_DROP;
 
 #define CALL_TXH(txh) \
@@ -1299,8 +1332,11 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
        if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
                CALL_TXH(ieee80211_tx_h_rate_ctrl);
 
-       if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
+               __skb_queue_tail(&tx->skbs, tx->skb);
+               tx->skb = NULL;
                goto txh_done;
+       }
 
        CALL_TXH(ieee80211_tx_h_michael_mic_add);
        CALL_TXH(ieee80211_tx_h_sequence);
@@ -1315,13 +1351,10 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
  txh_done:
        if (unlikely(res == TX_DROP)) {
                I802_DEBUG_INC(tx->local->tx_handlers_drop);
-               while (skb) {
-                       struct sk_buff *next;
-
-                       next = skb->next;
-                       dev_kfree_skb(skb);
-                       skb = next;
-               }
+               if (tx->skb)
+                       dev_kfree_skb(tx->skb);
+               else
+                       __skb_queue_purge(&tx->skbs);
                return -1;
        } else if (unlikely(res == TX_QUEUED)) {
                I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1342,6 +1375,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx_result res_prepare;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        bool result = true;
+       int led_len;
 
        if (unlikely(skb->len < 10)) {
                dev_kfree_skb(skb);
@@ -1351,6 +1385,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
        rcu_read_lock();
 
        /* initialises tx */
+       led_len = skb->len;
        res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
 
        if (unlikely(res_prepare == TX_DROP)) {
@@ -1364,7 +1399,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
        info->band = tx.channel->band;
 
        if (!invoke_tx_handlers(&tx))
-               result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
+               result = __ieee80211_tx(local, &tx.skbs, led_len,
+                                       tx.sta, txpending);
  out:
        rcu_read_unlock();
        return result;
@@ -1431,7 +1467,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
        if (ieee80211_vif_is_mesh(&sdata->vif) &&
            ieee80211_is_data(hdr->frame_control) &&
                !is_multicast_ether_addr(hdr->addr1))
-                       if (mesh_nexthop_lookup(skb, sdata)) {
+                       if (mesh_nexthop_resolve(skb, sdata)) {
                                /* skb queued: don't free */
                                rcu_read_unlock();
                                return;
@@ -1685,8 +1721,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        int nh_pos, h_pos;
        struct sta_info *sta = NULL;
        bool wme_sta = false, authorized = false, tdls_auth = false;
-       struct sk_buff *tmp_skb;
        bool tdls_direct = false;
+       bool multicast;
+       u32 info_flags = 0;
+       u16 info_id = 0;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                ret = NETDEV_TX_OK;
@@ -1873,7 +1911,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
         * if it is a multicast address (which can only happen
         * in AP mode)
         */
-       if (!is_multicast_ether_addr(hdr.addr1)) {
+       multicast = is_multicast_ether_addr(hdr.addr1);
+       if (!multicast) {
                rcu_read_lock();
                sta = sta_info_get(sdata, hdr.addr1);
                if (sta) {
@@ -1914,11 +1953,54 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                goto fail;
        }
 
+       if (unlikely(!multicast && skb->sk &&
+                    skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
+               struct sk_buff *orig_skb = skb;
+
+               skb = skb_clone(skb, GFP_ATOMIC);
+               if (skb) {
+                       unsigned long flags;
+                       int id, r;
+
+                       spin_lock_irqsave(&local->ack_status_lock, flags);
+                       r = idr_get_new_above(&local->ack_status_frames,
+                                             orig_skb, 1, &id);
+                       if (r == -EAGAIN) {
+                               idr_pre_get(&local->ack_status_frames,
+                                           GFP_ATOMIC);
+                               r = idr_get_new_above(&local->ack_status_frames,
+                                                     orig_skb, 1, &id);
+                       }
+                       if (WARN_ON(!id) || id > 0xffff) {
+                               idr_remove(&local->ack_status_frames, id);
+                               r = -ERANGE;
+                       }
+                       spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+                       if (!r) {
+                               info_id = id;
+                               info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+                       } else if (skb_shared(skb)) {
+                               kfree_skb(orig_skb);
+                       } else {
+                               kfree_skb(skb);
+                               skb = orig_skb;
+                       }
+               } else {
+                       /* couldn't clone -- lose tx status ... */
+                       skb = orig_skb;
+               }
+       }
+
        /*
         * If the skb is shared we need to obtain our own copy.
         */
        if (skb_shared(skb)) {
-               tmp_skb = skb;
+               struct sk_buff *tmp_skb = skb;
+
+               /* can't happen -- skb is a clone if info_id != 0 */
+               WARN_ON(info_id);
+
                skb = skb_clone(skb, GFP_ATOMIC);
                kfree_skb(tmp_skb);
 
@@ -2019,6 +2101,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        memset(info, 0, sizeof(*info));
 
        dev->trans_start = jiffies;
+
+       info->flags = info_flags;
+       info->ack_frame_id = info_id;
+
        ieee80211_xmit(sdata, skb);
 
        return NETDEV_TX_OK;
@@ -2062,10 +2148,15 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
        if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
                result = ieee80211_tx(sdata, skb, true);
        } else {
+               struct sk_buff_head skbs;
+
+               __skb_queue_head_init(&skbs);
+               __skb_queue_tail(&skbs, skb);
+
                hdr = (struct ieee80211_hdr *)skb->data;
                sta = sta_info_get(sdata, hdr->addr1);
 
-               result = __ieee80211_tx(local, &skb, sta, true);
+               result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
        }
 
        return result;
@@ -2178,10 +2269,10 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
                /* Bitmap control */
                *pos++ = n1 | aid0;
                /* Part Virt Bitmap */
+               skb_put(skb, n2 - n1);
                memcpy(pos, bss->tim + n1, n2 - n1 + 1);
 
                tim[1] = n2 - n1 + 4;
-               skb_put(skb, n2 - n1);
        } else {
                *pos++ = aid0; /* Bitmap control */
                *pos++ = 0; /* Part Virt Bitmap */
@@ -2279,22 +2370,31 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
        } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
                struct ieee80211_mgmt *mgmt;
                u8 *pos;
+               int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
+                             sizeof(mgmt->u.beacon);
 
 #ifdef CONFIG_MAC80211_MESH
                if (!sdata->u.mesh.mesh_id_len)
                        goto out;
 #endif
 
-               /* headroom, head length, tail length and maximum TIM length */
-               skb = dev_alloc_skb(local->tx_headroom + 400 +
-                               sdata->u.mesh.ie_len);
+               skb = dev_alloc_skb(local->tx_headroom +
+                                   hdr_len +
+                                   2 + /* NULL SSID */
+                                   2 + 8 + /* supported rates */
+                                   2 + 3 + /* DS params */
+                                   2 + (IEEE80211_MAX_SUPP_RATES - 8) +
+                                   2 + sizeof(struct ieee80211_ht_cap) +
+                                   2 + sizeof(struct ieee80211_ht_info) +
+                                   2 + sdata->u.mesh.mesh_id_len +
+                                   2 + sizeof(struct ieee80211_meshconf_ie) +
+                                   sdata->u.mesh.ie_len);
                if (!skb)
                        goto out;
 
                skb_reserve(skb, local->hw.extra_tx_headroom);
-               mgmt = (struct ieee80211_mgmt *)
-                       skb_put(skb, 24 + sizeof(mgmt->u.beacon));
-               memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
+               mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
+               memset(mgmt, 0, hdr_len);
                mgmt->frame_control =
                    cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
                memset(mgmt->da, 0xff, ETH_ALEN);
@@ -2313,6 +2413,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                    mesh_add_ds_params_ie(skb, sdata) ||
                    ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
                    mesh_add_rsn_ie(skb, sdata) ||
+                   mesh_add_ht_cap_ie(skb, sdata) ||
+                   mesh_add_ht_info_ie(skb, sdata) ||
                    mesh_add_meshid_ie(skb, sdata) ||
                    mesh_add_meshconf_ie(skb, sdata) ||
                    mesh_add_vendor_ies(skb, sdata)) {
@@ -2355,6 +2457,37 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
 
+struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif)
+{
+       struct ieee80211_if_ap *ap = NULL;
+       struct sk_buff *presp = NULL, *skb = NULL;
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       if (sdata->vif.type != NL80211_IFTYPE_AP)
+               return NULL;
+
+       rcu_read_lock();
+
+       ap = &sdata->u.ap;
+       presp = rcu_dereference(ap->probe_resp);
+       if (!presp)
+               goto out;
+
+       skb = skb_copy(presp, GFP_ATOMIC);
+       if (!skb)
+               goto out;
+
+       hdr = (struct ieee80211_hdr *) skb->data;
+       memset(hdr->addr1, 0, sizeof(hdr->addr1));
+
+out:
+       rcu_read_unlock();
+       return skb;
+}
+EXPORT_SYMBOL(ieee80211_proberesp_get);
+
 struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
 {
index d5230ec..ac7ea29 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
 #include <linux/bitmap.h>
+#include <linux/crc32.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
@@ -96,13 +97,13 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
 
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
+       struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
 
-       do {
+       skb_queue_walk(&tx->skbs, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-       } while ((skb = skb->next));
+       }
 }
 
 int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -564,6 +565,172 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(ieee80211_queue_delayed_work);
 
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+                              struct ieee802_11_elems *elems,
+                              u64 filter, u32 crc)
+{
+       size_t left = len;
+       u8 *pos = start;
+       bool calc_crc = filter != 0;
+
+       memset(elems, 0, sizeof(*elems));
+       elems->ie_start = start;
+       elems->total_len = len;
+
+       while (left >= 2) {
+               u8 id, elen;
+
+               id = *pos++;
+               elen = *pos++;
+               left -= 2;
+
+               if (elen > left)
+                       break;
+
+               if (calc_crc && id < 64 && (filter & (1ULL << id)))
+                       crc = crc32_be(crc, pos - 2, elen + 2);
+
+               switch (id) {
+               case WLAN_EID_SSID:
+                       elems->ssid = pos;
+                       elems->ssid_len = elen;
+                       break;
+               case WLAN_EID_SUPP_RATES:
+                       elems->supp_rates = pos;
+                       elems->supp_rates_len = elen;
+                       break;
+               case WLAN_EID_FH_PARAMS:
+                       elems->fh_params = pos;
+                       elems->fh_params_len = elen;
+                       break;
+               case WLAN_EID_DS_PARAMS:
+                       elems->ds_params = pos;
+                       elems->ds_params_len = elen;
+                       break;
+               case WLAN_EID_CF_PARAMS:
+                       elems->cf_params = pos;
+                       elems->cf_params_len = elen;
+                       break;
+               case WLAN_EID_TIM:
+                       if (elen >= sizeof(struct ieee80211_tim_ie)) {
+                               elems->tim = (void *)pos;
+                               elems->tim_len = elen;
+                       }
+                       break;
+               case WLAN_EID_IBSS_PARAMS:
+                       elems->ibss_params = pos;
+                       elems->ibss_params_len = elen;
+                       break;
+               case WLAN_EID_CHALLENGE:
+                       elems->challenge = pos;
+                       elems->challenge_len = elen;
+                       break;
+               case WLAN_EID_VENDOR_SPECIFIC:
+                       if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+                           pos[2] == 0xf2) {
+                               /* Microsoft OUI (00:50:F2) */
+
+                               if (calc_crc)
+                                       crc = crc32_be(crc, pos - 2, elen + 2);
+
+                               if (pos[3] == 1) {
+                                       /* OUI Type 1 - WPA IE */
+                                       elems->wpa = pos;
+                                       elems->wpa_len = elen;
+                               } else if (elen >= 5 && pos[3] == 2) {
+                                       /* OUI Type 2 - WMM IE */
+                                       if (pos[4] == 0) {
+                                               elems->wmm_info = pos;
+                                               elems->wmm_info_len = elen;
+                                       } else if (pos[4] == 1) {
+                                               elems->wmm_param = pos;
+                                               elems->wmm_param_len = elen;
+                                       }
+                               }
+                       }
+                       break;
+               case WLAN_EID_RSN:
+                       elems->rsn = pos;
+                       elems->rsn_len = elen;
+                       break;
+               case WLAN_EID_ERP_INFO:
+                       elems->erp_info = pos;
+                       elems->erp_info_len = elen;
+                       break;
+               case WLAN_EID_EXT_SUPP_RATES:
+                       elems->ext_supp_rates = pos;
+                       elems->ext_supp_rates_len = elen;
+                       break;
+               case WLAN_EID_HT_CAPABILITY:
+                       if (elen >= sizeof(struct ieee80211_ht_cap))
+                               elems->ht_cap_elem = (void *)pos;
+                       break;
+               case WLAN_EID_HT_INFORMATION:
+                       if (elen >= sizeof(struct ieee80211_ht_info))
+                               elems->ht_info_elem = (void *)pos;
+                       break;
+               case WLAN_EID_MESH_ID:
+                       elems->mesh_id = pos;
+                       elems->mesh_id_len = elen;
+                       break;
+               case WLAN_EID_MESH_CONFIG:
+                       if (elen >= sizeof(struct ieee80211_meshconf_ie))
+                               elems->mesh_config = (void *)pos;
+                       break;
+               case WLAN_EID_PEER_MGMT:
+                       elems->peering = pos;
+                       elems->peering_len = elen;
+                       break;
+               case WLAN_EID_PREQ:
+                       elems->preq = pos;
+                       elems->preq_len = elen;
+                       break;
+               case WLAN_EID_PREP:
+                       elems->prep = pos;
+                       elems->prep_len = elen;
+                       break;
+               case WLAN_EID_PERR:
+                       elems->perr = pos;
+                       elems->perr_len = elen;
+                       break;
+               case WLAN_EID_RANN:
+                       if (elen >= sizeof(struct ieee80211_rann_ie))
+                               elems->rann = (void *)pos;
+                       break;
+               case WLAN_EID_CHANNEL_SWITCH:
+                       elems->ch_switch_elem = pos;
+                       elems->ch_switch_elem_len = elen;
+                       break;
+               case WLAN_EID_QUIET:
+                       if (!elems->quiet_elem) {
+                               elems->quiet_elem = pos;
+                               elems->quiet_elem_len = elen;
+                       }
+                       elems->num_of_quiet_elem++;
+                       break;
+               case WLAN_EID_COUNTRY:
+                       elems->country_elem = pos;
+                       elems->country_elem_len = elen;
+                       break;
+               case WLAN_EID_PWR_CONSTRAINT:
+                       elems->pwr_constr_elem = pos;
+                       elems->pwr_constr_elem_len = elen;
+                       break;
+               case WLAN_EID_TIMEOUT_INTERVAL:
+                       elems->timeout_int = pos;
+                       elems->timeout_int_len = elen;
+                       break;
+               default:
+                       break;
+               }
+
+               left -= elen;
+               pos += elen;
+       }
+
+       return crc;
+}
+
 void ieee802_11_parse_elems(u8 *start, size_t len,
                            struct ieee802_11_elems *elems)
 {
@@ -812,23 +979,9 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                offset = noffset;
        }
 
-       if (sband->ht_cap.ht_supported) {
-               u16 cap = sband->ht_cap.cap;
-               __le16 tmp;
-
-               *pos++ = WLAN_EID_HT_CAPABILITY;
-               *pos++ = sizeof(struct ieee80211_ht_cap);
-               memset(pos, 0, sizeof(struct ieee80211_ht_cap));
-               tmp = cpu_to_le16(cap);
-               memcpy(pos, &tmp, sizeof(u16));
-               pos += sizeof(u16);
-               *pos++ = sband->ht_cap.ampdu_factor |
-                        (sband->ht_cap.ampdu_density <<
-                               IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
-               memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
-               pos += sizeof(sband->ht_cap.mcs);
-               pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
-       }
+       if (sband->ht_cap.ht_supported)
+               pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
+                                               sband->ht_cap.cap);
 
        /*
         * If adding more here, adjust code in main.c
@@ -1026,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
                    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
                    ieee80211_sdata_running(sdata))
-                       res = drv_add_interface(local, &sdata->vif);
+                       res = drv_add_interface(local, sdata);
        }
 
        /* add STAs back */
@@ -1076,7 +1229,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                          BSS_CHANGED_BEACON_INT |
                          BSS_CHANGED_BSSID |
                          BSS_CHANGED_CQM |
-                         BSS_CHANGED_QOS;
+                         BSS_CHANGED_QOS |
+                         BSS_CHANGED_IDLE;
 
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
@@ -1090,6 +1244,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                        /* fall through */
                case NL80211_IFTYPE_AP:
                        changed |= BSS_CHANGED_SSID;
+
+                       if (sdata->vif.type == NL80211_IFTYPE_AP)
+                               changed |= BSS_CHANGED_AP_PROBE_RESP;
+
                        /* fall through */
                case NL80211_IFTYPE_MESH_POINT:
                        changed |= BSS_CHANGED_BEACON |
@@ -1111,6 +1269,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                }
        }
 
+       ieee80211_recalc_ps(local, -1);
+
        /*
         * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
         * sessions can be established after a resume.
@@ -1366,6 +1526,108 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
 }
 EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
 
+u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
+                             u16 cap)
+{
+       __le16 tmp;
+
+       *pos++ = WLAN_EID_HT_CAPABILITY;
+       *pos++ = sizeof(struct ieee80211_ht_cap);
+       memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+
+       /* capability flags */
+       tmp = cpu_to_le16(cap);
+       memcpy(pos, &tmp, sizeof(u16));
+       pos += sizeof(u16);
+
+       /* AMPDU parameters */
+       *pos++ = ht_cap->ampdu_factor |
+                (ht_cap->ampdu_density <<
+                       IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
+
+       /* MCS set */
+       memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs));
+       pos += sizeof(ht_cap->mcs);
+
+       /* extended capabilities */
+       pos += sizeof(__le16);
+
+       /* BF capabilities */
+       pos += sizeof(__le32);
+
+       /* antenna selection */
+       pos += sizeof(u8);
+
+       return pos;
+}
+
+u8 *ieee80211_ie_build_ht_info(u8 *pos,
+                              struct ieee80211_sta_ht_cap *ht_cap,
+                              struct ieee80211_channel *channel,
+                              enum nl80211_channel_type channel_type)
+{
+       struct ieee80211_ht_info *ht_info;
+       /* Build HT Information */
+       *pos++ = WLAN_EID_HT_INFORMATION;
+       *pos++ = sizeof(struct ieee80211_ht_info);
+       ht_info = (struct ieee80211_ht_info *)pos;
+       ht_info->control_chan =
+                       ieee80211_frequency_to_channel(channel->center_freq);
+       switch (channel_type) {
+       case NL80211_CHAN_HT40MINUS:
+               ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+               break;
+       case NL80211_CHAN_HT40PLUS:
+               ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+               break;
+       case NL80211_CHAN_HT20:
+       default:
+               ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+               break;
+       }
+       if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+               ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
+
+       /*
+        * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
+        * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
+        */
+       ht_info->operation_mode = 0x0000;
+       ht_info->stbc_param = 0x0000;
+
+       /* It seems that Basic MCS set and Supported MCS set
+          are identical for the first 10 bytes */
+       memset(&ht_info->basic_set, 0, 16);
+       memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);
+
+       return pos + sizeof(struct ieee80211_ht_info);
+}
+
+enum nl80211_channel_type
+ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
+{
+       enum nl80211_channel_type channel_type;
+
+       if (!ht_info)
+               return NL80211_CHAN_NO_HT;
+
+       switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+               channel_type = NL80211_CHAN_HT20;
+               break;
+       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+               channel_type = NL80211_CHAN_HT40PLUS;
+               break;
+       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+               channel_type = NL80211_CHAN_HT40MINUS;
+               break;
+       default:
+               channel_type = NL80211_CHAN_NO_HT;
+       }
+
+       return channel_type;
+}
+
 int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
index a1c6bfd..68ad351 100644 (file)
@@ -330,13 +330,12 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
 
        ieee80211_tx_set_protected(tx);
 
-       skb = tx->skb;
-       do {
+       skb_queue_walk(&tx->skbs, skb) {
                if (wep_encrypt_skb(tx, skb) < 0) {
                        I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
                        return TX_DROP;
                }
-       } while ((skb = skb->next));
+       }
 
        return TX_CONTINUE;
 }
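The same conversion, from walking a hand-rolled skb->next chain to skb_queue_walk() over tx->skbs, repeats in the TKIP and CCMP handlers further down. As a simplified sketch of what the macro does (not the exact definition from skbuff.h):

	struct sk_buff *skb;

	/* skb_queue_walk(&tx->skbs, skb) is roughly this loop: the queue
	 * head itself serves as the end-of-list sentinel, so no NULL check
	 * on ->next is needed. */
	for (skb = (&tx->skbs)->next;
	     skb != (struct sk_buff *)&tx->skbs;
	     skb = skb->next) {
		if (wep_encrypt_skb(tx, skb) < 0)
			return TX_DROP;
	}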
index fd52e69..89511be 100644 (file)
@@ -52,6 +52,30 @@ static int wme_downgrade_ac(struct sk_buff *skb)
        }
 }
 
+/* Indicate which queue to use for this fully formed 802.11 frame */
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+                                struct sk_buff *skb,
+                                struct ieee80211_hdr *hdr)
+{
+       u8 *p;
+
+       if (local->hw.queues < 4)
+               return 0;
+
+       if (!ieee80211_is_data(hdr->frame_control)) {
+               skb->priority = 7;
+               return ieee802_1d_to_ac[skb->priority];
+       }
+       if (!ieee80211_is_data_qos(hdr->frame_control)) {
+               skb->priority = 0;
+               return ieee802_1d_to_ac[skb->priority];
+       }
+
+       p = ieee80211_get_qos_ctl(hdr);
+       skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+
+       return ieee80211_downgrade_queue(local, skb);
+}
 
 /* Indicate which queue to use. */
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
@@ -83,7 +107,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                break;
 #ifdef CONFIG_MAC80211_MESH
        case NL80211_IFTYPE_MESH_POINT:
-               ra = skb->data;
+               qos = true;
                break;
 #endif
        case NL80211_IFTYPE_STATION:
@@ -139,16 +163,24 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
                           struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
        /* Fill in the QoS header if there is one. */
        if (ieee80211_is_data_qos(hdr->frame_control)) {
                u8 *p = ieee80211_get_qos_ctl(hdr);
-               u8 ack_policy = 0, tid;
+               u8 ack_policy, tid;
 
                tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
 
-               if (unlikely(sdata->local->wifi_wme_noack_test))
+               /* preserve EOSP bit */
+               ack_policy = *p & IEEE80211_QOS_CTL_EOSP;
+
+               if (is_multicast_ether_addr(hdr->addr1) ||
+                   sdata->noack_map & BIT(tid)) {
                        ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
+                       info->flags |= IEEE80211_TX_CTL_NO_ACK;
+               }
+
                /* qos header is 2 bytes */
                *p++ = ack_policy | tid;
                *p = ieee80211_vif_is_mesh(&sdata->vif) ?
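Both queue-selection paths in this file index ieee802_1d_to_ac[] with an 802.1D priority. A hedged sketch of the standard mapping that array encodes (the authoritative table is defined in wme.c; in mac80211 lower queue numbers are higher priority):

	/* Illustrative only; 0 = AC_VO, 1 = AC_VI, 2 = AC_BE, 3 = AC_BK. */
	static const int example_1d_to_ac[8] = {
		2,	/* priority 0 -> best effort */
		3,	/* priority 1 -> background  */
		3,	/* priority 2 -> background  */
		2,	/* priority 3 -> best effort */
		1,	/* priority 4 -> video       */
		1,	/* priority 5 -> video       */
		0,	/* priority 6 -> voice       */
		0,	/* priority 7 -> voice       */
	};
	/* Hence the skb->priority = 7 fallback for non-data frames above
	 * lands them on the highest-priority (voice) queue. */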
index 34e166f..94edceb 100644 (file)
@@ -15,6 +15,9 @@
 
 extern const int ieee802_1d_to_ac[8];
 
+u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+                                struct sk_buff *skb,
+                                struct ieee80211_hdr *hdr);
 u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
                           struct sk_buff *skb);
 void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
index 6c53b6d..c6dd01a 100644 (file)
@@ -94,7 +94,8 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
 
 /* frame sending functions */
 
-static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
+static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
+                               struct sk_buff *skb, const u8 *ht_info_ie,
                                struct ieee80211_supported_band *sband,
                                struct ieee80211_channel *channel,
                                enum ieee80211_smps_mode smps)
@@ -102,8 +103,10 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
        struct ieee80211_ht_info *ht_info;
        u8 *pos;
        u32 flags = channel->flags;
-       u16 cap = sband->ht_cap.cap;
-       __le16 tmp;
+       u16 cap;
+       struct ieee80211_sta_ht_cap ht_cap;
+
+       BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
 
        if (!sband->ht_cap.ht_supported)
                return;
@@ -114,9 +117,13 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
        if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
                return;
 
+       memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+       ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
        ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
 
        /* determine capability flags */
+       cap = ht_cap.cap;
 
        switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
@@ -154,34 +161,8 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
        }
 
        /* reserve and fill IE */
-
        pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
-       *pos++ = WLAN_EID_HT_CAPABILITY;
-       *pos++ = sizeof(struct ieee80211_ht_cap);
-       memset(pos, 0, sizeof(struct ieee80211_ht_cap));
-
-       /* capability flags */
-       tmp = cpu_to_le16(cap);
-       memcpy(pos, &tmp, sizeof(u16));
-       pos += sizeof(u16);
-
-       /* AMPDU parameters */
-       *pos++ = sband->ht_cap.ampdu_factor |
-                (sband->ht_cap.ampdu_density <<
-                       IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
-
-       /* MCS set */
-       memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
-       pos += sizeof(sband->ht_cap.mcs);
-
-       /* extended capabilities */
-       pos += sizeof(__le16);
-
-       /* BF capabilities */
-       pos += sizeof(__le32);
-
-       /* antenna selection */
-       pos += sizeof(u8);
+       ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
 static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
@@ -356,7 +337,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
 
        if (wk->assoc.use_11n && wk->assoc.wmm_used &&
            local->hw.queues >= 4)
-               ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
+               ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
                                    sband, wk->chan, wk->assoc.smps);
 
        /* if present, add any custom non-vendor IEs that go after HT */
@@ -881,44 +862,6 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
        kfree_skb(skb);
 }
 
-static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
-                                      enum nl80211_channel_type oper_ct)
-{
-       switch (wk_ct) {
-       case NL80211_CHAN_NO_HT:
-               return true;
-       case NL80211_CHAN_HT20:
-               if (oper_ct != NL80211_CHAN_NO_HT)
-                       return true;
-               return false;
-       case NL80211_CHAN_HT40MINUS:
-       case NL80211_CHAN_HT40PLUS:
-               return (wk_ct == oper_ct);
-       }
-       WARN_ON(1); /* shouldn't get here */
-       return false;
-}
-
-static enum nl80211_channel_type
-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
-                 enum nl80211_channel_type oper_ct)
-{
-       switch (wk_ct) {
-       case NL80211_CHAN_NO_HT:
-               return oper_ct;
-       case NL80211_CHAN_HT20:
-               if (oper_ct != NL80211_CHAN_NO_HT)
-                       return oper_ct;
-               return wk_ct;
-       case NL80211_CHAN_HT40MINUS:
-       case NL80211_CHAN_HT40PLUS:
-               return wk_ct;
-       }
-       WARN_ON(1); /* shouldn't get here */
-       return wk_ct;
-}
-
-
 static void ieee80211_work_timer(unsigned long data)
 {
        struct ieee80211_local *local = (void *) data;
@@ -969,51 +912,12 @@ static void ieee80211_work_work(struct work_struct *work)
                }
 
                if (!started && !local->tmp_channel) {
-                       bool on_oper_chan;
-                       bool tmp_chan_changed = false;
-                       bool on_oper_chan2;
-                       enum nl80211_channel_type wk_ct;
-                       on_oper_chan = ieee80211_cfg_on_oper_channel(local);
-
-                       /* Work with existing channel type if possible. */
-                       wk_ct = wk->chan_type;
-                       if (wk->chan == local->hw.conf.channel)
-                               wk_ct = ieee80211_calc_ct(wk->chan_type,
-                                               local->hw.conf.channel_type);
-
-                       if (local->tmp_channel)
-                               if ((local->tmp_channel != wk->chan) ||
-                                   (local->tmp_channel_type != wk_ct))
-                                       tmp_chan_changed = true;
+                       ieee80211_offchannel_stop_vifs(local, true);
 
                        local->tmp_channel = wk->chan;
-                       local->tmp_channel_type = wk_ct;
-                       /*
-                        * Leave the station vifs in awake mode if they
-                        * happen to be on the same channel as
-                        * the requested channel.
-                        */
-                       on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
-                       if (on_oper_chan != on_oper_chan2) {
-                               if (on_oper_chan2) {
-                                       /* going off oper channel, PS too */
-                                       ieee80211_offchannel_stop_vifs(local,
-                                                                      true);
-                                       ieee80211_hw_config(local, 0);
-                               } else {
-                                       /* going on channel, but leave PS
-                                        * off-channel. */
-                                       ieee80211_hw_config(local, 0);
-                                       ieee80211_offchannel_return(local,
-                                                                   true,
-                                                                   false);
-                               }
-                       } else if (tmp_chan_changed)
-                               /* Still off-channel, but on some other
-                                * channel, so update hardware.
-                                * PS should already be off-channel.
-                                */
-                               ieee80211_hw_config(local, 0);
+                       local->tmp_channel_type = wk->chan_type;
+
+                       ieee80211_hw_config(local, 0);
 
                        started = true;
                        wk->timeout = jiffies;
@@ -1082,34 +986,17 @@ static void ieee80211_work_work(struct work_struct *work)
        list_for_each_entry(wk, &local->work_list, list) {
                if (!wk->started)
                        continue;
-               if (wk->chan != local->tmp_channel)
-                       continue;
-               if (!ieee80211_work_ct_coexists(wk->chan_type,
-                                               local->tmp_channel_type))
+               if (wk->chan != local->tmp_channel ||
+                   wk->chan_type != local->tmp_channel_type)
                        continue;
                remain_off_channel = true;
        }
 
        if (!remain_off_channel && local->tmp_channel) {
                local->tmp_channel = NULL;
-               /* If tmp_channel wasn't operating channel, then
-                * we need to go back on-channel.
-                * NOTE:  If we can ever be here while scanning,
-                * or if the hw_config() channel config logic changes,
-                * then we may need to do a more thorough check to see if
-                * we still need to do a hardware config.  Currently,
-                * we cannot be here while scanning, however.
-                */
-               if (!ieee80211_cfg_on_oper_channel(local))
-                       ieee80211_hw_config(local, 0);
+               ieee80211_hw_config(local, 0);
 
-               /* At the least, we need to disable offchannel_ps,
-                * so just go ahead and run the entire offchannel
-                * return logic here.  We *could* skip enabling
-                * beaconing if we were already on-oper-channel
-                * as a future optimization.
-                */
-               ieee80211_offchannel_return(local, true, true);
+               ieee80211_offchannel_return(local, true);
 
                /* give connection some time to breathe */
                run_again(local, jiffies + HZ/2);
index f614ce7..93aab07 100644 (file)
@@ -223,14 +223,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
 ieee80211_tx_result
 ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
+       struct sk_buff *skb;
 
        ieee80211_tx_set_protected(tx);
 
-       do {
+       skb_queue_walk(&tx->skbs, skb) {
                if (tkip_encrypt_skb(tx, skb) < 0)
                        return TX_DROP;
-       } while ((skb = skb->next));
+       }
 
        return TX_CONTINUE;
 }
@@ -390,7 +390,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        u8 scratch[6 * AES_BLOCK_SIZE];
 
        if (info->control.hw_key &&
-           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
+           !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) {
                /*
                 * hwaccel has no need for preallocated room for CCMP
                 * header or MIC fields
@@ -412,6 +413,12 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
 
        pos = skb_push(skb, CCMP_HDR_LEN);
        memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
+
+       /* the HW only needs room for the IV, but not the actual IV */
+       if (info->control.hw_key &&
+           (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
+               return 0;
+
        hdr = (struct ieee80211_hdr *) pos;
        pos += hdrlen;
 
@@ -442,14 +449,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
 ieee80211_tx_result
 ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
+       struct sk_buff *skb;
 
        ieee80211_tx_set_protected(tx);
 
-       do {
+       skb_queue_walk(&tx->skbs, skb) {
                if (ccmp_encrypt_skb(tx, skb) < 0)
                        return TX_DROP;
-       } while ((skb = skb->next));
+       }
 
        return TX_CONTINUE;
 }
@@ -547,15 +554,22 @@ static inline void bip_ipn_swap(u8 *d, const u8 *s)
 ieee80211_tx_result
 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
 {
-       struct sk_buff *skb = tx->skb;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
        struct ieee80211_key *key = tx->key;
        struct ieee80211_mmie *mmie;
        u8 aad[20];
        u64 pn64;
 
+       if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
+               return TX_DROP;
+
+       skb = skb_peek(&tx->skbs);
+
+       info = IEEE80211_SKB_CB(skb);
+
        if (info->control.hw_key)
-               return 0;
+               return TX_CONTINUE;
 
        if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
                return TX_DROP;
index afca6c7..4aa0f4b 100644 (file)
@@ -54,6 +54,12 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 
 struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
+
+#if defined(CONFIG_JUMP_LABEL)
+struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+EXPORT_SYMBOL(nf_hooks_needed);
+#endif
+
 static DEFINE_MUTEX(nf_hook_mutex);
 
 int nf_register_hook(struct nf_hook_ops *reg)
@@ -70,6 +76,9 @@ int nf_register_hook(struct nf_hook_ops *reg)
        }
        list_add_rcu(&reg->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
+#if defined(CONFIG_JUMP_LABEL)
+       jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
        return 0;
 }
 EXPORT_SYMBOL(nf_register_hook);
@@ -79,7 +88,9 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
        mutex_lock(&nf_hook_mutex);
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);
-
+#if defined(CONFIG_JUMP_LABEL)
+       jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+#endif
        synchronize_net();
 }
 EXPORT_SYMBOL(nf_unregister_hook);
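The jump label key added here is incremented on nf_register_hook() and decremented on nf_unregister_hook(), so the per-protocol/per-hook fast path can be patched out entirely while no hooks are registered. A hedged sketch of how such a key would typically be consulted, assuming the static_branch() query from <linux/jump_label.h> of this kernel generation (example_nf_hooks_active() is illustrative, not the actual header code):

	#include <linux/jump_label.h>
	#include <linux/netfilter.h>

	static inline bool example_nf_hooks_active(u_int8_t pf,
						   unsigned int hooknum)
	{
		/* compiled to a patched no-op branch while the count is zero */
		return static_branch(&nf_hooks_needed[pf][hooknum]);
	}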
index 052579f..b71a6e7 100644 (file)
@@ -116,9 +116,11 @@ ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
 {
        int protoff;
        u8 nexthdr;
+       __be16 frag_off;
 
        nexthdr = ipv6_hdr(skb)->nexthdr;
-       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+                                  &frag_off);
        if (protoff < 0)
                return false;
 
index f2d576e..4015fca 100644 (file)
@@ -241,7 +241,7 @@ hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
 static inline void
 hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
 {
-       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+       dst->ip.in6 = src->ip.in6;
 }
 
 static inline void
index 60d0165..2898819 100644 (file)
@@ -267,7 +267,7 @@ static inline void
 hash_net6_data_copy(struct hash_net6_elem *dst,
                    const struct hash_net6_elem *src)
 {
-       ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+       dst->ip.in6 = src->ip.in6;
        dst->cidr = src->cidr;
 }
 
index 093cc32..611c335 100644 (file)
@@ -983,7 +983,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
        if (!cp)
                return NF_ACCEPT;
 
-       ipv6_addr_copy(&snet.in6, &iph->saddr);
+       snet.in6 = iph->saddr;
        return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
                                    pp, offset, sizeof(struct ipv6hdr));
 }
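This and the following hunks are part of a tree-wide cleanup: struct in6_addr is a plain aggregate, so the ipv6_addr_copy() helper is replaced by direct structure assignment. A minimal sketch of the equivalence (example_copy() is illustrative):

	#include <linux/in6.h>

	static void example_copy(struct in6_addr *dst,
				 const struct in6_addr *src)
	{
		/* previously: ipv6_addr_copy(dst, src); */
		*dst = *src;	/* plain struct assignment, as in the hunks */
	}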
index 3cdd479..bcf5563 100644 (file)
@@ -603,9 +603,9 @@ sloop:
 #ifdef CONFIG_IP_VS_IPV6
        if (cp->af == AF_INET6) {
                p += sizeof(struct ip_vs_sync_v6);
-               ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
-               ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
-               ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+               s->v6.caddr = cp->caddr.in6;
+               s->v6.vaddr = cp->vaddr.in6;
+               s->v6.daddr = cp->daddr.in6;
        } else
 #endif
        {
index aa2d720..38a576d 100644 (file)
@@ -235,7 +235,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
                        goto out_err;
                }
        }
-       ipv6_addr_copy(ret_saddr, &fl6.saddr);
+       *ret_saddr = fl6.saddr;
        return dst;
 
 out_err:
@@ -279,7 +279,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
                                  atomic_read(&rt->dst.__refcnt));
                }
                if (ret_saddr)
-                       ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
+                       *ret_saddr = dest->dst_saddr.in6;
                spin_unlock(&dest->dst_lock);
        } else {
                dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -705,7 +705,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        /* mangle the packet */
        if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
                goto tx_error;
-       ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
+       ipv6_hdr(skb)->daddr = cp->daddr.in6;
 
        if (!local || !skb->dev) {
                /* drop the old route when skb is not shared */
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
        iph->priority           =       old_iph->priority;
        memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
-       ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
-       ipv6_addr_copy(&iph->saddr, &saddr);
+       iph->daddr = cp->daddr.in6;
+       iph->saddr = saddr;
        iph->hop_limit          =       old_iph->hop_limit;
 
        /* Another hack: avoid icmp_send in ip_fragment */
index f03c2d4..f9368f3 100644 (file)
@@ -750,10 +750,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                struct rt6_info *rt1, *rt2;
 
                memset(&fl1, 0, sizeof(fl1));
-               ipv6_addr_copy(&fl1.daddr, &src->in6);
+               fl1.daddr = src->in6;
 
                memset(&fl2, 0, sizeof(fl2));
-               ipv6_addr_copy(&fl2.daddr, &dst->in6);
+               fl2.daddr = dst->in6;
                if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
                                   flowi6_to_flowi(&fl1), false)) {
                        if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
index 4bca15a..ba92824 100644 (file)
@@ -98,6 +98,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
        struct ipv6hdr _ip6h;
        const struct ipv6hdr *ih;
        u8 nexthdr;
+       __be16 frag_off;
        int offset;
 
        ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
@@ -108,7 +109,7 @@ static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
 
        nexthdr = ih->nexthdr;
        offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
-                                 &nexthdr);
+                                 &nexthdr, &frag_off);
 
        audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
                         &ih->saddr, &ih->daddr, nexthdr);
index 9e63b43..ba72262 100644 (file)
@@ -161,7 +161,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
                struct flowi6 *fl6 = &fl.u.ip6;
 
                memset(fl6, 0, sizeof(*fl6));
-               ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr);
+               fl6->daddr = ipv6_hdr(skb)->saddr;
        }
        rcu_read_lock();
        ai = nf_get_afinfo(family);
@@ -204,11 +204,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
+       __be16 frag_off;
        int tcphoff;
        int ret;
 
        nexthdr = ipv6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par->targinfo,
index 9dc9ecf..3a295cc 100644 (file)
@@ -87,9 +87,10 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int tcphoff;
        u_int8_t nexthdr;
+       __be16 frag_off;
 
        nexthdr = ipv6h->nexthdr;
-       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr);
+       tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
 
index 5f054a0..68349c3 100644 (file)
@@ -29,9 +29,6 @@
 #      define WITH_CONNTRACK 1
 #      include <net/netfilter/nf_conntrack.h>
 #endif
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-#      define WITH_IPV6 1
-#endif
 
 struct xt_tee_priv {
        struct notifier_block   notifier;
@@ -136,7 +133,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
 static bool
 tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 {
@@ -196,7 +193,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        }
        return XT_CONTINUE;
 }
-#endif /* WITH_IPV6 */
+#endif
 
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
@@ -276,7 +273,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
                .destroy    = tee_tg_destroy,
                .me         = THIS_MODULE,
        },
-#ifdef WITH_IPV6
+#if IS_ENABLED(CONFIG_IPV6)
        {
                .name       = "TEE",
                .revision   = 1,
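The WITH_IPV6 shorthand is dropped in favour of the generic IS_ENABLED() helper from <linux/kconfig.h>, which is true for both built-in and modular configurations. A small sketch of the equivalence (CONFIG_IPV6 used purely as the example symbol):

	#include <linux/kconfig.h>

	#if IS_ENABLED(CONFIG_IPV6)	/* true for CONFIG_IPV6=y or =m */
	/* IPv6-only code goes here. Behaviourally this matches the
	 * open-coded test it replaces throughout this series:
	 *   #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	 */
	#endif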
index b77d383..c047de2 100644 (file)
@@ -42,7 +42,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
        int route_err;
 
        memset(&flow, 0, sizeof(flow));
-       ipv6_addr_copy(&flow.daddr, addr);
+       flow.daddr = *addr;
        if (dev)
                flow.flowi6_oif = dev->ifindex;
 
index dfd52ba..068698f 100644 (file)
@@ -445,6 +445,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
 {
        __be16 _ports[2], *ports;
        u8 nexthdr;
+       __be16 frag_off;
        int poff;
 
        memset(dst, 0, sizeof(*dst));
@@ -480,7 +481,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
                        return 0;
                nexthdr = ipv6_hdr(skb)->nexthdr;
-               protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+               protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off);
                if ((int)protoff < 0)
                        return -1;
                break;
index fe39f7e..c302e30 100644 (file)
@@ -214,6 +214,7 @@ extract_icmp6_fields(const struct sk_buff *skb,
        struct icmp6hdr *icmph, _icmph;
        __be16 *ports, _ports[2];
        u8 inside_nexthdr;
+       __be16 inside_fragoff;
        int inside_hdrlen;
 
        icmph = skb_header_pointer(skb, outside_hdrlen,
@@ -229,7 +230,8 @@ extract_icmp6_fields(const struct sk_buff *skb,
                return 1;
        inside_nexthdr = inside_iph->nexthdr;
 
-       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), &inside_nexthdr);
+       inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph),
+                                        &inside_nexthdr, &inside_fragoff);
        if (inside_hdrlen < 0)
                return 1; /* hjm: Packet has no/incomplete transport layer headers. */
 
index 96b749d..6f17013 100644 (file)
@@ -96,7 +96,7 @@ struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr,
 }
 
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_search - Search for a matching IPv6 address entry
  * @addr: IPv6 address
@@ -185,7 +185,7 @@ int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head)
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_add - Add a new IPv6 address entry to a list
  * @entry: address entry
@@ -263,7 +263,7 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
        return entry;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_remove_entry - Remove an IPv6 address entry
  * @entry: address entry
@@ -342,7 +342,7 @@ void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
        }
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_af6list_audit_addr - Audit an IPv6 address
  * @audit_buf: audit buffer
index fdbc1d2..a1287ce 100644 (file)
@@ -133,7 +133,7 @@ static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf,
 }
 #endif
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 #define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list)
 
index 3f905e5..3820411 100644 (file)
@@ -78,7 +78,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
        struct netlbl_dom_map *ptr;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -90,7 +90,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
                        netlbl_af4list_remove_entry(iter4);
                        kfree(netlbl_domhsh_addr4_entry(iter4));
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_safe(iter6, tmp6,
                                            &ptr->type_def.addrsel->list6) {
                        netlbl_af6list_remove_entry(iter6);
@@ -217,7 +217,7 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
                        cipsov4 = map4->type_def.cipsov4;
                        netlbl_af4list_audit_addr(audit_buf, 0, NULL,
                                                  addr4->addr, addr4->mask);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                } else if (addr6 != NULL) {
                        struct netlbl_domaddr6_map *map6;
                        map6 = netlbl_domhsh_addr6_entry(addr6);
@@ -306,7 +306,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
        struct netlbl_dom_map *entry_old;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -338,7 +338,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                                               &entry->type_def.addrsel->list4)
                                netlbl_domhsh_audit_add(entry, iter4, NULL,
                                                        ret_val, audit_info);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        netlbl_af6list_foreach_rcu(iter6,
                                               &entry->type_def.addrsel->list6)
                                netlbl_domhsh_audit_add(entry, NULL, iter6,
@@ -365,7 +365,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                                ret_val = -EEXIST;
                                goto add_return;
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_rcu(iter6,
                                           &entry->type_def.addrsel->list6)
                        if (netlbl_af6list_search_exact(&iter6->addr,
@@ -386,7 +386,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
                        if (ret_val != 0)
                                goto add_return;
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_safe(iter6, tmp6,
                                            &entry->type_def.addrsel->list6) {
                        netlbl_af6list_remove_entry(iter6);
@@ -510,7 +510,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
        struct netlbl_dom_map *entry_map;
        struct netlbl_af4list *entry_addr;
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif /* IPv6 */
        struct netlbl_domaddr4_map *entry;
@@ -533,7 +533,7 @@ int netlbl_domhsh_remove_af4(const char *domain,
                goto remove_af4_failure;
        netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
                goto remove_af4_single_addr;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
                goto remove_af4_single_addr;
 #endif /* IPv6 */
@@ -644,7 +644,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
        return netlbl_domhsh_addr4_entry(addr_iter);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
  * @domain: the domain name to search for
index bfcc0f7..90872c4 100644 (file)
@@ -104,7 +104,7 @@ int netlbl_domhsh_walk(u32 *skip_bkt,
                     int (*callback) (struct netlbl_dom_map *entry, void *arg),
                     void *cb_arg);
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
                                                  const struct in6_addr *addr);
 #endif /* IPv6 */
index 824f184..2560e7b 100644 (file)
@@ -147,7 +147,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                                goto cfg_unlbl_map_add_failure;
                        break;
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6: {
                        const struct in6_addr *addr6 = addr;
                        const struct in6_addr *mask6 = mask;
@@ -155,12 +155,12 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                        if (map6 == NULL)
                                goto cfg_unlbl_map_add_failure;
                        map6->type = NETLBL_NLTYPE_UNLABELED;
-                       ipv6_addr_copy(&map6->list.addr, addr6);
+                       map6->list.addr = *addr6;
                        map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
                        map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
                        map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
                        map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
-                       ipv6_addr_copy(&map6->list.mask, mask6);
+                       map6->list.mask = *mask6;
                        map6->list.valid = 1;
                        ret_val = netlbl_af6list_add(&map6->list,
                                                     &addrmap->list6);
@@ -227,7 +227,7 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
@@ -270,7 +270,7 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
@@ -673,7 +673,7 @@ int netlbl_sock_setattr(struct sock *sk,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -724,7 +724,7 @@ int netlbl_sock_getattr(struct sock *sk,
        case AF_INET:
                ret_val = cipso_v4_sock_getattr(sk, secattr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                ret_val = -ENOMSG;
                break;
@@ -782,7 +782,7 @@ int netlbl_conn_setattr(struct sock *sk,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -853,7 +853,7 @@ int netlbl_req_setattr(struct request_sock *req,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -926,7 +926,7 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
                        ret_val = -ENOENT;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                /* since we don't support any IPv6 labeling protocols right
                 * now we can optimize everything away until we do */
@@ -965,7 +965,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
                    cipso_v4_skbuff_getattr(skb, secattr) == 0)
                        return 0;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                break;
 #endif /* IPv6 */
index bfa5558..4809e2e 100644 (file)
@@ -184,7 +184,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
 
                entry->type = NETLBL_NLTYPE_ADDRSELECT;
                entry->type_def.addrsel = addrmap;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        } else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
                struct in6_addr *addr;
                struct in6_addr *mask;
@@ -216,12 +216,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
                        ret_val = -ENOMEM;
                        goto add_failure;
                }
-               ipv6_addr_copy(&map->list.addr, addr);
+               map->list.addr = *addr;
                map->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
                map->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
                map->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
                map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
-               ipv6_addr_copy(&map->list.mask, mask);
+               map->list.mask = *mask;
                map->list.valid = 1;
                map->type = entry->type;
 
@@ -270,7 +270,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
        struct nlattr *nla_a;
        struct nlattr *nla_b;
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif
 
@@ -324,7 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
 
                        nla_nest_end(skb, nla_b);
                }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                netlbl_af6list_foreach_rcu(iter6,
                                           &entry->type_def.addrsel->list6) {
                        struct netlbl_domaddr6_map *map6;
index e251c2c..4b5fa0f 100644 (file)
@@ -170,7 +170,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
        struct netlbl_unlhsh_iface *iface;
        struct netlbl_af4list *iter4;
        struct netlbl_af4list *tmp4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
        struct netlbl_af6list *tmp6;
 #endif /* IPv6 */
@@ -184,7 +184,7 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry)
                netlbl_af4list_remove_entry(iter4);
                kfree(netlbl_unlhsh_addr4_entry(iter4));
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_safe(iter6, tmp6, &iface->addr6_list) {
                netlbl_af6list_remove_entry(iter6);
                kfree(netlbl_unlhsh_addr6_entry(iter6));
@@ -274,7 +274,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
        return ret_val;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table
  * @iface: the associated interface entry
@@ -300,12 +300,12 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
        if (entry == NULL)
                return -ENOMEM;
 
-       ipv6_addr_copy(&entry->list.addr, addr);
+       entry->list.addr = *addr;
        entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
        entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
        entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
        entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
-       ipv6_addr_copy(&entry->list.mask, mask);
+       entry->list.mask = *mask;
        entry->list.valid = 1;
        entry->secid = secid;
 
@@ -436,7 +436,7 @@ int netlbl_unlhsh_add(struct net *net,
                                                  mask4->s_addr);
                break;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case sizeof(struct in6_addr): {
                const struct in6_addr *addr6 = addr;
                const struct in6_addr *mask6 = mask;
@@ -531,7 +531,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
        return 0;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /**
  * netlbl_unlhsh_remove_addr6 - Remove an IPv6 address entry
  * @net: network namespace
@@ -606,14 +606,14 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
 {
        struct netlbl_af4list *iter4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *iter6;
 #endif /* IPv6 */
 
        spin_lock(&netlbl_unlhsh_lock);
        netlbl_af4list_foreach_rcu(iter4, &iface->addr4_list)
                goto unlhsh_condremove_failure;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(iter6, &iface->addr6_list)
                goto unlhsh_condremove_failure;
 #endif /* IPv6 */
@@ -680,7 +680,7 @@ int netlbl_unlhsh_remove(struct net *net,
                                                     iface, addr, mask,
                                                     audit_info);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case sizeof(struct in6_addr):
                ret_val = netlbl_unlhsh_remove_addr6(net,
                                                     iface, addr, mask,
@@ -1196,7 +1196,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
        struct netlbl_unlhsh_iface *iface;
        struct list_head *iter_list;
        struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct netlbl_af6list *addr6;
 #endif
 
@@ -1228,7 +1228,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
                                        goto unlabel_staticlist_return;
                                }
                        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                        netlbl_af6list_foreach_rcu(addr6,
                                                   &iface->addr6_list) {
                                if (iter_addr6++ < skip_addr6)
@@ -1277,7 +1277,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
        u32 skip_addr6 = cb->args[1];
        u32 iter_addr4 = 0;
        struct netlbl_af4list *addr4;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        u32 iter_addr6 = 0;
        struct netlbl_af6list *addr6;
 #endif
@@ -1303,7 +1303,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
                        goto unlabel_staticlistdef_return;
                }
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
                if (iter_addr6++ < skip_addr6)
                        continue;
@@ -1494,7 +1494,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
                secattr->attr.secid = netlbl_unlhsh_addr4_entry(addr4)->secid;
                break;
        }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6: {
                struct ipv6hdr *hdr6;
                struct netlbl_af6list *addr6;
index 482fa57..28453ae 100644 (file)
@@ -33,6 +33,14 @@ void genl_unlock(void)
 }
 EXPORT_SYMBOL(genl_unlock);
 
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_genl_is_held(void)
+{
+       return lockdep_is_held(&genl_mutex);
+}
+EXPORT_SYMBOL(lockdep_genl_is_held);
+#endif
+
 #define GENL_FAM_TAB_SIZE      16
 #define GENL_FAM_TAB_MASK      (GENL_FAM_TAB_SIZE - 1)
 
@@ -946,3 +954,16 @@ int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
        return genlmsg_mcast(skb, pid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
+
+void genl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+                struct nlmsghdr *nlh, gfp_t flags)
+{
+       struct sock *sk = net->genl_sock;
+       int report = 0;
+
+       if (nlh)
+               report = nlmsg_report(nlh);
+
+       nlmsg_notify(sk, skb, pid, group, report, flags);
+}
+EXPORT_SYMBOL(genl_notify);
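genl_notify() gives generic netlink families a one-call way to broadcast a built message to a multicast group, honouring NLM_F_ECHO via nlmsg_report() as shown above. A hedged usage sketch (the reply skb and example_mc_group are illustrative, and snd_pid is the field name of this kernel generation):

	/* illustrative group, registered elsewhere via genl_register_mc_group() */
	extern struct genl_multicast_group example_mc_group;

	static void example_notify(struct genl_info *info, struct sk_buff *reply)
	{
		genl_notify(reply, genl_info_net(info), info->snd_pid,
			    example_mc_group.id, info->nlhdr, GFP_KERNEL);
	}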
index 732152f..c329b47 100644 (file)
@@ -1244,7 +1244,8 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        case SIOCADDRT:
        case SIOCDELRT:
        case SIOCNRDECOBS:
-               if (!capable(CAP_NET_ADMIN)) return -EPERM;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
                return nr_rt_ioctl(cmd, argp);
 
        default:
index 915a87b..2cf3301 100644 (file)
@@ -670,14 +670,17 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
        case SIOCADDRT:
                if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
                        return -EFAULT;
-               if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
+               if (nr_route.ndigis > AX25_MAX_DIGIS)
                        return -EINVAL;
-               if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) {
-                       dev_put(dev);
+               if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
                        return -EINVAL;
-               }
                switch (nr_route.type) {
                case NETROM_NODE:
+                       if (strnlen(nr_route.mnemonic, 7) == 7) {
+                               ret = -EINVAL;
+                               break;
+                       }
+
                        ret = nr_add_node(&nr_route.callsign,
                                nr_route.mnemonic,
                                &nr_route.neighbour,
index 47e02c1..3ebc6b3 100644 (file)
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 int nfc_devlist_generation;
 DEFINE_MUTEX(nfc_devlist_mutex);
 
-int nfc_printk(const char *level, const char *format, ...)
-{
-       struct va_format vaf;
-       va_list args;
-       int r;
-
-       va_start(args, format);
-
-       vaf.fmt = format;
-       vaf.va = &args;
-
-       r = printk("%sNFC: %pV\n", level, &vaf);
-
-       va_end(args);
-
-       return r;
-}
-EXPORT_SYMBOL(nfc_printk);
-
 /**
  * nfc_dev_up - turn on the NFC device
  *
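The driver-local nfc_printk()/nfc_dbg() wrappers removed above give way to the stock pr_*() helpers; with the pr_fmt() definition added before the includes, every message is automatically prefixed with the module name. A minimal sketch of the pattern (example_log() and its string are illustrative):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* before any includes */

	#include <linux/kernel.h>
	#include <linux/module.h>

	static void example_log(const char *name)
	{
		pr_debug("dev_name=%s\n", name);  /* "<modname>: dev_name=..." */
	}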
@@ -63,7 +46,7 @@ int nfc_dev_up(struct nfc_dev *dev)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        device_lock(&dev->dev);
 
@@ -97,7 +80,7 @@ int nfc_dev_down(struct nfc_dev *dev)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        device_lock(&dev->dev);
 
@@ -139,7 +122,8 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols);
+       pr_debug("dev_name=%s protocols=0x%x\n",
+                dev_name(&dev->dev), protocols);
 
        if (!protocols)
                return -EINVAL;
@@ -174,7 +158,7 @@ int nfc_stop_poll(struct nfc_dev *dev)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        device_lock(&dev->dev);
 
@@ -207,8 +191,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev),
-                                                       target_idx, protocol);
+       pr_debug("dev_name=%s target_idx=%u protocol=%u\n",
+                dev_name(&dev->dev), target_idx, protocol);
 
        device_lock(&dev->dev);
 
@@ -236,7 +220,8 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
 {
        int rc = 0;
 
-       nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx);
+       pr_debug("dev_name=%s target_idx=%u\n",
+                dev_name(&dev->dev), target_idx);
 
        device_lock(&dev->dev);
 
@@ -271,8 +256,8 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
 {
        int rc;
 
-       nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev),
-                                                       target_idx, skb->len);
+       pr_debug("dev_name=%s target_idx=%u skb->len=%u\n",
+                dev_name(&dev->dev), target_idx, skb->len);
 
        device_lock(&dev->dev);
 
@@ -326,7 +311,7 @@ int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
 {
        int i;
 
-       nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets);
+       pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
 
        dev->polling = false;
 
@@ -360,7 +345,7 @@ static void nfc_release(struct device *d)
 {
        struct nfc_dev *dev = to_nfc_dev(d);
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        nfc_genl_data_exit(&dev->genl_data);
        kfree(dev->targets);
@@ -446,7 +431,7 @@ int nfc_register_device(struct nfc_dev *dev)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        mutex_lock(&nfc_devlist_mutex);
        nfc_devlist_generation++;
@@ -458,9 +443,8 @@ int nfc_register_device(struct nfc_dev *dev)
 
        rc = nfc_genl_device_added(dev);
        if (rc)
-               nfc_dbg("The userspace won't be notified that the device %s was"
-                                               " added", dev_name(&dev->dev));
-
+               pr_debug("The userspace won't be notified that the device %s was added\n",
+                        dev_name(&dev->dev));
 
        return 0;
 }
@@ -475,7 +459,7 @@ void nfc_unregister_device(struct nfc_dev *dev)
 {
        int rc;
 
-       nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        mutex_lock(&nfc_devlist_mutex);
        nfc_devlist_generation++;
@@ -490,8 +474,8 @@ void nfc_unregister_device(struct nfc_dev *dev)
 
        rc = nfc_genl_device_removed(dev);
        if (rc)
-               nfc_dbg("The userspace won't be notified that the device %s"
-                                       " was removed", dev_name(&dev->dev));
+               pr_debug("The userspace won't be notified that the device %s was removed\n",
+                        dev_name(&dev->dev));
 
 }
 EXPORT_SYMBOL(nfc_unregister_device);
@@ -500,7 +484,7 @@ static int __init nfc_init(void)
 {
        int rc;
 
-       nfc_info("NFC Core ver %s", VERSION);
+       pr_info("NFC Core ver %s\n", VERSION);
 
        rc = class_register(&nfc_class);
        if (rc)
index 3925c65..37de28e 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <linux/completion.h>
@@ -79,7 +81,7 @@ static int __nci_request(struct nci_dev *ndev,
                                                        &ndev->req_completion,
                                                        timeout);
 
-       nfc_dbg("wait_for_completion return %ld", completion_rc);
+       pr_debug("wait_for_completion return %ld\n", completion_rc);
 
        if (completion_rc > 0) {
                switch (ndev->req_status) {
@@ -96,8 +98,8 @@ static int __nci_request(struct nci_dev *ndev,
                        break;
                }
        } else {
-               nfc_err("wait_for_completion_interruptible_timeout failed %ld",
-                       completion_rc);
+               pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
+                      completion_rc);
 
                rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
        }
@@ -126,7 +128,10 @@ static inline int nci_request(struct nci_dev *ndev,
 
 static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
 {
-       nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+       struct nci_core_reset_cmd cmd;
+
+       cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
+       nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
 }
 
 static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
@@ -136,17 +141,11 @@ static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
 
 static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
 {
-       struct nci_core_conn_create_cmd conn_cmd;
        struct nci_rf_disc_map_cmd cmd;
        struct disc_map_config *cfg = cmd.mapping_configs;
        __u8 *num = &cmd.num_mapping_configs;
        int i;
 
-       /* create static rf connection */
-       conn_cmd.target_handle = 0;
-       conn_cmd.num_target_specific_params = 0;
-       nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
-
        /* set rf mapping configurations */
        *num = 0;
 
@@ -326,8 +325,6 @@ static void nci_cmd_timer(unsigned long arg)
 {
        struct nci_dev *ndev = (void *) arg;
 
-       nfc_dbg("entry");
-
        atomic_set(&ndev->cmd_cnt, 1);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
 }
@@ -336,8 +333,6 @@ static int nci_dev_up(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry");
-
        return nci_open_device(ndev);
 }
 
@@ -345,8 +340,6 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry");
-
        return nci_close_device(ndev);
 }
 
@@ -355,20 +348,18 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;
 
-       nfc_dbg("entry");
-
        if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
-               nfc_err("unable to start poll, since poll is already active");
+               pr_err("unable to start poll, since poll is already active\n");
                return -EBUSY;
        }
 
        if (ndev->target_active_prot) {
-               nfc_err("there is an active target");
+               pr_err("there is an active target\n");
                return -EBUSY;
        }
 
        if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
-               nfc_dbg("target is active, implicitly deactivate...");
+               pr_debug("target is active, implicitly deactivate...\n");
 
                rc = nci_request(ndev, nci_rf_deactivate_req, 0,
                        msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
@@ -389,10 +380,8 @@ static void nci_stop_poll(struct nfc_dev *nfc_dev)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry");
-
        if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
-               nfc_err("unable to stop poll, since poll is not active");
+               pr_err("unable to stop poll, since poll is not active\n");
                return;
        }
 
@@ -405,21 +394,21 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+       pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
 
        if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
-               nfc_err("there is no available target to activate");
+               pr_err("there is no available target to activate\n");
                return -EINVAL;
        }
 
        if (ndev->target_active_prot) {
-               nfc_err("there is already an active target");
+               pr_err("there is already an active target\n");
                return -EBUSY;
        }
 
        if (!(ndev->target_available_prots & (1 << protocol))) {
-               nfc_err("target does not support the requested protocol 0x%x",
-                       protocol);
+               pr_err("target does not support the requested protocol 0x%x\n",
+                      protocol);
                return -EINVAL;
        }
 
@@ -433,10 +422,10 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
 {
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dbg("entry, target_idx %d", target_idx);
+       pr_debug("target_idx %d\n", target_idx);
 
        if (!ndev->target_active_prot) {
-               nfc_err("unable to deactivate target, no active target");
+               pr_err("unable to deactivate target, no active target\n");
                return;
        }
 
@@ -456,10 +445,10 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;
 
-       nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+       pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
 
        if (!ndev->target_active_prot) {
-               nfc_err("unable to exchange data, no active target");
+               pr_err("unable to exchange data, no active target\n");
                return -EINVAL;
        }
 
@@ -470,7 +459,7 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
        ndev->data_exchange_cb = cb;
        ndev->data_exchange_cb_context = cb_context;
 
-       rc = nci_send_data(ndev, ndev->conn_id, skb);
+       rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
        if (rc)
                clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
 
@@ -502,7 +491,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
 {
        struct nci_dev *ndev;
 
-       nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+       pr_debug("supported_protocols 0x%x\n", supported_protocols);
 
        if (!ops->open || !ops->close || !ops->send)
                return NULL;
@@ -542,8 +531,6 @@ EXPORT_SYMBOL(nci_allocate_device);
  */
 void nci_free_device(struct nci_dev *ndev)
 {
-       nfc_dbg("entry");
-
        nfc_free_device(ndev->nfc_dev);
        kfree(ndev);
 }
@@ -560,8 +547,6 @@ int nci_register_device(struct nci_dev *ndev)
        struct device *dev = &ndev->nfc_dev->dev;
        char name[32];
 
-       nfc_dbg("entry");
-
        rc = nfc_register_device(ndev->nfc_dev);
        if (rc)
                goto exit;
@@ -624,8 +609,6 @@ EXPORT_SYMBOL(nci_register_device);
  */
 void nci_unregister_device(struct nci_dev *ndev)
 {
-       nfc_dbg("entry");
-
        nci_close_device(ndev);
 
        destroy_workqueue(ndev->cmd_wq);
@@ -645,7 +628,7 @@ int nci_recv_frame(struct sk_buff *skb)
 {
        struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 
-       nfc_dbg("entry, len %d", skb->len);
+       pr_debug("len %d\n", skb->len);
 
        if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
                && !test_bit(NCI_INIT, &ndev->flags))) {
@@ -665,7 +648,7 @@ static int nci_send_frame(struct sk_buff *skb)
 {
        struct nci_dev *ndev = (struct nci_dev *) skb->dev;
 
-       nfc_dbg("entry, len %d", skb->len);
+       pr_debug("len %d\n", skb->len);
 
        if (!ndev) {
                kfree_skb(skb);
@@ -684,11 +667,11 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
        struct nci_ctrl_hdr *hdr;
        struct sk_buff *skb;
 
-       nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+       pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
 
        skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
        if (!skb) {
-               nfc_err("no memory for command");
+               pr_err("no memory for command\n");
                return -ENOMEM;
        }
 
@@ -718,7 +701,7 @@ static void nci_tx_work(struct work_struct *work)
        struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
        struct sk_buff *skb;
 
-       nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+       pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
 
        /* Send queued tx data */
        while (atomic_read(&ndev->credits_cnt)) {
@@ -726,12 +709,15 @@ static void nci_tx_work(struct work_struct *work)
                if (!skb)
                        return;
 
-               atomic_dec(&ndev->credits_cnt);
+               /* Check if data flow control is used */
+               if (atomic_read(&ndev->credits_cnt) !=
+                               NCI_DATA_FLOW_CONTROL_NOT_USED)
+                       atomic_dec(&ndev->credits_cnt);
 
-               nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
-                               nci_pbf(skb->data),
-                               nci_conn_id(skb->data),
-                               nci_plen(skb->data));
+               pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+                        nci_pbf(skb->data),
+                        nci_conn_id(skb->data),
+                        nci_plen(skb->data));
 
                nci_send_frame(skb);
        }
@@ -760,7 +746,7 @@ static void nci_rx_work(struct work_struct *work)
                        break;
 
                default:
-                       nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+                       pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
                        kfree_skb(skb);
                        break;
                }
@@ -774,7 +760,7 @@ static void nci_cmd_work(struct work_struct *work)
        struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
        struct sk_buff *skb;
 
-       nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+       pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
 
        /* Send queued command */
        if (atomic_read(&ndev->cmd_cnt)) {
@@ -784,11 +770,11 @@ static void nci_cmd_work(struct work_struct *work)
 
                atomic_dec(&ndev->cmd_cnt);
 
-               nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-                               nci_pbf(skb->data),
-                               nci_opcode_gid(nci_opcode(skb->data)),
-                               nci_opcode_oid(nci_opcode(skb->data)),
-                               nci_plen(skb->data));
+               pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+                        nci_pbf(skb->data),
+                        nci_opcode_gid(nci_opcode(skb->data)),
+                        nci_opcode_oid(nci_opcode(skb->data)),
+                        nci_plen(skb->data));
 
                nci_send_frame(skb);
 
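Note: the nci_tx_work() hunk above decrements the credit counter only when credit-based flow control is actually in use. Below is a minimal userspace sketch of that accounting, not the kernel code itself: printf stands in for frame transmission, the skb queue and atomics are reduced to plain ints, and the 0xff sentinel follows the usual NCI "flow control not used" convention.

#include <stdio.h>

#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff

/* simplified stand-in for nci_tx_work(): send while credits remain,
 * but only consume credits when flow control is actually used */
static void tx_work(int *credits_cnt, int queued_pkts)
{
	while (*credits_cnt && queued_pkts) {
		queued_pkts--;

		if (*credits_cnt != NCI_DATA_FLOW_CONTROL_NOT_USED)
			(*credits_cnt)--;

		printf("sent one data frame, credits left %d\n", *credits_cnt);
	}
}

int main(void)
{
	int credits = 2;

	tx_work(&credits, 5);			/* stops after two frames */

	credits = NCI_DATA_FLOW_CONTROL_NOT_USED;
	tx_work(&credits, 5);			/* sends all five frames */
	return 0;
}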
index e5ed90f..1e040fe 100644 (file)
@@ -21,6 +21,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
@@ -40,7 +42,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
        data_exchange_cb_t cb = ndev->data_exchange_cb;
        void *cb_context = ndev->data_exchange_cb_context;
 
-       nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
+       pr_debug("len %d, err %d\n", skb ? skb->len : 0, err);
 
        if (cb) {
                ndev->data_exchange_cb = NULL;
@@ -49,7 +51,7 @@ void nci_data_exchange_complete(struct nci_dev *ndev,
                /* forward skb to nfc core */
                cb(cb_context, skb, err);
        } else if (skb) {
-               nfc_err("no rx callback, dropping rx data...");
+               pr_err("no rx callback, dropping rx data...\n");
 
                /* no waiting callback, free skb */
                kfree_skb(skb);
@@ -90,12 +92,13 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
        int frag_len;
        int rc = 0;
 
-       nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
+       pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);
 
        __skb_queue_head_init(&frags_q);
 
        while (total_len) {
-               frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
+               frag_len =
+                       min_t(int, total_len, ndev->max_data_pkt_payload_size);
 
                skb_frag = nci_skb_alloc(ndev,
                                        (NCI_DATA_HDR_SIZE + frag_len),
@@ -118,8 +121,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
                data += frag_len;
                total_len -= frag_len;
 
-               nfc_dbg("frag_len %d, remaining total_len %d",
-                       frag_len, total_len);
+               pr_debug("frag_len %d, remaining total_len %d\n",
+                        frag_len, total_len);
        }
 
        /* queue all fragments atomically */
@@ -148,10 +151,10 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
 {
        int rc = 0;
 
-       nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
+       pr_debug("conn_id 0x%x, plen %d\n", conn_id, skb->len);
 
        /* check if the packet need to be fragmented */
-       if (skb->len <= ndev->max_pkt_payload_size) {
+       if (skb->len <= ndev->max_data_pkt_payload_size) {
                /* no need to fragment packet */
                nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
 
@@ -160,7 +163,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
                /* fragment packet and queue the fragments */
                rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
                if (rc) {
-                       nfc_err("failed to fragment tx data packet");
+                       pr_err("failed to fragment tx data packet\n");
                        goto free_exit;
                }
        }
@@ -190,7 +193,7 @@ static void nci_add_rx_data_frag(struct nci_dev *ndev,
 
                /* first, make enough room for the already accumulated data */
                if (skb_cow_head(skb, reassembly_len)) {
-                       nfc_err("error adding room for accumulated rx data");
+                       pr_err("error adding room for accumulated rx data\n");
 
                        kfree_skb(skb);
                        skb = 0;
@@ -227,19 +230,19 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u8 pbf = nci_pbf(skb->data);
 
-       nfc_dbg("entry, len %d", skb->len);
+       pr_debug("len %d\n", skb->len);
 
-       nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
-                       nci_pbf(skb->data),
-                       nci_conn_id(skb->data),
-                       nci_plen(skb->data));
+       pr_debug("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
+                nci_pbf(skb->data),
+                nci_conn_id(skb->data),
+                nci_plen(skb->data));
 
        /* strip the nci data header */
        skb_pull(skb, NCI_DATA_HDR_SIZE);
 
        if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
                /* frame I/F => remove the status byte */
-               nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
+               pr_debug("NFC_PROTO_MIFARE => remove the status byte\n");
                skb_trim(skb, (skb->len - 1));
        }
 
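Note: the nci_send_data()/nci_queue_tx_data_frags() changes above switch the fragmentation threshold to the max_data_pkt_payload_size reported in CORE_INIT_RSP. A hedged userspace sketch of the fragmentation arithmetic follows; the payload and threshold values are made up, and only the last fragment would carry NCI_PBF_LAST.

#include <stdio.h>

int main(void)
{
	int total_len = 700;			/* example payload length */
	int max_data_pkt_payload_size = 251;	/* example CORE_INIT_RSP value */
	int offset = 0;

	while (total_len) {
		int frag_len = total_len < max_data_pkt_payload_size ?
			       total_len : max_data_pkt_payload_size;

		printf("fragment at %d, len %d, PBF=%s\n", offset, frag_len,
		       total_len == frag_len ? "LAST" : "CONT");

		offset += frag_len;
		total_len -= frag_len;
	}
	return 0;
}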
index b19dc2f..e99adcf 100644 (file)
@@ -42,12 +42,9 @@ int nci_to_errno(__u8 code)
        case NCI_STATUS_REJECTED:
                return -EBUSY;
 
-       case NCI_STATUS_MESSAGE_CORRUPTED:
+       case NCI_STATUS_RF_FRAME_CORRUPTED:
                return -EBADMSG;
 
-       case NCI_STATUS_BUFFER_FULL:
-               return -ENOBUFS;
-
        case NCI_STATUS_NOT_INITIALIZED:
                return -EHOSTDOWN;
 
@@ -80,9 +77,6 @@ int nci_to_errno(__u8 code)
        case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
                return -ETIMEDOUT;
 
-       case NCI_STATUS_RF_LINK_LOSS_ERROR:
-               return -ENOLINK;
-
        case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
                return -EDQUOT;
 
index 96633f5..c36bd4a 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -43,18 +45,18 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
        struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
        int i;
 
-       nfc_dbg("entry, num_entries %d", ntf->num_entries);
+       pr_debug("num_entries %d\n", ntf->num_entries);
 
        if (ntf->num_entries > NCI_MAX_NUM_CONN)
                ntf->num_entries = NCI_MAX_NUM_CONN;
 
        /* update the credits */
        for (i = 0; i < ntf->num_entries; i++) {
-               nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
-                       ntf->conn_entries[i].conn_id,
-                       ntf->conn_entries[i].credits);
+               pr_debug("entry[%d]: conn_id %d, credits %d\n",
+                        i, ntf->conn_entries[i].conn_id,
+                        ntf->conn_entries[i].credits);
 
-               if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
+               if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
                        /* found static rf connection */
                        atomic_add(ntf->conn_entries[i].credits,
                                &ndev->credits_cnt);
@@ -66,31 +68,20 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
                queue_work(ndev->tx_wq, &ndev->tx_work);
 }
 
-static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
-                                       struct sk_buff *skb)
-{
-       struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
-
-       nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
-}
-
-static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
-                       struct nci_rf_activate_ntf *ntf, __u8 *data)
+static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
+                       struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
 {
        struct rf_tech_specific_params_nfca_poll *nfca_poll;
-       struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
 
        nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
-       nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
 
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
        nfca_poll->nfcid1_len = *data++;
 
-       nfc_dbg("sens_res 0x%x, nfcid1_len %d",
-               nfca_poll->sens_res,
-               nfca_poll->nfcid1_len);
+       pr_debug("sens_res 0x%x, nfcid1_len %d\n",
+                nfca_poll->sens_res, nfca_poll->nfcid1_len);
 
        memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
        data += nfca_poll->nfcid1_len;
@@ -100,32 +91,32 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
        if (nfca_poll->sel_res_len != 0)
                nfca_poll->sel_res = *data++;
 
-       ntf->rf_interface_type = *data++;
-       ntf->activation_params_len = *data++;
+       pr_debug("sel_res_len %d, sel_res 0x%x\n",
+                nfca_poll->sel_res_len,
+                nfca_poll->sel_res);
 
-       nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
-               nfca_poll->sel_res_len,
-               nfca_poll->sel_res,
-               ntf->rf_interface_type,
-               ntf->activation_params_len);
+       return data;
+}
 
-       switch (ntf->rf_interface_type) {
-       case NCI_RF_INTERFACE_ISO_DEP:
-               nfca_poll_iso_dep->rats_res_len = *data++;
-               if (nfca_poll_iso_dep->rats_res_len > 0) {
-                       memcpy(nfca_poll_iso_dep->rats_res,
+static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
+                       struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
+{
+       struct activation_params_nfca_poll_iso_dep *nfca_poll;
+
+       switch (ntf->activation_rf_tech_and_mode) {
+       case NCI_NFC_A_PASSIVE_POLL_MODE:
+               nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
+               nfca_poll->rats_res_len = *data++;
+               if (nfca_poll->rats_res_len > 0) {
+                       memcpy(nfca_poll->rats_res,
                                data,
-                               nfca_poll_iso_dep->rats_res_len);
+                               nfca_poll->rats_res_len);
                }
                break;
 
-       case NCI_RF_INTERFACE_FRAME:
-               /* no activation params */
-               break;
-
        default:
-               nfc_err("unsupported rf_interface_type 0x%x",
-                       ntf->rf_interface_type);
+               pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+                      ntf->activation_rf_tech_and_mode);
                return -EPROTO;
        }
 
@@ -133,7 +124,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
 }
 
 static void nci_target_found(struct nci_dev *ndev,
-                               struct nci_rf_activate_ntf *ntf)
+                               struct nci_rf_intf_activated_ntf *ntf)
 {
        struct nfc_target nfc_tgt;
 
@@ -141,66 +132,105 @@ static void nci_target_found(struct nci_dev *ndev,
                nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
        else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)   /* 4A */
                nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
+       else
+               nfc_tgt.supported_protocols = 0;
 
        nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
        nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
 
        if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
-               nfc_dbg("the target found does not have the desired protocol");
+               pr_debug("the target found does not have the desired protocol\n");
                return;
        }
 
-       nfc_dbg("new target found,  supported_protocols 0x%x",
-               nfc_tgt.supported_protocols);
+       pr_debug("new target found,  supported_protocols 0x%x\n",
+                nfc_tgt.supported_protocols);
 
        ndev->target_available_prots = nfc_tgt.supported_protocols;
 
        nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
 }
 
-static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
-                                       struct sk_buff *skb)
+static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
+                                               struct sk_buff *skb)
 {
-       struct nci_rf_activate_ntf ntf;
+       struct nci_rf_intf_activated_ntf ntf;
        __u8 *data = skb->data;
-       int rc = -1;
+       int err = 0;
 
        clear_bit(NCI_DISCOVERY, &ndev->flags);
        set_bit(NCI_POLL_ACTIVE, &ndev->flags);
 
-       ntf.target_handle = *data++;
+       ntf.rf_discovery_id = *data++;
+       ntf.rf_interface_type = *data++;
        ntf.rf_protocol = *data++;
-       ntf.rf_tech_and_mode = *data++;
+       ntf.activation_rf_tech_and_mode = *data++;
        ntf.rf_tech_specific_params_len = *data++;
 
-       nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
-               ntf.target_handle,
-               ntf.rf_protocol,
-               ntf.rf_tech_and_mode,
-               ntf.rf_tech_specific_params_len);
-
-       switch (ntf.rf_tech_and_mode) {
-       case NCI_NFC_A_PASSIVE_POLL_MODE:
-               rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
-                       data);
-               break;
+       pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id);
+       pr_debug("rf_interface_type 0x%x\n", ntf.rf_interface_type);
+       pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol);
+       pr_debug("activation_rf_tech_and_mode 0x%x\n",
+                ntf.activation_rf_tech_and_mode);
+       pr_debug("rf_tech_specific_params_len %d\n",
+                ntf.rf_tech_specific_params_len);
+
+       if (ntf.rf_tech_specific_params_len > 0) {
+               switch (ntf.activation_rf_tech_and_mode) {
+               case NCI_NFC_A_PASSIVE_POLL_MODE:
+                       data = nci_extract_rf_params_nfca_passive_poll(ndev,
+                               &ntf, data);
+                       break;
+
+               default:
+                       pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
+                              ntf.activation_rf_tech_and_mode);
+                       return;
+               }
+       }
 
-       default:
-               nfc_err("unsupported rf_tech_and_mode 0x%x",
-                       ntf.rf_tech_and_mode);
-               return;
+       ntf.data_exch_rf_tech_and_mode = *data++;
+       ntf.data_exch_tx_bit_rate = *data++;
+       ntf.data_exch_rx_bit_rate = *data++;
+       ntf.activation_params_len = *data++;
+
+       pr_debug("data_exch_rf_tech_and_mode 0x%x\n",
+                ntf.data_exch_rf_tech_and_mode);
+       pr_debug("data_exch_tx_bit_rate 0x%x\n",
+                ntf.data_exch_tx_bit_rate);
+       pr_debug("data_exch_rx_bit_rate 0x%x\n",
+                ntf.data_exch_rx_bit_rate);
+       pr_debug("activation_params_len %d\n",
+                ntf.activation_params_len);
+
+       if (ntf.activation_params_len > 0) {
+               switch (ntf.rf_interface_type) {
+               case NCI_RF_INTERFACE_ISO_DEP:
+                       err = nci_extract_activation_params_iso_dep(ndev,
+                               &ntf, data);
+                       break;
+
+               case NCI_RF_INTERFACE_FRAME:
+                       /* no activation params */
+                       break;
+
+               default:
+                       pr_err("unsupported rf_interface_type 0x%x\n",
+                              ntf.rf_interface_type);
+                       return;
+               }
        }
 
-       if (!rc)
+       if (!err)
                nci_target_found(ndev, &ntf);
 }
 
 static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
                                        struct sk_buff *skb)
 {
-       __u8 type = skb->data[0];
+       struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
 
-       nfc_dbg("entry, type 0x%x", type);
+       pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);
 
        clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
        ndev->target_active_prot = 0;
@@ -214,6 +244,9 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
                ndev->rx_data_reassembly = 0;
        }
 
+       /* set the available credits to initial value */
+       atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+
        /* complete the data exchange transaction, if exists */
        if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
                nci_data_exchange_complete(ndev, NULL, -EIO);
@@ -223,11 +256,11 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u16 ntf_opcode = nci_opcode(skb->data);
 
-       nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-                       nci_pbf(skb->data),
-                       nci_opcode_gid(ntf_opcode),
-                       nci_opcode_oid(ntf_opcode),
-                       nci_plen(skb->data));
+       pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+                nci_pbf(skb->data),
+                nci_opcode_gid(ntf_opcode),
+                nci_opcode_oid(ntf_opcode),
+                nci_plen(skb->data));
 
        /* strip the nci control header */
        skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -237,12 +270,8 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_core_conn_credits_ntf_packet(ndev, skb);
                break;
 
-       case NCI_OP_RF_FIELD_INFO_NTF:
-               nci_rf_field_info_ntf_packet(ndev, skb);
-               break;
-
-       case NCI_OP_RF_ACTIVATE_NTF:
-               nci_rf_activate_ntf_packet(ndev, skb);
+       case NCI_OP_RF_INTF_ACTIVATED_NTF:
+               nci_rf_intf_activated_ntf_packet(ndev, skb);
                break;
 
        case NCI_OP_RF_DEACTIVATE_NTF:
@@ -250,7 +279,7 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
                break;
 
        default:
-               nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
+               pr_err("unknown ntf opcode 0x%x\n", ntf_opcode);
                break;
        }
 
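Note: nci_rf_intf_activated_ntf_packet() above now walks RF_INTF_ACTIVATED_NTF in order: fixed header fields, an optional technology-specific block, the data-exchange parameters, then optional activation parameters. A small userspace sketch of that walk over an illustrative (not captured) byte buffer; field values and comments are examples only.

#include <stdio.h>

int main(void)
{
	unsigned char ntf[] = {
		0x01,	/* rf_discovery_id */
		0x02,	/* rf_interface_type (e.g. ISO-DEP) */
		0x04,	/* rf_protocol (e.g. ISO-DEP) */
		0x00,	/* activation_rf_tech_and_mode (NFC-A passive poll) */
		0x00,	/* rf_tech_specific_params_len (none in this sample) */
		0x00,	/* data_exch_rf_tech_and_mode */
		0x00,	/* data_exch_tx_bit_rate */
		0x00,	/* data_exch_rx_bit_rate */
		0x00,	/* activation_params_len (none in this sample) */
	};
	unsigned char *data = ntf;

	unsigned char rf_discovery_id = *data++;
	unsigned char rf_interface_type = *data++;
	unsigned char rf_protocol = *data++;
	unsigned char rf_tech_and_mode = *data++;
	unsigned char tech_params_len = *data++;

	data += tech_params_len;	/* would be parsed per technology */

	unsigned char dx_tech_and_mode = *data++;
	unsigned char dx_tx_rate = *data++;
	unsigned char dx_rx_rate = *data++;
	unsigned char act_params_len = *data++;

	printf("id %u, intf 0x%x, proto 0x%x, tech/mode 0x%x, act params %u byte(s)\n",
	       rf_discovery_id, rf_interface_type, rf_protocol,
	       rf_tech_and_mode, act_params_len);
	(void)dx_tech_and_mode; (void)dx_tx_rate; (void)dx_rx_rate;
	return 0;
}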
index 0403d4c..ca611c5 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -40,12 +42,13 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        struct nci_core_reset_rsp *rsp = (void *) skb->data;
 
-       nfc_dbg("entry, status 0x%x", rsp->status);
+       pr_debug("status 0x%x\n", rsp->status);
 
-       if (rsp->status == NCI_STATUS_OK)
+       if (rsp->status == NCI_STATUS_OK) {
                ndev->nci_ver = rsp->nci_ver;
-
-       nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
+               pr_debug("nci_ver 0x%x, config_status 0x%x\n",
+                        rsp->nci_ver, rsp->config_status);
+       }
 
        nci_req_complete(ndev, rsp->status);
 }
@@ -55,16 +58,16 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
        struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
        struct nci_core_init_rsp_2 *rsp_2;
 
-       nfc_dbg("entry, status 0x%x", rsp_1->status);
+       pr_debug("status 0x%x\n", rsp_1->status);
 
        if (rsp_1->status != NCI_STATUS_OK)
-               return;
+               goto exit;
 
        ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
        ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
 
        if (ndev->num_supported_rf_interfaces >
-               NCI_MAX_SUPPORTED_RF_INTERFACES) {
+                       NCI_MAX_SUPPORTED_RF_INTERFACES) {
                ndev->num_supported_rf_interfaces =
                        NCI_MAX_SUPPORTED_RF_INTERFACES;
        }
@@ -73,68 +76,58 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                rsp_1->supported_rf_interfaces,
                ndev->num_supported_rf_interfaces);
 
-       rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
+       rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
 
        ndev->max_logical_connections =
                rsp_2->max_logical_connections;
        ndev->max_routing_table_size =
                __le16_to_cpu(rsp_2->max_routing_table_size);
-       ndev->max_control_packet_payload_length =
-               rsp_2->max_control_packet_payload_length;
-       ndev->rf_sending_buffer_size =
-               __le16_to_cpu(rsp_2->rf_sending_buffer_size);
-       ndev->rf_receiving_buffer_size =
-               __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
-       ndev->manufacturer_id =
-               __le16_to_cpu(rsp_2->manufacturer_id);
-
-       nfc_dbg("nfcc_features 0x%x",
-               ndev->nfcc_features);
-       nfc_dbg("num_supported_rf_interfaces %d",
-               ndev->num_supported_rf_interfaces);
-       nfc_dbg("supported_rf_interfaces[0] 0x%x",
-               ndev->supported_rf_interfaces[0]);
-       nfc_dbg("supported_rf_interfaces[1] 0x%x",
-               ndev->supported_rf_interfaces[1]);
-       nfc_dbg("supported_rf_interfaces[2] 0x%x",
-               ndev->supported_rf_interfaces[2]);
-       nfc_dbg("supported_rf_interfaces[3] 0x%x",
-               ndev->supported_rf_interfaces[3]);
-       nfc_dbg("max_logical_connections %d",
-               ndev->max_logical_connections);
-       nfc_dbg("max_routing_table_size %d",
-               ndev->max_routing_table_size);
-       nfc_dbg("max_control_packet_payload_length %d",
-               ndev->max_control_packet_payload_length);
-       nfc_dbg("rf_sending_buffer_size %d",
-               ndev->rf_sending_buffer_size);
-       nfc_dbg("rf_receiving_buffer_size %d",
-               ndev->rf_receiving_buffer_size);
-       nfc_dbg("manufacturer_id 0x%x",
-               ndev->manufacturer_id);
-
-       nci_req_complete(ndev, rsp_1->status);
-}
-
-static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
-                                               struct sk_buff *skb)
-{
-       struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
-
-       nfc_dbg("entry, status 0x%x", rsp->status);
-
-       if (rsp->status != NCI_STATUS_OK)
-               return;
-
-       ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
-       ndev->initial_num_credits = rsp->initial_num_credits;
-       ndev->conn_id = rsp->conn_id;
+       ndev->max_ctrl_pkt_payload_len =
+               rsp_2->max_ctrl_pkt_payload_len;
+       ndev->max_size_for_large_params =
+               __le16_to_cpu(rsp_2->max_size_for_large_params);
+       ndev->max_data_pkt_payload_size =
+               rsp_2->max_data_pkt_payload_size;
+       ndev->initial_num_credits =
+               rsp_2->initial_num_credits;
+       ndev->manufact_id =
+               rsp_2->manufact_id;
+       ndev->manufact_specific_info =
+               __le32_to_cpu(rsp_2->manufact_specific_info);
 
        atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
 
-       nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
-       nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
-       nfc_dbg("conn_id %d", ndev->conn_id);
+       pr_debug("nfcc_features 0x%x\n",
+                ndev->nfcc_features);
+       pr_debug("num_supported_rf_interfaces %d\n",
+                ndev->num_supported_rf_interfaces);
+       pr_debug("supported_rf_interfaces[0] 0x%x\n",
+                ndev->supported_rf_interfaces[0]);
+       pr_debug("supported_rf_interfaces[1] 0x%x\n",
+                ndev->supported_rf_interfaces[1]);
+       pr_debug("supported_rf_interfaces[2] 0x%x\n",
+                ndev->supported_rf_interfaces[2]);
+       pr_debug("supported_rf_interfaces[3] 0x%x\n",
+                ndev->supported_rf_interfaces[3]);
+       pr_debug("max_logical_connections %d\n",
+                ndev->max_logical_connections);
+       pr_debug("max_routing_table_size %d\n",
+                ndev->max_routing_table_size);
+       pr_debug("max_ctrl_pkt_payload_len %d\n",
+                ndev->max_ctrl_pkt_payload_len);
+       pr_debug("max_size_for_large_params %d\n",
+                ndev->max_size_for_large_params);
+       pr_debug("max_data_pkt_payload_size %d\n",
+                ndev->max_data_pkt_payload_size);
+       pr_debug("initial_num_credits %d\n",
+                ndev->initial_num_credits);
+       pr_debug("manufact_id 0x%x\n",
+                ndev->manufact_id);
+       pr_debug("manufact_specific_info 0x%x\n",
+                ndev->manufact_specific_info);
+
+exit:
+       nci_req_complete(ndev, rsp_1->status);
 }
 
 static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
@@ -142,7 +135,7 @@ static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
 {
        __u8 status = skb->data[0];
 
-       nfc_dbg("entry, status 0x%x", status);
+       pr_debug("status 0x%x\n", status);
 
        nci_req_complete(ndev, status);
 }
@@ -151,7 +144,7 @@ static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
 {
        __u8 status = skb->data[0];
 
-       nfc_dbg("entry, status 0x%x", status);
+       pr_debug("status 0x%x\n", status);
 
        if (status == NCI_STATUS_OK)
                set_bit(NCI_DISCOVERY, &ndev->flags);
@@ -164,7 +157,7 @@ static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
 {
        __u8 status = skb->data[0];
 
-       nfc_dbg("entry, status 0x%x", status);
+       pr_debug("status 0x%x\n", status);
 
        clear_bit(NCI_DISCOVERY, &ndev->flags);
 
@@ -178,11 +171,11 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
        /* we got a rsp, stop the cmd timer */
        del_timer(&ndev->cmd_timer);
 
-       nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
-                       nci_pbf(skb->data),
-                       nci_opcode_gid(rsp_opcode),
-                       nci_opcode_oid(rsp_opcode),
-                       nci_plen(skb->data));
+       pr_debug("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
+                nci_pbf(skb->data),
+                nci_opcode_gid(rsp_opcode),
+                nci_opcode_oid(rsp_opcode),
+                nci_plen(skb->data));
 
        /* strip the nci control header */
        skb_pull(skb, NCI_CTRL_HDR_SIZE);
@@ -196,10 +189,6 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                nci_core_init_rsp_packet(ndev, skb);
                break;
 
-       case NCI_OP_CORE_CONN_CREATE_RSP:
-               nci_core_conn_create_rsp_packet(ndev, skb);
-               break;
-
        case NCI_OP_RF_DISCOVER_MAP_RSP:
                nci_rf_disc_map_rsp_packet(ndev, skb);
                break;
@@ -213,7 +202,7 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
                break;
 
        default:
-               nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
+               pr_err("unknown rsp opcode 0x%x\n", rsp_opcode);
                break;
        }
 
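Note: the CORE_INIT_RSP handling above locates the second part of the response immediately after the variable-length interface list, i.e. at payload offset 6 + num_supported_rf_interfaces (status byte, 32-bit nfcc_features, count byte, then the list itself). A small sketch of that offset computation over a made-up payload:

#include <stdio.h>

int main(void)
{
	unsigned char payload[] = {
		0x00,				/* status */
		0x00, 0x00, 0x00, 0x00,		/* nfcc_features (le32) */
		0x02,				/* num_supported_rf_interfaces */
		0x01, 0x02,			/* supported_rf_interfaces[] */
		0x02,				/* rsp_2: max_logical_connections */
		/* ... remaining rsp_2 fields would follow here ... */
	};
	unsigned char num = payload[5];
	unsigned char *rsp_2 = payload + 6 + num;

	printf("rsp_2 at offset %d, max_logical_connections %u\n",
	       (int)(rsp_2 - payload), rsp_2[0]);
	return 0;
}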
index 03f8818..c10e9b8 100644 (file)
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/genetlink.h>
 #include <linux/nfc.h>
 #include <linux/slab.h>
@@ -51,8 +53,6 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
 {
        void *hdr;
 
-       nfc_dbg("entry");
-
        hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
                                &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
        if (!hdr)
@@ -105,8 +105,6 @@ static int nfc_genl_dump_targets(struct sk_buff *skb,
        struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
        int rc;
 
-       nfc_dbg("entry");
-
        if (!dev) {
                dev = __get_device_from_cb(cb);
                if (IS_ERR(dev))
@@ -139,8 +137,6 @@ static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
 {
        struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
 
-       nfc_dbg("entry");
-
        if (dev)
                nfc_put_device(dev);
 
@@ -152,8 +148,6 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       nfc_dbg("entry");
-
        dev->genl_data.poll_req_pid = 0;
 
        msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -183,8 +177,6 @@ int nfc_genl_device_added(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       nfc_dbg("entry");
-
        msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -216,8 +208,6 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
        struct sk_buff *msg;
        void *hdr;
 
-       nfc_dbg("entry");
-
        msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -249,8 +239,6 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
 {
        void *hdr;
 
-       nfc_dbg("entry");
-
        hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
                                                        NFC_CMD_GET_DEVICE);
        if (!hdr)
@@ -277,8 +265,6 @@ static int nfc_genl_dump_devices(struct sk_buff *skb,
        struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
        bool first_call = false;
 
-       nfc_dbg("entry");
-
        if (!iter) {
                first_call = true;
                iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
@@ -319,8 +305,6 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_dbg("entry");
-
        nfc_device_iter_exit(iter);
        kfree(iter);
 
@@ -334,8 +318,6 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
        u32 idx;
        int rc = -ENOBUFS;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -373,8 +355,6 @@ static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -396,8 +376,6 @@ static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -420,8 +398,6 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
        u32 idx;
        u32 protocols;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
                !info->attrs[NFC_ATTR_PROTOCOLS])
                return -EINVAL;
@@ -451,8 +427,6 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
        int rc;
        u32 idx;
 
-       nfc_dbg("entry");
-
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
                return -EINVAL;
 
@@ -524,7 +498,7 @@ static int nfc_genl_rcv_nl_event(struct notifier_block *this,
        if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
                goto out;
 
-       nfc_dbg("NETLINK_URELEASE event from id %d", n->pid);
+       pr_debug("NETLINK_URELEASE event from id %d\n", n->pid);
 
        nfc_device_iter_init(&iter);
        dev = nfc_device_iter_next(&iter);
index d86583f..67d6050 100644 (file)
 #include <net/nfc/nfc.h>
 #include <net/sock.h>
 
-__printf(2, 3)
-int nfc_printk(const char *level, const char *fmt, ...);
-
-#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
-#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg)
-#define nfc_dbg(fmt, arg...) pr_debug(fmt "\n", ##arg)
-
 struct nfc_protocol {
        int id;
        struct proto *proto;
index ee7b2b3..68ecf3f 100644 (file)
@@ -21,6 +21,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/tcp_states.h>
 #include <linux/nfc.h>
 #include <linux/export.h>
@@ -29,7 +31,7 @@
 
 static void rawsock_write_queue_purge(struct sock *sk)
 {
-       nfc_dbg("sk=%p", sk);
+       pr_debug("sk=%p\n", sk);
 
        spin_lock_bh(&sk->sk_write_queue.lock);
        __skb_queue_purge(&sk->sk_write_queue);
@@ -39,7 +41,7 @@ static void rawsock_write_queue_purge(struct sock *sk)
 
 static void rawsock_report_error(struct sock *sk, int err)
 {
-       nfc_dbg("sk=%p err=%d", sk, err);
+       pr_debug("sk=%p err=%d\n", sk, err);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
        sk->sk_err = -err;
@@ -52,7 +54,7 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       nfc_dbg("sock=%p", sock);
+       pr_debug("sock=%p\n", sock);
 
        sock_orphan(sk);
        sock_put(sk);
@@ -68,14 +70,14 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
        struct nfc_dev *dev;
        int rc = 0;
 
-       nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags);
+       pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
 
        if (!addr || len < sizeof(struct sockaddr_nfc) ||
                addr->sa_family != AF_NFC)
                return -EINVAL;
 
-       nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx,
-                                       addr->target_idx, addr->nfc_protocol);
+       pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
+                addr->dev_idx, addr->target_idx, addr->nfc_protocol);
 
        lock_sock(sk);
 
@@ -136,7 +138,7 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
 
        BUG_ON(in_irq());
 
-       nfc_dbg("sk=%p err=%d", sk, err);
+       pr_debug("sk=%p err=%d\n", sk, err);
 
        if (err)
                goto error;
@@ -172,7 +174,7 @@ static void rawsock_tx_work(struct work_struct *work)
        struct sk_buff *skb;
        int rc;
 
-       nfc_dbg("sk=%p target_idx=%u", sk, target_idx);
+       pr_debug("sk=%p target_idx=%u\n", sk, target_idx);
 
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                rawsock_write_queue_purge(sk);
@@ -198,7 +200,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sk_buff *skb;
        int rc;
 
-       nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len);
+       pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);
 
        if (msg->msg_namelen)
                return -EOPNOTSUPP;
@@ -239,7 +241,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
        int copied;
        int rc;
 
-       nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags);
+       pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
 
        skb = skb_recv_datagram(sk, flags, noblock, &rc);
        if (!skb)
@@ -283,7 +285,7 @@ static const struct proto_ops rawsock_ops = {
 
 static void rawsock_destruct(struct sock *sk)
 {
-       nfc_dbg("sk=%p", sk);
+       pr_debug("sk=%p\n", sk);
 
        if (sk->sk_state == TCP_ESTABLISHED) {
                nfc_deactivate_target(nfc_rawsock(sk)->dev,
@@ -294,7 +296,7 @@ static void rawsock_destruct(struct sock *sk)
        skb_queue_purge(&sk->sk_receive_queue);
 
        if (!sock_flag(sk, SOCK_DEAD)) {
-               nfc_err("Freeing alive NFC raw socket %p", sk);
+               pr_err("Freeing alive NFC raw socket %p\n", sk);
                return;
        }
 }
@@ -304,7 +306,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
 {
        struct sock *sk;
 
-       nfc_dbg("sock=%p", sock);
+       pr_debug("sock=%p\n", sock);
 
        if (sock->type != SOCK_SEQPACKET)
                return -ESOCKTNOSUPPORT;
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
new file mode 100644 (file)
index 0000000..d9ea33c
--- /dev/null
@@ -0,0 +1,28 @@
+#
+# Open vSwitch
+#
+
+config OPENVSWITCH
+       tristate "Open vSwitch"
+       ---help---
+         Open vSwitch is a multilayer Ethernet switch targeted at virtualized
+         environments.  In addition to supporting a variety of features
+         expected in a traditional hardware switch, it enables fine-grained
+         programmatic extension and flow-based control of the network.  This
+         control is useful in a wide variety of applications but is
+         particularly important in multi-server virtualization deployments,
+         which are often characterized by highly dynamic endpoints and the
+         need to maintain logical abstractions for multiple tenants.
+
+         The Open vSwitch datapath provides an in-kernel fast path for packet
+         forwarding.  It is complemented by a userspace daemon, ovs-vswitchd,
+         which is able to accept configuration from a variety of sources and
+         translate it into packet processing rules.
+
+         See http://openvswitch.org for more information and userspace
+         utilities.
+
+         To compile this code as a module, choose M here: the module will be
+         called openvswitch.
+
+         If unsure, say N.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
new file mode 100644 (file)
index 0000000..15e7384
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Makefile for Open vSwitch.
+#
+
+obj-$(CONFIG_OPENVSWITCH) += openvswitch.o
+
+openvswitch-y := \
+       actions.o \
+       datapath.o \
+       dp_notify.o \
+       flow.o \
+       vport.o \
+       vport-internal_dev.o \
+       vport-netdev.o \
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
new file mode 100644 (file)
index 0000000..2725d1b
--- /dev/null
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/openvswitch.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in6.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/dsfield.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+                       const struct nlattr *attr, int len, bool keep_skb);
+
+static int make_writable(struct sk_buff *skb, int write_len)
+{
+       if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+               return 0;
+
+       return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
+/* remove VLAN header from packet and update csum accordingly. */
+static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+{
+       struct vlan_hdr *vhdr;
+       int err;
+
+       err = make_writable(skb, VLAN_ETH_HLEN);
+       if (unlikely(err))
+               return err;
+
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+                                       + ETH_HLEN, VLAN_HLEN, 0));
+
+       vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+       *current_tci = vhdr->h_vlan_TCI;
+
+       memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+       __skb_pull(skb, VLAN_HLEN);
+
+       vlan_set_encap_proto(skb, vhdr);
+       skb->mac_header += VLAN_HLEN;
+       skb_reset_mac_len(skb);
+
+       return 0;
+}
+
+static int pop_vlan(struct sk_buff *skb)
+{
+       __be16 tci;
+       int err;
+
+       if (likely(vlan_tx_tag_present(skb))) {
+               skb->vlan_tci = 0;
+       } else {
+               if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
+                            skb->len < VLAN_ETH_HLEN))
+                       return 0;
+
+               err = __pop_vlan_tci(skb, &tci);
+               if (err)
+                       return err;
+       }
+       /* move next vlan tag to hw accel tag */
+       if (likely(skb->protocol != htons(ETH_P_8021Q) ||
+                  skb->len < VLAN_ETH_HLEN))
+               return 0;
+
+       err = __pop_vlan_tci(skb, &tci);
+       if (unlikely(err))
+               return err;
+
+       __vlan_hwaccel_put_tag(skb, ntohs(tci));
+       return 0;
+}
+
+static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+{
+       if (unlikely(vlan_tx_tag_present(skb))) {
+               u16 current_tag;
+
+               /* push down current VLAN tag */
+               current_tag = vlan_tx_tag_get(skb);
+
+               if (!__vlan_put_tag(skb, current_tag))
+                       return -ENOMEM;
+
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       skb->csum = csum_add(skb->csum, csum_partial(skb->data
+                                       + ETH_HLEN, VLAN_HLEN, 0));
+
+       }
+       __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+       return 0;
+}
+
+static int set_eth_addr(struct sk_buff *skb,
+                       const struct ovs_key_ethernet *eth_key)
+{
+       int err;
+       err = make_writable(skb, ETH_HLEN);
+       if (unlikely(err))
+               return err;
+
+       memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
+       memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+
+       return 0;
+}
+
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+                               __be32 *addr, __be32 new_addr)
+{
+       int transport_len = skb->len - skb_transport_offset(skb);
+
+       if (nh->protocol == IPPROTO_TCP) {
+               if (likely(transport_len >= sizeof(struct tcphdr)))
+                       inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+                                                *addr, new_addr, 1);
+       } else if (nh->protocol == IPPROTO_UDP) {
+               if (likely(transport_len >= sizeof(struct udphdr)))
+                       inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
+                                                *addr, new_addr, 1);
+       }
+
+       csum_replace4(&nh->check, *addr, new_addr);
+       skb->rxhash = 0;
+       *addr = new_addr;
+}
+
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
+{
+       csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
+       nh->ttl = new_ttl;
+}
+
+static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+{
+       struct iphdr *nh;
+       int err;
+
+       err = make_writable(skb, skb_network_offset(skb) +
+                                sizeof(struct iphdr));
+       if (unlikely(err))
+               return err;
+
+       nh = ip_hdr(skb);
+
+       if (ipv4_key->ipv4_src != nh->saddr)
+               set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+
+       if (ipv4_key->ipv4_dst != nh->daddr)
+               set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
+
+       if (ipv4_key->ipv4_tos != nh->tos)
+               ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
+
+       if (ipv4_key->ipv4_ttl != nh->ttl)
+               set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+
+       return 0;
+}
+
+/* Must follow make_writable() since that can move the skb data. */
+static void set_tp_port(struct sk_buff *skb, __be16 *port,
+                        __be16 new_port, __sum16 *check)
+{
+       inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+       *port = new_port;
+       skb->rxhash = 0;
+}
+
+static int set_udp_port(struct sk_buff *skb,
+                       const struct ovs_key_udp *udp_port_key)
+{
+       struct udphdr *uh;
+       int err;
+
+       err = make_writable(skb, skb_transport_offset(skb) +
+                                sizeof(struct udphdr));
+       if (unlikely(err))
+               return err;
+
+       uh = udp_hdr(skb);
+       if (udp_port_key->udp_src != uh->source)
+               set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+
+       if (udp_port_key->udp_dst != uh->dest)
+               set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+
+       return 0;
+}
+
+static int set_tcp_port(struct sk_buff *skb,
+                       const struct ovs_key_tcp *tcp_port_key)
+{
+       struct tcphdr *th;
+       int err;
+
+       err = make_writable(skb, skb_transport_offset(skb) +
+                                sizeof(struct tcphdr));
+       if (unlikely(err))
+               return err;
+
+       th = tcp_hdr(skb);
+       if (tcp_port_key->tcp_src != th->source)
+               set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
+
+       if (tcp_port_key->tcp_dst != th->dest)
+               set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
+
+       return 0;
+}
+
+static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+{
+       struct vport *vport;
+
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       vport = rcu_dereference(dp->ports[out_port]);
+       if (unlikely(!vport)) {
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       ovs_vport_send(vport, skb);
+       return 0;
+}
+
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+                           const struct nlattr *attr)
+{
+       struct dp_upcall_info upcall;
+       const struct nlattr *a;
+       int rem;
+
+       upcall.cmd = OVS_PACKET_CMD_ACTION;
+       upcall.key = &OVS_CB(skb)->flow->key;
+       upcall.userdata = NULL;
+       upcall.pid = 0;
+
+       for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+                a = nla_next(a, &rem)) {
+               switch (nla_type(a)) {
+               case OVS_USERSPACE_ATTR_USERDATA:
+                       upcall.userdata = a;
+                       break;
+
+               case OVS_USERSPACE_ATTR_PID:
+                       upcall.pid = nla_get_u32(a);
+                       break;
+               }
+       }
+
+       return ovs_dp_upcall(dp, skb, &upcall);
+}
+
+static int sample(struct datapath *dp, struct sk_buff *skb,
+                 const struct nlattr *attr)
+{
+       const struct nlattr *acts_list = NULL;
+       const struct nlattr *a;
+       int rem;
+
+       for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+                a = nla_next(a, &rem)) {
+               switch (nla_type(a)) {
+               case OVS_SAMPLE_ATTR_PROBABILITY:
+                       if (net_random() >= nla_get_u32(a))
+                               return 0;
+                       break;
+
+               case OVS_SAMPLE_ATTR_ACTIONS:
+                       acts_list = a;
+                       break;
+               }
+       }
+
+       return do_execute_actions(dp, skb, nla_data(acts_list),
+                                                nla_len(acts_list), true);
+}
+
+static int execute_set_action(struct sk_buff *skb,
+                                const struct nlattr *nested_attr)
+{
+       int err = 0;
+
+       switch (nla_type(nested_attr)) {
+       case OVS_KEY_ATTR_PRIORITY:
+               skb->priority = nla_get_u32(nested_attr);
+               break;
+
+       case OVS_KEY_ATTR_ETHERNET:
+               err = set_eth_addr(skb, nla_data(nested_attr));
+               break;
+
+       case OVS_KEY_ATTR_IPV4:
+               err = set_ipv4(skb, nla_data(nested_attr));
+               break;
+
+       case OVS_KEY_ATTR_TCP:
+               err = set_tcp_port(skb, nla_data(nested_attr));
+               break;
+
+       case OVS_KEY_ATTR_UDP:
+               err = set_udp_port(skb, nla_data(nested_attr));
+               break;
+       }
+
+       return err;
+}
+
+/* Execute a list of actions against 'skb'. */
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+                       const struct nlattr *attr, int len, bool keep_skb)
+{
+       /* Every output action needs a separate clone of 'skb', but the common
+        * case is just a single output action, so doing a clone and
+        * then freeing the original skbuff would be wasteful.  So the following code
+        * is slightly obscure just to avoid that. */
+       int prev_port = -1;
+       const struct nlattr *a;
+       int rem;
+
+       for (a = attr, rem = len; rem > 0;
+            a = nla_next(a, &rem)) {
+               int err = 0;
+
+               if (prev_port != -1) {
+                       do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+                       prev_port = -1;
+               }
+
+               switch (nla_type(a)) {
+               case OVS_ACTION_ATTR_OUTPUT:
+                       prev_port = nla_get_u32(a);
+                       break;
+
+               case OVS_ACTION_ATTR_USERSPACE:
+                       output_userspace(dp, skb, a);
+                       break;
+
+               case OVS_ACTION_ATTR_PUSH_VLAN:
+                       err = push_vlan(skb, nla_data(a));
+                       if (unlikely(err)) /* skb already freed. */
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_POP_VLAN:
+                       err = pop_vlan(skb);
+                       break;
+
+               case OVS_ACTION_ATTR_SET:
+                       err = execute_set_action(skb, nla_data(a));
+                       break;
+
+               case OVS_ACTION_ATTR_SAMPLE:
+                       err = sample(dp, skb, a);
+                       break;
+               }
+
+               if (unlikely(err)) {
+                       kfree_skb(skb);
+                       return err;
+               }
+       }
+
+       if (prev_port != -1) {
+               if (keep_skb)
+                       skb = skb_clone(skb, GFP_ATOMIC);
+
+               do_output(dp, skb, prev_port);
+       } else if (!keep_skb)
+               consume_skb(skb);
+
+       return 0;
+}
+
+/* Execute a list of actions against 'skb'. */
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
+{
+       struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+       return do_execute_actions(dp, skb, acts->actions,
+                                        acts->actions_len, false);
+}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
new file mode 100644 (file)
index 0000000..9a27251
--- /dev/null
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jhash.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/etherdevice.h>
+#include <linux/genetlink.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/wait.h>
+#include <asm/system.h>
+#include <asm/div64.h>
+#include <linux/highmem.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/inetdevice.h>
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/rculist.h>
+#include <linux/dmi.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "flow.h"
+#include "vport-internal_dev.h"
+
+/**
+ * DOC: Locking:
+ *
+ * Writes to device state (add/remove datapath, port, set operations on vports,
+ * etc.) are protected by RTNL.
+ *
+ * Writes to other state (flow table modifications, set miscellaneous datapath
+ * parameters, etc.) are protected by genl_mutex.  The RTNL lock nests inside
+ * genl_mutex.
+ *
+ * Reads are protected by RCU.
+ *
+ * There are a few special cases (mostly stats) that have their own
+ * synchronization, but they nest under all of the above and don't interact
+ * with each other.
+ */
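+
+/*
+ * As a sketch of that ordering, a hypothetical write path touching both port
+ * state and the flow table would nest the locks like so:
+ *
+ *     genl_lock();            (genl_mutex, the outer lock)
+ *     rtnl_lock();            (RTNL nests inside genl_mutex)
+ *     ...
+ *     rtnl_unlock();
+ *     genl_unlock();
+ *
+ * while read-side code needs only rcu_read_lock()/rcu_read_unlock().
+ */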
+
+/* Global list of datapaths to enable dumping them all out.
+ * Protected by genl_mutex.
+ */
+static LIST_HEAD(dps);
+
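+/* Each datapath's flow table is rehashed periodically by rehash_flow_table()
+ * below, rescheduled on this delayed work item every REHASH_FLOW_INTERVAL
+ * (ten minutes).
+ */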
+#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
+static void rehash_flow_table(struct work_struct *work);
+static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);
+
+static struct vport *new_vport(const struct vport_parms *);
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *,
+                            const struct dp_upcall_info *);
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *,
+                                 const struct dp_upcall_info *);
+
+/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
+static struct datapath *get_dp(int dp_ifindex)
+{
+       struct datapath *dp = NULL;
+       struct net_device *dev;
+
+       rcu_read_lock();
+       dev = dev_get_by_index_rcu(&init_net, dp_ifindex);
+       if (dev) {
+               struct vport *vport = ovs_internal_dev_get_vport(dev);
+               if (vport)
+                       dp = vport->dp;
+       }
+       rcu_read_unlock();
+
+       return dp;
+}
+
+/* Must be called with rcu_read_lock or RTNL lock. */
+const char *ovs_dp_name(const struct datapath *dp)
+{
+       struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+       return vport->ops->get_name(vport);
+}
+
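+/* Returns the ifindex of the datapath's local port, or 0 if the local port
+ * is gone.
+ */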
+static int get_dpifindex(struct datapath *dp)
+{
+       struct vport *local;
+       int ifindex;
+
+       rcu_read_lock();
+
+       local = rcu_dereference(dp->ports[OVSP_LOCAL]);
+       if (local)
+               ifindex = local->ops->get_ifindex(local);
+       else
+               ifindex = 0;
+
+       rcu_read_unlock();
+
+       return ifindex;
+}
+
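+/* RCU callback: final teardown of a datapath, invoked once no RCU readers
+ * can still reference it (see the call_rcu() in ovs_dp_cmd_del()).
+ */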
+static void destroy_dp_rcu(struct rcu_head *rcu)
+{
+       struct datapath *dp = container_of(rcu, struct datapath, rcu);
+
+       ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+       free_percpu(dp->stats_percpu);
+       kfree(dp);
+}
+
+/* Called with RTNL lock and genl_lock. */
+static struct vport *new_vport(const struct vport_parms *parms)
+{
+       struct vport *vport;
+
+       vport = ovs_vport_add(parms);
+       if (!IS_ERR(vport)) {
+               struct datapath *dp = parms->dp;
+
+               rcu_assign_pointer(dp->ports[parms->port_no], vport);
+               list_add(&vport->node, &dp->port_list);
+       }
+
+       return vport;
+}
+
+/* Called with RTNL lock. */
+void ovs_dp_detach_port(struct vport *p)
+{
+       ASSERT_RTNL();
+
+       /* First drop references to device. */
+       list_del(&p->node);
+       rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
+
+       /* Then destroy it. */
+       ovs_vport_del(p);
+}
+
+/* Must be called with rcu_read_lock. */
+void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+{
+       struct datapath *dp = p->dp;
+       struct sw_flow *flow;
+       struct dp_stats_percpu *stats;
+       struct sw_flow_key key;
+       u64 *stats_counter;
+       int error;
+       int key_len;
+
+       stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+       /* Extract flow from 'skb' into 'key'. */
+       error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+       if (unlikely(error)) {
+               kfree_skb(skb);
+               return;
+       }
+
+       /* Look up flow. */
+       flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+       if (unlikely(!flow)) {
+               struct dp_upcall_info upcall;
+
+               upcall.cmd = OVS_PACKET_CMD_MISS;
+               upcall.key = &key;
+               upcall.userdata = NULL;
+               upcall.pid = p->upcall_pid;
+               ovs_dp_upcall(dp, skb, &upcall);
+               consume_skb(skb);
+               stats_counter = &stats->n_missed;
+               goto out;
+       }
+
+       OVS_CB(skb)->flow = flow;
+
+       stats_counter = &stats->n_hit;
+       ovs_flow_used(OVS_CB(skb)->flow, skb);
+       ovs_execute_actions(dp, skb);
+
+out:
+       /* Update datapath statistics. */
+       u64_stats_update_begin(&stats->sync);
+       (*stats_counter)++;
+       u64_stats_update_end(&stats->sync);
+}
+
+static struct genl_family dp_packet_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_PACKET_FAMILY,
+       .version = OVS_PACKET_VERSION,
+       .maxattr = OVS_PACKET_ATTR_MAX
+};
+
+int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+             const struct dp_upcall_info *upcall_info)
+{
+       struct dp_stats_percpu *stats;
+       int dp_ifindex;
+       int err;
+
+       if (upcall_info->pid == 0) {
+               err = -ENOTCONN;
+               goto err;
+       }
+
+       dp_ifindex = get_dpifindex(dp);
+       if (!dp_ifindex) {
+               err = -ENODEV;
+               goto err;
+       }
+
+       if (!skb_is_gso(skb))
+               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+       else
+               err = queue_gso_packets(dp_ifindex, skb, upcall_info);
+       if (err)
+               goto err;
+
+       return 0;
+
+err:
+       stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+       u64_stats_update_begin(&stats->sync);
+       stats->n_lost++;
+       u64_stats_update_end(&stats->sync);
+
+       return err;
+}
+
+static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
+                            const struct dp_upcall_info *upcall_info)
+{
+       struct dp_upcall_info later_info;
+       struct sw_flow_key later_key;
+       struct sk_buff *segs, *nskb;
+       int err;
+
+       segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+       if (IS_ERR(segs))
+               return PTR_ERR(segs);
+
+       /* Queue all of the segments. */
+       skb = segs;
+       do {
+               err = queue_userspace_packet(dp_ifindex, skb, upcall_info);
+               if (err)
+                       break;
+
+               if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+                       /* The initial flow key extracted by ovs_flow_extract()
+                        * in this case is for the first fragment, so we need
+                        * to properly mark later fragments.
+                        */
+                       later_key = *upcall_info->key;
+                       later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+
+                       later_info = *upcall_info;
+                       later_info.key = &later_key;
+                       upcall_info = &later_info;
+               }
+       } while ((skb = skb->next));
+
+       /* Free all of the segments. */
+       skb = segs;
+       do {
+               nskb = skb->next;
+               if (err)
+                       kfree_skb(skb);
+               else
+                       consume_skb(skb);
+       } while ((skb = nskb));
+       return err;
+}
+
+static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+                                 const struct dp_upcall_info *upcall_info)
+{
+       struct ovs_header *upcall;
+       struct sk_buff *nskb = NULL;
+       struct sk_buff *user_skb; /* to be queued to userspace */
+       struct nlattr *nla;
+       unsigned int len;
+       int err;
+
+       if (vlan_tx_tag_present(skb)) {
+               nskb = skb_clone(skb, GFP_ATOMIC);
+               if (!nskb)
+                       return -ENOMEM;
+
+               nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+               if (!nskb)
+                       return -ENOMEM;
+
+               nskb->vlan_tci = 0;
+               skb = nskb;
+       }
+
+       if (nla_attr_size(skb->len) > USHRT_MAX) {
+               err = -EFBIG;
+               goto out;
+       }
+
+       len = sizeof(struct ovs_header);
+       len += nla_total_size(skb->len);
+       len += nla_total_size(FLOW_BUFSIZE);
+       if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
+               len += nla_total_size(8);
+
+       user_skb = genlmsg_new(len, GFP_ATOMIC);
+       if (!user_skb) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
+                            0, upcall_info->cmd);
+       upcall->dp_ifindex = dp_ifindex;
+
+       nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
+       ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+       nla_nest_end(user_skb, nla);
+
+       if (upcall_info->userdata)
+               nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
+                           nla_get_u64(upcall_info->userdata));
+
+       nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
+
+       skb_copy_and_csum_dev(skb, nla_data(nla));
+
+       err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
+
+out:
+       kfree_skb(nskb);
+       return err;
+}
+
+/* Called with genl_mutex. */
+static int flush_flows(int dp_ifindex)
+{
+       struct flow_table *old_table;
+       struct flow_table *new_table;
+       struct datapath *dp;
+
+       dp = get_dp(dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       old_table = genl_dereference(dp->table);
+       new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
+       if (!new_table)
+               return -ENOMEM;
+
+       rcu_assign_pointer(dp->table, new_table);
+
+       ovs_flow_tbl_deferred_destroy(old_table);
+       return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+                               const struct sw_flow_key *key, int depth);
+
+static int validate_sample(const struct nlattr *attr,
+                               const struct sw_flow_key *key, int depth)
+{
+       const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+       const struct nlattr *probability, *actions;
+       const struct nlattr *a;
+       int rem;
+
+       memset(attrs, 0, sizeof(attrs));
+       nla_for_each_nested(a, attr, rem) {
+               int type = nla_type(a);
+               if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+                       return -EINVAL;
+               attrs[type] = a;
+       }
+       if (rem)
+               return -EINVAL;
+
+       probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+       if (!probability || nla_len(probability) != sizeof(u32))
+               return -EINVAL;
+
+       actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+       if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+               return -EINVAL;
+       return validate_actions(actions, key, depth + 1);
+}
+
+static int validate_set(const struct nlattr *a,
+                       const struct sw_flow_key *flow_key)
+{
+       const struct nlattr *ovs_key = nla_data(a);
+       int key_type = nla_type(ovs_key);
+
+       /* There can be only one key in a set action. */
+       if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
+               return -EINVAL;
+
+       if (key_type > OVS_KEY_ATTR_MAX ||
+           nla_len(ovs_key) != ovs_key_lens[key_type])
+               return -EINVAL;
+
+       switch (key_type) {
+       const struct ovs_key_ipv4 *ipv4_key;
+
+       case OVS_KEY_ATTR_PRIORITY:
+       case OVS_KEY_ATTR_ETHERNET:
+               break;
+
+       case OVS_KEY_ATTR_IPV4:
+               if (flow_key->eth.type != htons(ETH_P_IP))
+                       return -EINVAL;
+
+               if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst)
+                       return -EINVAL;
+
+               ipv4_key = nla_data(ovs_key);
+               if (ipv4_key->ipv4_proto != flow_key->ip.proto)
+                       return -EINVAL;
+
+               if (ipv4_key->ipv4_frag != flow_key->ip.frag)
+                       return -EINVAL;
+
+               break;
+
+       case OVS_KEY_ATTR_TCP:
+               if (flow_key->ip.proto != IPPROTO_TCP)
+                       return -EINVAL;
+
+               if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+                       return -EINVAL;
+
+               break;
+
+       case OVS_KEY_ATTR_UDP:
+               if (flow_key->ip.proto != IPPROTO_UDP)
+                       return -EINVAL;
+
+               if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
+                       return -EINVAL;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int validate_userspace(const struct nlattr *attr)
+{
+       static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =   {
+               [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+               [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+       };
+       struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+       int error;
+
+       error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+                                attr, userspace_policy);
+       if (error)
+               return error;
+
+       if (!a[OVS_USERSPACE_ATTR_PID] ||
+           !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int validate_actions(const struct nlattr *attr,
+                               const struct sw_flow_key *key,  int depth)
+{
+       const struct nlattr *a;
+       int rem, err;
+
+       if (depth >= SAMPLE_ACTION_DEPTH)
+               return -EOVERFLOW;
+
+       nla_for_each_nested(a, attr, rem) {
+               /* Expected argument lengths, (u32)-1 for variable length. */
+               static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
+                       [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+                       [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
+                       [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
+                       [OVS_ACTION_ATTR_POP_VLAN] = 0,
+                       [OVS_ACTION_ATTR_SET] = (u32)-1,
+                       [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+               };
+               const struct ovs_action_push_vlan *vlan;
+               int type = nla_type(a);
+
+               if (type > OVS_ACTION_ATTR_MAX ||
+                   (action_lens[type] != nla_len(a) &&
+                    action_lens[type] != (u32)-1))
+                       return -EINVAL;
+
+               switch (type) {
+               case OVS_ACTION_ATTR_UNSPEC:
+                       return -EINVAL;
+
+               case OVS_ACTION_ATTR_USERSPACE:
+                       err = validate_userspace(a);
+                       if (err)
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_OUTPUT:
+                       if (nla_get_u32(a) >= DP_MAX_PORTS)
+                               return -EINVAL;
+                       break;
+
+               case OVS_ACTION_ATTR_POP_VLAN:
+                       break;
+
+               case OVS_ACTION_ATTR_PUSH_VLAN:
+                       vlan = nla_data(a);
+                       if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+                               return -EINVAL;
+                       if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+                               return -EINVAL;
+                       break;
+
+               case OVS_ACTION_ATTR_SET:
+                       err = validate_set(a, key);
+                       if (err)
+                               return err;
+                       break;
+
+               case OVS_ACTION_ATTR_SAMPLE:
+                       err = validate_sample(a, key, depth);
+                       if (err)
+                               return err;
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       if (rem > 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+static void clear_stats(struct sw_flow *flow)
+{
+       flow->used = 0;
+       flow->tcp_flags = 0;
+       flow->packet_count = 0;
+       flow->byte_count = 0;
+}
+
+static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
+{
+       struct ovs_header *ovs_header = info->userhdr;
+       struct nlattr **a = info->attrs;
+       struct sw_flow_actions *acts;
+       struct sk_buff *packet;
+       struct sw_flow *flow;
+       struct datapath *dp;
+       struct ethhdr *eth;
+       int len;
+       int err;
+       int key_len;
+
+       err = -EINVAL;
+       if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
+           !a[OVS_PACKET_ATTR_ACTIONS] ||
+           nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
+               goto err;
+
+       len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
+       packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
+       err = -ENOMEM;
+       if (!packet)
+               goto err;
+       skb_reserve(packet, NET_IP_ALIGN);
+
+       memcpy(__skb_put(packet, len), nla_data(a[OVS_PACKET_ATTR_PACKET]), len);
+
+       skb_reset_mac_header(packet);
+       eth = eth_hdr(packet);
+
+       /* Normally, setting the skb 'protocol' field would be handled by a
+        * call to eth_type_trans(), but it assumes there's a sending
+        * device, which we may not have. */
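+       /* For example, 0x0800 (IPv4) is >= 1536 and therefore a real
+        * EtherType, whereas smaller values are 802.3 frame lengths and fall
+        * back to ETH_P_802_2.
+        */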
+       if (ntohs(eth->h_proto) >= 1536)
+               packet->protocol = eth->h_proto;
+       else
+               packet->protocol = htons(ETH_P_802_2);
+
+       /* Build an sw_flow for sending this packet. */
+       flow = ovs_flow_alloc();
+       err = PTR_ERR(flow);
+       if (IS_ERR(flow))
+               goto err_kfree_skb;
+
+       err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+       if (err)
+               goto err_flow_free;
+
+       err = ovs_flow_metadata_from_nlattrs(&flow->key.phy.priority,
+                                            &flow->key.phy.in_port,
+                                            a[OVS_PACKET_ATTR_KEY]);
+       if (err)
+               goto err_flow_free;
+
+       err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
+       if (err)
+               goto err_flow_free;
+
+       flow->hash = ovs_flow_hash(&flow->key, key_len);
+
+       acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
+       err = PTR_ERR(acts);
+       if (IS_ERR(acts))
+               goto err_flow_free;
+       rcu_assign_pointer(flow->sf_acts, acts);
+
+       OVS_CB(packet)->flow = flow;
+       packet->priority = flow->key.phy.priority;
+
+       rcu_read_lock();
+       dp = get_dp(ovs_header->dp_ifindex);
+       err = -ENODEV;
+       if (!dp)
+               goto err_unlock;
+
+       local_bh_disable();
+       err = ovs_execute_actions(dp, packet);
+       local_bh_enable();
+       rcu_read_unlock();
+
+       ovs_flow_free(flow);
+       return err;
+
+err_unlock:
+       rcu_read_unlock();
+err_flow_free:
+       ovs_flow_free(flow);
+err_kfree_skb:
+       kfree_skb(packet);
+err:
+       return err;
+}
+
+static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
+       [OVS_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+       [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
+       [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
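+
+/* ovs_packet_cmd_execute() above requires all three attributes: the raw
+ * Ethernet frame in OVS_PACKET_ATTR_PACKET, the flow key metadata in
+ * OVS_PACKET_ATTR_KEY and the actions to apply in OVS_PACKET_ATTR_ACTIONS.
+ */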
+
+static struct genl_ops dp_packet_genl_ops[] = {
+       { .cmd = OVS_PACKET_CMD_EXECUTE,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = packet_policy,
+         .doit = ovs_packet_cmd_execute
+       }
+};
+
+static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
+{
+       int i;
+       struct flow_table *table = genl_dereference(dp->table);
+
+       stats->n_flows = ovs_flow_tbl_count(table);
+
+       stats->n_hit = stats->n_missed = stats->n_lost = 0;
+       for_each_possible_cpu(i) {
+               const struct dp_stats_percpu *percpu_stats;
+               struct dp_stats_percpu local_stats;
+               unsigned int start;
+
+               percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                       local_stats = *percpu_stats;
+               } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+               stats->n_hit += local_stats.n_hit;
+               stats->n_missed += local_stats.n_missed;
+               stats->n_lost += local_stats.n_lost;
+       }
+}
+
+static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
+       [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
+static struct genl_family dp_flow_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_FLOW_FAMILY,
+       .version = OVS_FLOW_VERSION,
+       .maxattr = OVS_FLOW_ATTR_MAX
+};
+
+static struct genl_multicast_group ovs_dp_flow_multicast_group = {
+       .name = OVS_FLOW_MCGROUP
+};
+
+/* Called with genl_lock. */
+static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+                                 struct sk_buff *skb, u32 pid,
+                                 u32 seq, u32 flags, u8 cmd)
+{
+       const int skb_orig_len = skb->len;
+       const struct sw_flow_actions *sf_acts;
+       struct ovs_flow_stats stats;
+       struct ovs_header *ovs_header;
+       struct nlattr *nla;
+       unsigned long used;
+       u8 tcp_flags;
+       int err;
+
+       sf_acts = rcu_dereference_protected(flow->sf_acts,
+                                           lockdep_genl_is_held());
+
+       ovs_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+       if (!ovs_header)
+               return -EMSGSIZE;
+
+       ovs_header->dp_ifindex = get_dpifindex(dp);
+
+       nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
+       if (!nla)
+               goto nla_put_failure;
+       err = ovs_flow_to_nlattrs(&flow->key, skb);
+       if (err)
+               goto error;
+       nla_nest_end(skb, nla);
+
+       spin_lock_bh(&flow->lock);
+       used = flow->used;
+       stats.n_packets = flow->packet_count;
+       stats.n_bytes = flow->byte_count;
+       tcp_flags = flow->tcp_flags;
+       spin_unlock_bh(&flow->lock);
+
+       if (used)
+               NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used));
+
+       if (stats.n_packets)
+               NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
+                       sizeof(struct ovs_flow_stats), &stats);
+
+       if (tcp_flags)
+               NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+       /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
+        * this is the first flow to be dumped into 'skb'.  This is unusual for
+        * Netlink, but individual action lists can be longer than
+        * NLMSG_GOODSIZE and would thus be entirely undumpable if we didn't
+        * do this.
+        * The userspace caller can always fetch the actions separately if it
+        * really wants them.  (Most userspace callers in fact don't care.)
+        *
+        * This can only fail for dump operations because the skb is always
+        * properly sized for single flows.
+        */
+       err = nla_put(skb, OVS_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
+                     sf_acts->actions);
+       if (err < 0 && skb_orig_len)
+               goto error;
+
+       return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+       err = -EMSGSIZE;
+error:
+       genlmsg_cancel(skb, ovs_header);
+       return err;
+}
+
+static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
+{
+       const struct sw_flow_actions *sf_acts;
+       int len;
+
+       sf_acts = rcu_dereference_protected(flow->sf_acts,
+                                           lockdep_genl_is_held());
+
+       /* OVS_FLOW_ATTR_KEY */
+       len = nla_total_size(FLOW_BUFSIZE);
+       /* OVS_FLOW_ATTR_ACTIONS */
+       len += nla_total_size(sf_acts->actions_len);
+       /* OVS_FLOW_ATTR_STATS */
+       len += nla_total_size(sizeof(struct ovs_flow_stats));
+       /* OVS_FLOW_ATTR_TCP_FLAGS */
+       len += nla_total_size(1);
+       /* OVS_FLOW_ATTR_USED */
+       len += nla_total_size(8);
+
+       len += NLMSG_ALIGN(sizeof(struct ovs_header));
+
+       return genlmsg_new(len, GFP_KERNEL);
+}
+
+static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
+                                              struct datapath *dp,
+                                              u32 pid, u32 seq, u8 cmd)
+{
+       struct sk_buff *skb;
+       int retval;
+
+       skb = ovs_flow_cmd_alloc_info(flow);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       retval = ovs_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
+       BUG_ON(retval < 0);
+       return skb;
+}
+
+static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key;
+       struct sw_flow *flow;
+       struct sk_buff *reply;
+       struct datapath *dp;
+       struct flow_table *table;
+       int error;
+       int key_len;
+
+       /* Extract key. */
+       error = -EINVAL;
+       if (!a[OVS_FLOW_ATTR_KEY])
+               goto error;
+       error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+       if (error)
+               goto error;
+
+       /* Validate actions. */
+       if (a[OVS_FLOW_ATTR_ACTIONS]) {
+               error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], &key,  0);
+               if (error)
+                       goto error;
+       } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
+               error = -EINVAL;
+               goto error;
+       }
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       error = -ENODEV;
+       if (!dp)
+               goto error;
+
+       table = genl_dereference(dp->table);
+       flow = ovs_flow_tbl_lookup(table, &key, key_len);
+       if (!flow) {
+               struct sw_flow_actions *acts;
+
+               /* Bail out if we're not allowed to create a new flow. */
+               error = -ENOENT;
+               if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
+                       goto error;
+
+               /* Expand table, if necessary, to make room. */
+               if (ovs_flow_tbl_need_to_expand(table)) {
+                       struct flow_table *new_table;
+
+                       new_table = ovs_flow_tbl_expand(table);
+                       if (!IS_ERR(new_table)) {
+                               rcu_assign_pointer(dp->table, new_table);
+                               ovs_flow_tbl_deferred_destroy(table);
+                               table = genl_dereference(dp->table);
+                       }
+               }
+
+               /* Allocate flow. */
+               flow = ovs_flow_alloc();
+               if (IS_ERR(flow)) {
+                       error = PTR_ERR(flow);
+                       goto error;
+               }
+               flow->key = key;
+               clear_stats(flow);
+
+               /* Obtain actions. */
+               acts = ovs_flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+               error = PTR_ERR(acts);
+               if (IS_ERR(acts))
+                       goto error_free_flow;
+               rcu_assign_pointer(flow->sf_acts, acts);
+
+               /* Put flow in bucket. */
+               flow->hash = ovs_flow_hash(&key, key_len);
+               ovs_flow_tbl_insert(table, flow);
+
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+                                               info->snd_seq,
+                                               OVS_FLOW_CMD_NEW);
+       } else {
+               /* We found a matching flow. */
+               struct sw_flow_actions *old_acts;
+               struct nlattr *acts_attrs;
+
+               /* Bail out if we're not allowed to modify an existing flow.
+                * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+                * because Generic Netlink treats the latter as a dump
+                * request.  We also accept NLM_F_EXCL in case that bug ever
+                * gets fixed.
+                */
+               error = -EEXIST;
+               if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
+                   info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+                       goto error;
+
+               /* Update actions. */
+               old_acts = rcu_dereference_protected(flow->sf_acts,
+                                                    lockdep_genl_is_held());
+               acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
+               if (acts_attrs &&
+                  (old_acts->actions_len != nla_len(acts_attrs) ||
+                  memcmp(old_acts->actions, nla_data(acts_attrs),
+                         old_acts->actions_len))) {
+                       struct sw_flow_actions *new_acts;
+
+                       new_acts = ovs_flow_actions_alloc(acts_attrs);
+                       error = PTR_ERR(new_acts);
+                       if (IS_ERR(new_acts))
+                               goto error;
+
+                       rcu_assign_pointer(flow->sf_acts, new_acts);
+                       ovs_flow_deferred_free_acts(old_acts);
+               }
+
+               reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+                                              info->snd_seq, OVS_FLOW_CMD_NEW);
+
+               /* Clear stats. */
+               if (a[OVS_FLOW_ATTR_CLEAR]) {
+                       spin_lock_bh(&flow->lock);
+                       clear_stats(flow);
+                       spin_unlock_bh(&flow->lock);
+               }
+       }
+
+       if (!IS_ERR(reply))
+               genl_notify(reply, genl_info_net(info), info->snd_pid,
+                          ovs_dp_flow_multicast_group.id, info->nlhdr,
+                          GFP_KERNEL);
+       else
+               netlink_set_err(init_net.genl_sock, 0,
+                               ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
+       return 0;
+
+error_free_flow:
+       ovs_flow_free(flow);
+error:
+       return error;
+}
+
+static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key;
+       struct sk_buff *reply;
+       struct sw_flow *flow;
+       struct datapath *dp;
+       struct flow_table *table;
+       int err;
+       int key_len;
+
+       if (!a[OVS_FLOW_ATTR_KEY])
+               return -EINVAL;
+       err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+       if (err)
+               return err;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       table = genl_dereference(dp->table);
+       flow = ovs_flow_tbl_lookup(table, &key, key_len);
+       if (!flow)
+               return -ENOENT;
+
+       reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+                                       info->snd_seq, OVS_FLOW_CMD_NEW);
+       if (IS_ERR(reply))
+               return PTR_ERR(reply);
+
+       return genlmsg_reply(reply, info);
+}
+
+static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sw_flow_key key;
+       struct sk_buff *reply;
+       struct sw_flow *flow;
+       struct datapath *dp;
+       struct flow_table *table;
+       int err;
+       int key_len;
+
+       if (!a[OVS_FLOW_ATTR_KEY])
+               return flush_flows(ovs_header->dp_ifindex);
+       err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+       if (err)
+               return err;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       table = genl_dereference(dp->table);
+       flow = ovs_flow_tbl_lookup(table, &key, key_len);
+       if (!flow)
+               return -ENOENT;
+
+       reply = ovs_flow_cmd_alloc_info(flow);
+       if (!reply)
+               return -ENOMEM;
+
+       ovs_flow_tbl_remove(table, flow);
+
+       err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
+                                    info->snd_seq, 0, OVS_FLOW_CMD_DEL);
+       BUG_ON(err < 0);
+
+       ovs_flow_deferred_free(flow);
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+       return 0;
+}
+
+static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+       struct datapath *dp;
+       struct flow_table *table;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       table = genl_dereference(dp->table);
+
+       for (;;) {
+               struct sw_flow *flow;
+               u32 bucket, obj;
+
+               bucket = cb->args[0];
+               obj = cb->args[1];
+               flow = ovs_flow_tbl_next(table, &bucket, &obj);
+               if (!flow)
+                       break;
+
+               if (ovs_flow_cmd_fill_info(flow, dp, skb,
+                                          NETLINK_CB(cb->skb).pid,
+                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                          OVS_FLOW_CMD_NEW) < 0)
+                       break;
+
+               cb->args[0] = bucket;
+               cb->args[1] = obj;
+       }
+       return skb->len;
+}
+
+static struct genl_ops dp_flow_genl_ops[] = {
+       { .cmd = OVS_FLOW_CMD_NEW,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_new_or_set
+       },
+       { .cmd = OVS_FLOW_CMD_DEL,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_del
+       },
+       { .cmd = OVS_FLOW_CMD_GET,
+         .flags = 0,               /* OK for unprivileged users. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_get,
+         .dumpit = ovs_flow_cmd_dump
+       },
+       { .cmd = OVS_FLOW_CMD_SET,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = flow_policy,
+         .doit = ovs_flow_cmd_new_or_set,
+       },
+};
+
+static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+       [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+       [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+};
+
+static struct genl_family dp_datapath_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_DATAPATH_FAMILY,
+       .version = OVS_DATAPATH_VERSION,
+       .maxattr = OVS_DP_ATTR_MAX
+};
+
+static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+       .name = OVS_DATAPATH_MCGROUP
+};
+
+static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
+                               u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+       struct ovs_header *ovs_header;
+       struct ovs_dp_stats dp_stats;
+       int err;
+
+       ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+                                  flags, cmd);
+       if (!ovs_header)
+               goto error;
+
+       ovs_header->dp_ifindex = get_dpifindex(dp);
+
+       rcu_read_lock();
+       err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
+       rcu_read_unlock();
+       if (err)
+               goto nla_put_failure;
+
+       get_dp_stats(dp, &dp_stats);
+       NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats);
+
+       return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+       genlmsg_cancel(skb, ovs_header);
+error:
+       return -EMSGSIZE;
+}
+
+static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 pid,
+                                            u32 seq, u8 cmd)
+{
+       struct sk_buff *skb;
+       int retval;
+
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       retval = ovs_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+       if (retval < 0) {
+               kfree_skb(skb);
+               return ERR_PTR(retval);
+       }
+       return skb;
+}
+
+/* Called with genl_mutex and optionally with RTNL lock also. */
+static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+                                       struct nlattr *a[OVS_DP_ATTR_MAX + 1])
+{
+       struct datapath *dp;
+
+       if (!a[OVS_DP_ATTR_NAME])
+               dp = get_dp(ovs_header->dp_ifindex);
+       else {
+               struct vport *vport;
+
+               rcu_read_lock();
+               vport = ovs_vport_locate(nla_data(a[OVS_DP_ATTR_NAME]));
+               dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
+               rcu_read_unlock();
+       }
+       return dp ? dp : ERR_PTR(-ENODEV);
+}
+
+static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct vport_parms parms;
+       struct sk_buff *reply;
+       struct datapath *dp;
+       struct vport *vport;
+       int err;
+
+       err = -EINVAL;
+       if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
+               goto err;
+
+       rtnl_lock();
+       err = -ENODEV;
+       if (!try_module_get(THIS_MODULE))
+               goto err_unlock_rtnl;
+
+       err = -ENOMEM;
+       dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+       if (dp == NULL)
+               goto err_put_module;
+       INIT_LIST_HEAD(&dp->port_list);
+
+       /* Allocate table. */
+       err = -ENOMEM;
+       rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
+       if (!dp->table)
+               goto err_free_dp;
+
+       dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+       if (!dp->stats_percpu) {
+               err = -ENOMEM;
+               goto err_destroy_table;
+       }
+
+       /* Set up our datapath device. */
+       parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
+       parms.type = OVS_VPORT_TYPE_INTERNAL;
+       parms.options = NULL;
+       parms.dp = dp;
+       parms.port_no = OVSP_LOCAL;
+       parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+
+       vport = new_vport(&parms);
+       if (IS_ERR(vport)) {
+               err = PTR_ERR(vport);
+               if (err == -EBUSY)
+                       err = -EEXIST;
+
+               goto err_destroy_percpu;
+       }
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_NEW);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto err_destroy_local_port;
+
+       list_add_tail(&dp->list_node, &dps);
+       rtnl_unlock();
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_datapath_multicast_group.id, info->nlhdr,
+                   GFP_KERNEL);
+       return 0;
+
+err_destroy_local_port:
+       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+err_destroy_percpu:
+       free_percpu(dp->stats_percpu);
+err_destroy_table:
+       ovs_flow_tbl_destroy(genl_dereference(dp->table));
+err_free_dp:
+       kfree(dp);
+err_put_module:
+       module_put(THIS_MODULE);
+err_unlock_rtnl:
+       rtnl_unlock();
+err:
+       return err;
+}
+
+static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct vport *vport, *next_vport;
+       struct sk_buff *reply;
+       struct datapath *dp;
+       int err;
+
+       rtnl_lock();
+       dp = lookup_datapath(info->userhdr, info->attrs);
+       err = PTR_ERR(dp);
+       if (IS_ERR(dp))
+               goto exit_unlock;
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_DEL);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto exit_unlock;
+
+       list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
+               if (vport->port_no != OVSP_LOCAL)
+                       ovs_dp_detach_port(vport);
+
+       list_del(&dp->list_node);
+       ovs_dp_detach_port(rtnl_dereference(dp->ports[OVSP_LOCAL]));
+
+       /* rtnl_unlock() will wait until all the references to devices that
+        * are pending unregistration have been dropped.  We do it here to
+        * ensure that any internal devices (which contain DP pointers) are
+        * fully destroyed before freeing the datapath.
+        */
+       rtnl_unlock();
+
+       call_rcu(&dp->rcu, destroy_dp_rcu);
+       module_put(THIS_MODULE);
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_datapath_multicast_group.id, info->nlhdr,
+                   GFP_KERNEL);
+
+       return 0;
+
+exit_unlock:
+       rtnl_unlock();
+       return err;
+}
+
+static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *reply;
+       struct datapath *dp;
+       int err;
+
+       dp = lookup_datapath(info->userhdr, info->attrs);
+       if (IS_ERR(dp))
+               return PTR_ERR(dp);
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_NEW);
+       if (IS_ERR(reply)) {
+               err = PTR_ERR(reply);
+               netlink_set_err(init_net.genl_sock, 0,
+                               ovs_dp_datapath_multicast_group.id, err);
+               return 0;
+       }
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_datapath_multicast_group.id, info->nlhdr,
+                   GFP_KERNEL);
+
+       return 0;
+}
+
+static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *reply;
+       struct datapath *dp;
+
+       dp = lookup_datapath(info->userhdr, info->attrs);
+       if (IS_ERR(dp))
+               return PTR_ERR(dp);
+
+       reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+                                     info->snd_seq, OVS_DP_CMD_NEW);
+       if (IS_ERR(reply))
+               return PTR_ERR(reply);
+
+       return genlmsg_reply(reply, info);
+}
+
+static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct datapath *dp;
+       int skip = cb->args[0];
+       int i = 0;
+
+       list_for_each_entry(dp, &dps, list_node) {
+               if (i >= skip &&
+                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                        OVS_DP_CMD_NEW) < 0)
+                       break;
+               i++;
+       }
+
+       cb->args[0] = i;
+
+       return skb->len;
+}
+
+static struct genl_ops dp_datapath_genl_ops[] = {
+       { .cmd = OVS_DP_CMD_NEW,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_new
+       },
+       { .cmd = OVS_DP_CMD_DEL,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_del
+       },
+       { .cmd = OVS_DP_CMD_GET,
+         .flags = 0,               /* OK for unprivileged users. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_get,
+         .dumpit = ovs_dp_cmd_dump
+       },
+       { .cmd = OVS_DP_CMD_SET,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = datapath_policy,
+         .doit = ovs_dp_cmd_set,
+       },
+};
+
+static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+       [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+       [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+       [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+       [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_family dp_vport_genl_family = {
+       .id = GENL_ID_GENERATE,
+       .hdrsize = sizeof(struct ovs_header),
+       .name = OVS_VPORT_FAMILY,
+       .version = OVS_VPORT_VERSION,
+       .maxattr = OVS_VPORT_ATTR_MAX
+};
+
+struct genl_multicast_group ovs_dp_vport_multicast_group = {
+       .name = OVS_VPORT_MCGROUP
+};
+
+/* Called with RTNL lock or RCU read lock. */
+static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
+                                  u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+       struct ovs_header *ovs_header;
+       struct ovs_vport_stats vport_stats;
+       int err;
+
+       ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
+                                flags, cmd);
+       if (!ovs_header)
+               return -EMSGSIZE;
+
+       ovs_header->dp_ifindex = get_dpifindex(vport->dp);
+
+       NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
+       NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
+       NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
+       NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
+
+       ovs_vport_get_stats(vport, &vport_stats);
+       NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
+               &vport_stats);
+
+       err = ovs_vport_get_options(vport, skb);
+       if (err == -EMSGSIZE)
+               goto error;
+
+       return genlmsg_end(skb, ovs_header);
+
+nla_put_failure:
+       err = -EMSGSIZE;
+error:
+       genlmsg_cancel(skb, ovs_header);
+       return err;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 pid,
+                                        u32 seq, u8 cmd)
+{
+       struct sk_buff *skb;
+       int retval;
+
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       retval = ovs_vport_cmd_fill_info(vport, skb, pid, seq, 0, cmd);
+       if (retval < 0) {
+               kfree_skb(skb);
+               return ERR_PTR(retval);
+       }
+       return skb;
+}
+
+/* Called with RTNL lock or RCU read lock. */
+static struct vport *lookup_vport(struct ovs_header *ovs_header,
+                                 struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
+{
+       struct datapath *dp;
+       struct vport *vport;
+
+       if (a[OVS_VPORT_ATTR_NAME]) {
+               vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
+               if (!vport)
+                       return ERR_PTR(-ENODEV);
+               return vport;
+       } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
+               u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+               if (port_no >= DP_MAX_PORTS)
+                       return ERR_PTR(-EFBIG);
+
+               dp = get_dp(ovs_header->dp_ifindex);
+               if (!dp)
+                       return ERR_PTR(-ENODEV);
+
+               vport = rcu_dereference_rtnl(dp->ports[port_no]);
+               if (!vport)
+                       return ERR_PTR(-ENOENT);
+               return vport;
+       } else
+               return ERR_PTR(-EINVAL);
+}
+
+static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct vport_parms parms;
+       struct sk_buff *reply;
+       struct vport *vport;
+       struct datapath *dp;
+       u32 port_no;
+       int err;
+
+       err = -EINVAL;
+       if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
+           !a[OVS_VPORT_ATTR_UPCALL_PID])
+               goto exit;
+
+       rtnl_lock();
+       dp = get_dp(ovs_header->dp_ifindex);
+       err = -ENODEV;
+       if (!dp)
+               goto exit_unlock;
+
+       if (a[OVS_VPORT_ATTR_PORT_NO]) {
+               port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
+
+               err = -EFBIG;
+               if (port_no >= DP_MAX_PORTS)
+                       goto exit_unlock;
+
+               vport = rtnl_dereference(dp->ports[port_no]);
+               err = -EBUSY;
+               if (vport)
+                       goto exit_unlock;
+       } else {
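+               /* No port number was requested: pick the lowest free port
+                * number, starting at 1.
+                */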
+               for (port_no = 1; ; port_no++) {
+                       if (port_no >= DP_MAX_PORTS) {
+                               err = -EFBIG;
+                               goto exit_unlock;
+                       }
+                       vport = rtnl_dereference(dp->ports[port_no]);
+                       if (!vport)
+                               break;
+               }
+       }
+
+       parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
+       parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
+       parms.options = a[OVS_VPORT_ATTR_OPTIONS];
+       parms.dp = dp;
+       parms.port_no = port_no;
+       parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+       vport = new_vport(&parms);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_NEW);
+       if (IS_ERR(reply)) {
+               err = PTR_ERR(reply);
+               ovs_dp_detach_port(vport);
+               goto exit_unlock;
+       }
+
+       err = 0;
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+       rtnl_unlock();
+exit:
+       return err;
+}
+
+static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct sk_buff *reply;
+       struct vport *vport;
+       int err;
+
+       rtnl_lock();
+       vport = lookup_vport(info->userhdr, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       err = 0;
+       if (a[OVS_VPORT_ATTR_TYPE] &&
+           nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
+               err = -EINVAL;
+
+       if (!err && a[OVS_VPORT_ATTR_OPTIONS])
+               err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
+       if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+               vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_NEW);
+       if (IS_ERR(reply)) {
+               err = PTR_ERR(reply);
+               netlink_set_err(init_net.genl_sock, 0,
+                               ovs_dp_vport_multicast_group.id, err);
+               err = 0;
+               goto exit_unlock;
+       }
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+       rtnl_unlock();
+       return err;
+}
+
+static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct sk_buff *reply;
+       struct vport *vport;
+       int err;
+
+       rtnl_lock();
+       vport = lookup_vport(info->userhdr, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       if (vport->port_no == OVSP_LOCAL) {
+               err = -EINVAL;
+               goto exit_unlock;
+       }
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_DEL);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto exit_unlock;
+
+       err = 0;
+       ovs_dp_detach_port(vport);
+
+       genl_notify(reply, genl_info_net(info), info->snd_pid,
+                   ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
+
+exit_unlock:
+       rtnl_unlock();
+       return err;
+}
+
+static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nlattr **a = info->attrs;
+       struct ovs_header *ovs_header = info->userhdr;
+       struct sk_buff *reply;
+       struct vport *vport;
+       int err;
+
+       rcu_read_lock();
+       vport = lookup_vport(ovs_header, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_unlock;
+
+       reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
+                                        OVS_VPORT_CMD_NEW);
+       err = PTR_ERR(reply);
+       if (IS_ERR(reply))
+               goto exit_unlock;
+
+       rcu_read_unlock();
+
+       return genlmsg_reply(reply, info);
+
+exit_unlock:
+       rcu_read_unlock();
+       return err;
+}
+
+static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
+       struct datapath *dp;
+       u32 port_no;
+       int retval;
+
+       dp = get_dp(ovs_header->dp_ifindex);
+       if (!dp)
+               return -ENODEV;
+
+       rcu_read_lock();
+       for (port_no = cb->args[0]; port_no < DP_MAX_PORTS; port_no++) {
+               struct vport *vport;
+
+               vport = rcu_dereference(dp->ports[port_no]);
+               if (!vport)
+                       continue;
+
+               if (ovs_vport_cmd_fill_info(vport, skb, NETLINK_CB(cb->skb).pid,
+                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                           OVS_VPORT_CMD_NEW) < 0)
+                       break;
+       }
+       rcu_read_unlock();
+
+       cb->args[0] = port_no;
+       retval = skb->len;
+
+       return retval;
+}
+
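+/* Periodic work item: rebuilds every datapath's flow table so that the fresh
+ * random hash seed of the new table redistributes flows across buckets, then
+ * reschedules itself (first scheduled from dp_init()). */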
+static void rehash_flow_table(struct work_struct *work)
+{
+       struct datapath *dp;
+
+       genl_lock();
+
+       list_for_each_entry(dp, &dps, list_node) {
+               struct flow_table *old_table = genl_dereference(dp->table);
+               struct flow_table *new_table;
+
+               new_table = ovs_flow_tbl_rehash(old_table);
+               if (!IS_ERR(new_table)) {
+                       rcu_assign_pointer(dp->table, new_table);
+                       ovs_flow_tbl_deferred_destroy(old_table);
+               }
+       }
+
+       genl_unlock();
+
+       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+}
+
+static struct genl_ops dp_vport_genl_ops[] = {
+       { .cmd = OVS_VPORT_CMD_NEW,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_new
+       },
+       { .cmd = OVS_VPORT_CMD_DEL,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_del
+       },
+       { .cmd = OVS_VPORT_CMD_GET,
+         .flags = 0,               /* OK for unprivileged users. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_get,
+         .dumpit = ovs_vport_cmd_dump
+       },
+       { .cmd = OVS_VPORT_CMD_SET,
+         .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+         .policy = vport_policy,
+         .doit = ovs_vport_cmd_set,
+       },
+};
+
+struct genl_family_and_ops {
+       struct genl_family *family;
+       struct genl_ops *ops;
+       int n_ops;
+       struct genl_multicast_group *group;
+};
+
+static const struct genl_family_and_ops dp_genl_families[] = {
+       { &dp_datapath_genl_family,
+         dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
+         &ovs_dp_datapath_multicast_group },
+       { &dp_vport_genl_family,
+         dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
+         &ovs_dp_vport_multicast_group },
+       { &dp_flow_genl_family,
+         dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
+         &ovs_dp_flow_multicast_group },
+       { &dp_packet_genl_family,
+         dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
+         NULL },
+};
+
+static void dp_unregister_genl(int n_families)
+{
+       int i;
+
+       for (i = 0; i < n_families; i++)
+               genl_unregister_family(dp_genl_families[i].family);
+}
+
+static int dp_register_genl(void)
+{
+       int n_registered;
+       int err;
+       int i;
+
+       n_registered = 0;
+       for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
+               const struct genl_family_and_ops *f = &dp_genl_families[i];
+
+               err = genl_register_family_with_ops(f->family, f->ops,
+                                                   f->n_ops);
+               if (err)
+                       goto error;
+               n_registered++;
+
+               if (f->group) {
+                       err = genl_register_mc_group(f->family, f->group);
+                       if (err)
+                               goto error;
+               }
+       }
+
+       return 0;
+
+error:
+       dp_unregister_genl(n_registered);
+       return err;
+}
+
+static int __init dp_init(void)
+{
+       struct sk_buff *dummy_skb;
+       int err;
+
+       BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
+
+       pr_info("Open vSwitch switching datapath\n");
+
+       err = ovs_flow_init();
+       if (err)
+               goto error;
+
+       err = ovs_vport_init();
+       if (err)
+               goto error_flow_exit;
+
+       err = register_netdevice_notifier(&ovs_dp_device_notifier);
+       if (err)
+               goto error_vport_exit;
+
+       err = dp_register_genl();
+       if (err < 0)
+               goto error_unreg_notifier;
+
+       schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
+
+       return 0;
+
+error_unreg_notifier:
+       unregister_netdevice_notifier(&ovs_dp_device_notifier);
+error_vport_exit:
+       ovs_vport_exit();
+error_flow_exit:
+       ovs_flow_exit();
+error:
+       return err;
+}
+
+static void dp_cleanup(void)
+{
+       cancel_delayed_work_sync(&rehash_flow_wq);
+       rcu_barrier();
+       dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
+       unregister_netdevice_notifier(&ovs_dp_device_notifier);
+       ovs_vport_exit();
+       ovs_flow_exit();
+}
+
+module_init(dp_init);
+module_exit(dp_cleanup);
+
+MODULE_DESCRIPTION("Open vSwitch switching datapath");
+MODULE_LICENSE("GPL");
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
new file mode 100644 (file)
index 0000000..5b9f884
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef DATAPATH_H
+#define DATAPATH_H 1
+
+#include <asm/page.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/version.h>
+
+#include "flow.h"
+
+struct vport;
+
+#define DP_MAX_PORTS 1024
+#define SAMPLE_ACTION_DEPTH 3
+
+/**
+ * struct dp_stats_percpu - per-cpu packet processing statistics for a given
+ * datapath.
+ * @n_hit: Number of received packets for which a matching flow was found in
+ * the flow table.
+ * @n_missed: Number of received packets that had no matching flow in the flow
+ * table.  The sum of @n_hit and @n_missed is the number of packets that have
+ * been received by the datapath.
+ * @n_lost: Number of received packets that had no matching flow in the flow
+ * table that could not be sent to userspace (normally due to an overflow in
+ * one of the datapath's queues).
+ */
+struct dp_stats_percpu {
+       u64 n_hit;
+       u64 n_missed;
+       u64 n_lost;
+       struct u64_stats_sync sync;
+};
+
+/**
+ * struct datapath - datapath for flow-based packet switching
+ * @rcu: RCU callback head for deferred destruction.
+ * @list_node: Element in global 'dps' list.
+ * @n_flows: Number of flows currently in flow table.
+ * @table: Current flow table.  Protected by genl_lock and RCU.
+ * @ports: Map from port number to &struct vport.  %OVSP_LOCAL port
+ * always exists, other ports may be %NULL.  Protected by RTNL and RCU.
+ * @port_list: List of all ports in @ports in arbitrary order.  RTNL required
+ * to iterate or modify.
+ * @stats_percpu: Per-CPU datapath statistics.
+ *
+ * Context: See the comment on locking at the top of datapath.c for additional
+ * locking information.
+ */
+struct datapath {
+       struct rcu_head rcu;
+       struct list_head list_node;
+
+       /* Flow table. */
+       struct flow_table __rcu *table;
+
+       /* Switch ports. */
+       struct vport __rcu *ports[DP_MAX_PORTS];
+       struct list_head port_list;
+
+       /* Stats. */
+       struct dp_stats_percpu __percpu *stats_percpu;
+};
+
+/**
+ * struct ovs_skb_cb - OVS data in skb CB
+ * @flow: The flow associated with this packet.  May be %NULL if no flow.
+ */
+struct ovs_skb_cb {
+       struct sw_flow          *flow;
+};
+#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
+
+/**
+ * struct dp_upcall - metadata to include with a packet to send to userspace
+ * @cmd: One of %OVS_PACKET_CMD_*.
+ * @key: Becomes %OVS_PACKET_ATTR_KEY.  Must be nonnull.
+ * @userdata: If nonnull, its u64 value is extracted and passed to userspace as
+ * %OVS_PACKET_ATTR_USERDATA.
+ * @pid: Netlink PID to which packet should be sent.  If @pid is 0 then no
+ * packet is sent and the packet is accounted in the datapath's @n_lost
+ * counter.
+ */
+struct dp_upcall_info {
+       u8 cmd;
+       const struct sw_flow_key *key;
+       const struct nlattr *userdata;
+       u32 pid;
+};
+
+extern struct notifier_block ovs_dp_device_notifier;
+extern struct genl_multicast_group ovs_dp_vport_multicast_group;
+
+void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_detach_port(struct vport *);
+int ovs_dp_upcall(struct datapath *, struct sk_buff *,
+                 const struct dp_upcall_info *);
+
+const char *ovs_dp_name(const struct datapath *dp);
+struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
+                                        u8 cmd);
+
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
+#endif /* datapath.h */
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
new file mode 100644 (file)
index 0000000..4673651
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/netdevice.h>
+#include <net/genetlink.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+static int dp_device_event(struct notifier_block *unused, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct vport *vport;
+
+       if (ovs_is_internal_dev(dev))
+               vport = ovs_internal_dev_get_vport(dev);
+       else
+               vport = ovs_netdev_get_vport(dev);
+
+       if (!vport)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
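+               /* Internal devices are unregistered by the datapath itself
+                * when their vport is destroyed, so only ports backed by
+                * ordinary network devices need to be detached here. */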
+               if (!ovs_is_internal_dev(dev)) {
+                       struct sk_buff *notify;
+
+                       notify = ovs_vport_cmd_build_info(vport, 0, 0,
+                                                         OVS_VPORT_CMD_DEL);
+                       ovs_dp_detach_port(vport);
+                       if (IS_ERR(notify)) {
+                               netlink_set_err(init_net.genl_sock, 0,
+                                               ovs_dp_vport_multicast_group.id,
+                                               PTR_ERR(notify));
+                               break;
+                       }
+
+                       genlmsg_multicast(notify, 0, ovs_dp_vport_multicast_group.id,
+                                         GFP_KERNEL);
+               }
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+struct notifier_block ovs_dp_device_notifier = {
+       .notifier_call = dp_device_event
+};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
new file mode 100644 (file)
index 0000000..fe7f020
--- /dev/null
@@ -0,0 +1,1346 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "flow.h"
+#include "datapath.h"
+#include <linux/uaccess.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/llc_pdu.h>
+#include <linux/kernel.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/llc.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/rculist.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+
+static struct kmem_cache *flow_cache;
+
+static int check_header(struct sk_buff *skb, int len)
+{
+       if (unlikely(skb->len < len))
+               return -EINVAL;
+       if (unlikely(!pskb_may_pull(skb, len)))
+               return -ENOMEM;
+       return 0;
+}
+
+static bool arphdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_network_offset(skb) +
+                                 sizeof(struct arp_eth_header));
+}
+
+static int check_iphdr(struct sk_buff *skb)
+{
+       unsigned int nh_ofs = skb_network_offset(skb);
+       unsigned int ip_len;
+       int err;
+
+       err = check_header(skb, nh_ofs + sizeof(struct iphdr));
+       if (unlikely(err))
+               return err;
+
+       ip_len = ip_hdrlen(skb);
+       if (unlikely(ip_len < sizeof(struct iphdr) ||
+                    skb->len < nh_ofs + ip_len))
+               return -EINVAL;
+
+       skb_set_transport_header(skb, nh_ofs + ip_len);
+       return 0;
+}
+
+static bool tcphdr_ok(struct sk_buff *skb)
+{
+       int th_ofs = skb_transport_offset(skb);
+       int tcp_len;
+
+       if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
+               return false;
+
+       tcp_len = tcp_hdrlen(skb);
+       if (unlikely(tcp_len < sizeof(struct tcphdr) ||
+                    skb->len < th_ofs + tcp_len))
+               return false;
+
+       return true;
+}
+
+static bool udphdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_transport_offset(skb) +
+                                 sizeof(struct udphdr));
+}
+
+static bool icmphdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_transport_offset(skb) +
+                                 sizeof(struct icmphdr));
+}
+
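+/* Converts a flow's last-used jiffies timestamp into milliseconds on the
+ * monotonic clock: the current time minus how long the flow has been idle. */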
+u64 ovs_flow_used_time(unsigned long flow_jiffies)
+{
+       struct timespec cur_ts;
+       u64 cur_ms, idle_ms;
+
+       ktime_get_ts(&cur_ts);
+       idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
+       cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+                cur_ts.tv_nsec / NSEC_PER_MSEC;
+
+       return cur_ms - idle_ms;
+}
+
+#define SW_FLOW_KEY_OFFSET(field)              \
+       (offsetof(struct sw_flow_key, field) +  \
+        FIELD_SIZEOF(struct sw_flow_key, field))
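+/* SW_FLOW_KEY_OFFSET(field) is the number of bytes of struct sw_flow_key that
+ * are meaningful once 'field' has been filled in: the offset of the member
+ * plus its size.  The parsers below report it as the effective key length,
+ * which is later used for hashing and comparison. */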
+
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
+                        int *key_lenp)
+{
+       unsigned int nh_ofs = skb_network_offset(skb);
+       unsigned int nh_len;
+       int payload_ofs;
+       struct ipv6hdr *nh;
+       uint8_t nexthdr;
+       __be16 frag_off;
+       int err;
+
+       *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
+
+       err = check_header(skb, nh_ofs + sizeof(*nh));
+       if (unlikely(err))
+               return err;
+
+       nh = ipv6_hdr(skb);
+       nexthdr = nh->nexthdr;
+       payload_ofs = (u8 *)(nh + 1) - skb->data;
+
+       key->ip.proto = NEXTHDR_NONE;
+       key->ip.tos = ipv6_get_dsfield(nh);
+       key->ip.ttl = nh->hop_limit;
+       key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+       key->ipv6.addr.src = nh->saddr;
+       key->ipv6.addr.dst = nh->daddr;
+
+       payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
+       if (unlikely(payload_ofs < 0))
+               return -EINVAL;
+
+       if (frag_off) {
+               if (frag_off & htons(~0x7))
+                       key->ip.frag = OVS_FRAG_TYPE_LATER;
+               else
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+       }
+
+       nh_len = payload_ofs - nh_ofs;
+       skb_set_transport_header(skb, nh_ofs + nh_len);
+       key->ip.proto = nexthdr;
+       return nh_len;
+}
+
+static bool icmp6hdr_ok(struct sk_buff *skb)
+{
+       return pskb_may_pull(skb, skb_transport_offset(skb) +
+                                 sizeof(struct icmp6hdr));
+}
+
+#define TCP_FLAGS_OFFSET 13
+#define TCP_FLAG_MASK 0x3f
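+/* Byte 13 of the TCP header carries the flag bits; masking with 0x3f keeps
+ * the six classic flags (URG/ACK/PSH/RST/SYN/FIN) and drops CWR/ECE. */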
+
+void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
+{
+       u8 tcp_flags = 0;
+
+       if (flow->key.eth.type == htons(ETH_P_IP) &&
+           flow->key.ip.proto == IPPROTO_TCP) {
+               u8 *tcp = (u8 *)tcp_hdr(skb);
+               tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
+       }
+
+       spin_lock(&flow->lock);
+       flow->used = jiffies;
+       flow->packet_count++;
+       flow->byte_count += skb->len;
+       flow->tcp_flags |= tcp_flags;
+       spin_unlock(&flow->lock);
+}
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
+{
+       int actions_len = nla_len(actions);
+       struct sw_flow_actions *sfa;
+
+       /* At least DP_MAX_PORTS actions are required to be able to flood a
+        * packet to every port.  Factor of 2 allows for setting VLAN tags,
+        * etc. */
+       if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
+               return ERR_PTR(-EINVAL);
+
+       sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
+       if (!sfa)
+               return ERR_PTR(-ENOMEM);
+
+       sfa->actions_len = actions_len;
+       memcpy(sfa->actions, nla_data(actions), actions_len);
+       return sfa;
+}
+
+struct sw_flow *ovs_flow_alloc(void)
+{
+       struct sw_flow *flow;
+
+       flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+       if (!flow)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&flow->lock);
+       flow->sf_acts = NULL;
+
+       return flow;
+}
+
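+/* The per-table random hash_seed is folded into the flow hash so that a
+ * rehash, which allocates a replacement table with a new seed, redistributes
+ * flows across buckets. */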
+static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+{
+       hash = jhash_1word(hash, table->hash_seed);
+       return flex_array_get(table->buckets,
+                               (hash & (table->n_buckets - 1)));
+}
+
+static struct flex_array *alloc_buckets(unsigned int n_buckets)
+{
+       struct flex_array *buckets;
+       int i, err;
+
+       buckets = flex_array_alloc(sizeof(struct hlist_head),
+                                  n_buckets, GFP_KERNEL);
+       if (!buckets)
+               return NULL;
+
+       err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
+       if (err) {
+               flex_array_free(buckets);
+               return NULL;
+       }
+
+       for (i = 0; i < n_buckets; i++)
+               INIT_HLIST_HEAD((struct hlist_head *)
+                                       flex_array_get(buckets, i));
+
+       return buckets;
+}
+
+static void free_buckets(struct flex_array *buckets)
+{
+       flex_array_free(buckets);
+}
+
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+       struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+
+       if (!table)
+               return NULL;
+
+       table->buckets = alloc_buckets(new_size);
+
+       if (!table->buckets) {
+               kfree(table);
+               return NULL;
+       }
+       table->n_buckets = new_size;
+       table->count = 0;
+       table->node_ver = 0;
+       table->keep_flows = false;
+       get_random_bytes(&table->hash_seed, sizeof(u32));
+
+       return table;
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+       int i;
+
+       if (!table)
+               return;
+
+       if (table->keep_flows)
+               goto skip_flows;
+
+       for (i = 0; i < table->n_buckets; i++) {
+               struct sw_flow *flow;
+               struct hlist_head *head = flex_array_get(table->buckets, i);
+               struct hlist_node *node, *n;
+               int ver = table->node_ver;
+
+               hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
+                       hlist_del_rcu(&flow->hash_node[ver]);
+                       ovs_flow_free(flow);
+               }
+       }
+
+skip_flows:
+       free_buckets(table->buckets);
+       kfree(table);
+}
+
+static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
+{
+       struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+
+       ovs_flow_tbl_destroy(table);
+}
+
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+{
+       if (!table)
+               return;
+
+       call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+}
+
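+/* Iterates over the flows in a table: *bucket and *last form a cursor that
+ * the caller passes back in to resume the walk where it left off. */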
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+{
+       struct sw_flow *flow;
+       struct hlist_head *head;
+       struct hlist_node *n;
+       int ver;
+       int i;
+
+       ver = table->node_ver;
+       while (*bucket < table->n_buckets) {
+               i = 0;
+               head = flex_array_get(table->buckets, *bucket);
+               hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
+                       if (i < *last) {
+                               i++;
+                               continue;
+                       }
+                       *last = i + 1;
+                       return flow;
+               }
+               (*bucket)++;
+               *last = 0;
+       }
+
+       return NULL;
+}
+
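+/* Each sw_flow has two hash_node links so that it can be chained into both
+ * the old and the new table while flows are copied during a rehash or
+ * expansion; node_ver selects which link a given table uses. */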
+static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
+{
+       int old_ver;
+       int i;
+
+       old_ver = old->node_ver;
+       new->node_ver = !old_ver;
+
+       /* Insert in new table. */
+       for (i = 0; i < old->n_buckets; i++) {
+               struct sw_flow *flow;
+               struct hlist_head *head;
+               struct hlist_node *n;
+
+               head = flex_array_get(old->buckets, i);
+
+               hlist_for_each_entry(flow, n, head, hash_node[old_ver])
+                       ovs_flow_tbl_insert(new, flow);
+       }
+       old->keep_flows = true;
+}
+
+static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
+{
+       struct flow_table *new_table;
+
+       new_table = ovs_flow_tbl_alloc(n_buckets);
+       if (!new_table)
+               return ERR_PTR(-ENOMEM);
+
+       flow_table_copy_flows(table, new_table);
+
+       return new_table;
+}
+
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+{
+       return __flow_tbl_rehash(table, table->n_buckets);
+}
+
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
+{
+       return __flow_tbl_rehash(table, table->n_buckets * 2);
+}
+
+void ovs_flow_free(struct sw_flow *flow)
+{
+       if (unlikely(!flow))
+               return;
+
+       kfree((struct sw_flow_actions __force *)flow->sf_acts);
+       kmem_cache_free(flow_cache, flow);
+}
+
+/* RCU callback used by ovs_flow_deferred_free. */
+static void rcu_free_flow_callback(struct rcu_head *rcu)
+{
+       struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
+
+       ovs_flow_free(flow);
+}
+
+/* Schedules 'flow' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free(struct sw_flow *flow)
+{
+       call_rcu(&flow->rcu, rcu_free_flow_callback);
+}
+
+/* RCU callback used by ovs_flow_deferred_free_acts. */
+static void rcu_free_acts_callback(struct rcu_head *rcu)
+{
+       struct sw_flow_actions *sf_acts = container_of(rcu,
+                       struct sw_flow_actions, rcu);
+       kfree(sf_acts);
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
+{
+       call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
+}
+
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+       struct qtag_prefix {
+               __be16 eth_type; /* ETH_P_8021Q */
+               __be16 tci;
+       };
+       struct qtag_prefix *qp;
+
+       if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+               return 0;
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
+                                        sizeof(__be16))))
+               return -ENOMEM;
+
+       qp = (struct qtag_prefix *) skb->data;
+       key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
+       __skb_pull(skb, sizeof(struct qtag_prefix));
+
+       return 0;
+}
+
+static __be16 parse_ethertype(struct sk_buff *skb)
+{
+       struct llc_snap_hdr {
+               u8  dsap;  /* Always 0xAA */
+               u8  ssap;  /* Always 0xAA */
+               u8  ctrl;
+               u8  oui[3];
+               __be16 ethertype;
+       };
+       struct llc_snap_hdr *llc;
+       __be16 proto;
+
+       proto = *(__be16 *) skb->data;
+       __skb_pull(skb, sizeof(__be16));
+
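+       /* EtherType values of 1536 (0x0600) and above denote Ethernet II
+        * framing; smaller values are an 802.3 length, so the real protocol
+        * must be taken from the LLC/SNAP header that follows. */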
+       if (ntohs(proto) >= 1536)
+               return proto;
+
+       if (skb->len < sizeof(struct llc_snap_hdr))
+               return htons(ETH_P_802_2);
+
+       if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
+               return htons(0);
+
+       llc = (struct llc_snap_hdr *) skb->data;
+       if (llc->dsap != LLC_SAP_SNAP ||
+           llc->ssap != LLC_SAP_SNAP ||
+           (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
+               return htons(ETH_P_802_2);
+
+       __skb_pull(skb, sizeof(struct llc_snap_hdr));
+       return llc->ethertype;
+}
+
+static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
+                       int *key_lenp, int nh_len)
+{
+       struct icmp6hdr *icmp = icmp6_hdr(skb);
+       int error = 0;
+       int key_len;
+
+       /* The ICMPv6 type and code fields use the 16-bit transport port
+        * fields, so we need to store them in 16-bit network byte order.
+        */
+       key->ipv6.tp.src = htons(icmp->icmp6_type);
+       key->ipv6.tp.dst = htons(icmp->icmp6_code);
+       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+
+       if (icmp->icmp6_code == 0 &&
+           (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+            icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+               int icmp_len = skb->len - skb_transport_offset(skb);
+               struct nd_msg *nd;
+               int offset;
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+               /* In order to process neighbor discovery options, we need the
+                * entire packet.
+                */
+               if (unlikely(icmp_len < sizeof(*nd)))
+                       goto out;
+               if (unlikely(skb_linearize(skb))) {
+                       error = -ENOMEM;
+                       goto out;
+               }
+
+               nd = (struct nd_msg *)skb_transport_header(skb);
+               key->ipv6.nd.target = nd->target;
+               key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+
+               icmp_len -= sizeof(*nd);
+               offset = 0;
+               while (icmp_len >= 8) {
+                       struct nd_opt_hdr *nd_opt =
+                                (struct nd_opt_hdr *)(nd->opt + offset);
+                       int opt_len = nd_opt->nd_opt_len * 8;
+
+                       if (unlikely(!opt_len || opt_len > icmp_len))
+                               goto invalid;
+
+                       /* Store the link layer address if the appropriate
+                        * option is provided.  It is considered an error if
+                        * the same link layer option is specified twice.
+                        */
+                       if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
+                           && opt_len == 8) {
+                               if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
+                                       goto invalid;
+                               memcpy(key->ipv6.nd.sll,
+                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                       } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
+                                  && opt_len == 8) {
+                               if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
+                                       goto invalid;
+                               memcpy(key->ipv6.nd.tll,
+                                   &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
+                       }
+
+                       icmp_len -= opt_len;
+                       offset += opt_len;
+               }
+       }
+
+       goto out;
+
+invalid:
+       memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
+       memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
+       memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
+
+out:
+       *key_lenp = key_len;
+       return error;
+}
+
+/**
+ * ovs_flow_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ * Ethernet header
+ * @in_port: port number on which @skb was received.
+ * @key: output flow key
+ * @key_lenp: length of output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header pointers as follows:
+ *
+ *    - skb->mac_header: the Ethernet header.
+ *
+ *    - skb->network_header: just past the Ethernet header, or just past the
+ *      VLAN header, to the first byte of the Ethernet payload.
+ *
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->eth.type values it is left untouched.
+ */
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
+                int *key_lenp)
+{
+       int error = 0;
+       int key_len = SW_FLOW_KEY_OFFSET(eth);
+       struct ethhdr *eth;
+
+       memset(key, 0, sizeof(*key));
+
+       key->phy.priority = skb->priority;
+       key->phy.in_port = in_port;
+
+       skb_reset_mac_header(skb);
+
+       /* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
+        * header in the linear data area.
+        */
+       eth = eth_hdr(skb);
+       memcpy(key->eth.src, eth->h_source, ETH_ALEN);
+       memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);
+
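+       /* skb->data is advanced past the MAC addresses so that the VLAN and
+        * EtherType parsing below starts at the type field; it is pushed back
+        * once the network header offset has been recorded. */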
+       __skb_pull(skb, 2 * ETH_ALEN);
+
+       if (vlan_tx_tag_present(skb))
+               key->eth.tci = htons(skb->vlan_tci);
+       else if (eth->h_proto == htons(ETH_P_8021Q))
+               if (unlikely(parse_vlan(skb, key)))
+                       return -ENOMEM;
+
+       key->eth.type = parse_ethertype(skb);
+       if (unlikely(key->eth.type == htons(0)))
+               return -ENOMEM;
+
+       skb_reset_network_header(skb);
+       __skb_push(skb, skb->data - skb_mac_header(skb));
+
+       /* Network layer. */
+       if (key->eth.type == htons(ETH_P_IP)) {
+               struct iphdr *nh;
+               __be16 offset;
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+
+               error = check_iphdr(skb);
+               if (unlikely(error)) {
+                       if (error == -EINVAL) {
+                               skb->transport_header = skb->network_header;
+                               error = 0;
+                       }
+                       goto out;
+               }
+
+               nh = ip_hdr(skb);
+               key->ipv4.addr.src = nh->saddr;
+               key->ipv4.addr.dst = nh->daddr;
+
+               key->ip.proto = nh->protocol;
+               key->ip.tos = nh->tos;
+               key->ip.ttl = nh->ttl;
+
+               offset = nh->frag_off & htons(IP_OFFSET);
+               if (offset) {
+                       key->ip.frag = OVS_FRAG_TYPE_LATER;
+                       goto out;
+               }
+               if (nh->frag_off & htons(IP_MF) ||
+                        skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+               /* Transport layer. */
+               if (key->ip.proto == IPPROTO_TCP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+                       if (tcphdr_ok(skb)) {
+                               struct tcphdr *tcp = tcp_hdr(skb);
+                               key->ipv4.tp.src = tcp->source;
+                               key->ipv4.tp.dst = tcp->dest;
+                       }
+               } else if (key->ip.proto == IPPROTO_UDP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+                       if (udphdr_ok(skb)) {
+                               struct udphdr *udp = udp_hdr(skb);
+                               key->ipv4.tp.src = udp->source;
+                               key->ipv4.tp.dst = udp->dest;
+                       }
+               } else if (key->ip.proto == IPPROTO_ICMP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+                       if (icmphdr_ok(skb)) {
+                               struct icmphdr *icmp = icmp_hdr(skb);
+                               /* The ICMP type and code fields use the 16-bit
+                                * transport port fields, so we need to store
+                                * them in 16-bit network byte order. */
+                               key->ipv4.tp.src = htons(icmp->type);
+                               key->ipv4.tp.dst = htons(icmp->code);
+                       }
+               }
+
+       } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
+               struct arp_eth_header *arp;
+
+               arp = (struct arp_eth_header *)skb_network_header(skb);
+
+               if (arp->ar_hrd == htons(ARPHRD_ETHER)
+                               && arp->ar_pro == htons(ETH_P_IP)
+                               && arp->ar_hln == ETH_ALEN
+                               && arp->ar_pln == 4) {
+
+                       /* We only match on the lower 8 bits of the opcode. */
+                       if (ntohs(arp->ar_op) <= 0xff)
+                               key->ip.proto = ntohs(arp->ar_op);
+
+                       if (key->ip.proto == ARPOP_REQUEST
+                                       || key->ip.proto == ARPOP_REPLY) {
+                               memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
+                               memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
+                               memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
+                               memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
+                               key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+                       }
+               }
+       } else if (key->eth.type == htons(ETH_P_IPV6)) {
+               int nh_len;             /* IPv6 Header + Extensions */
+
+               nh_len = parse_ipv6hdr(skb, key, &key_len);
+               if (unlikely(nh_len < 0)) {
+                       if (nh_len == -EINVAL)
+                               skb->transport_header = skb->network_header;
+                       else
+                               error = nh_len;
+                       goto out;
+               }
+
+               if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+                       goto out;
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
+               /* Transport layer. */
+               if (key->ip.proto == NEXTHDR_TCP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+                       if (tcphdr_ok(skb)) {
+                               struct tcphdr *tcp = tcp_hdr(skb);
+                               key->ipv6.tp.src = tcp->source;
+                               key->ipv6.tp.dst = tcp->dest;
+                       }
+               } else if (key->ip.proto == NEXTHDR_UDP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+                       if (udphdr_ok(skb)) {
+                               struct udphdr *udp = udp_hdr(skb);
+                               key->ipv6.tp.src = udp->source;
+                               key->ipv6.tp.dst = udp->dest;
+                       }
+               } else if (key->ip.proto == NEXTHDR_ICMP) {
+                       key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+                       if (icmp6hdr_ok(skb)) {
+                               error = parse_icmpv6(skb, key, &key_len, nh_len);
+                               if (error < 0)
+                                       goto out;
+                       }
+               }
+       }
+
+out:
+       *key_lenp = key_len;
+       return error;
+}
+
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
+{
+       return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+                               struct sw_flow_key *key, int key_len)
+{
+       struct sw_flow *flow;
+       struct hlist_node *n;
+       struct hlist_head *head;
+       u32 hash;
+
+       hash = ovs_flow_hash(key, key_len);
+
+       head = find_bucket(table, hash);
+       hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
+
+               if (flow->hash == hash &&
+                   !memcmp(&flow->key, key, key_len)) {
+                       return flow;
+               }
+       }
+       return NULL;
+}
+
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+       struct hlist_head *head;
+
+       head = find_bucket(table, flow->hash);
+       hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+       table->count++;
+}
+
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+{
+       hlist_del_rcu(&flow->hash_node[table->node_ver]);
+       table->count--;
+       BUG_ON(table->count < 0);
+}
+
+/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
+const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
+       [OVS_KEY_ATTR_ENCAP] = -1,
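+       /* -1 marks a variable-length (nested) attribute, for which
+        * parse_flow_nlattrs() skips the exact-size check. */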
+       [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
+       [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
+       [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
+       [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
+       [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
+       [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
+       [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
+       [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
+       [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+       [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
+       [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
+       [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
+       [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+};
+
+static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+                                 const struct nlattr *a[], u32 *attrs)
+{
+       const struct ovs_key_icmp *icmp_key;
+       const struct ovs_key_tcp *tcp_key;
+       const struct ovs_key_udp *udp_key;
+
+       switch (swkey->ip.proto) {
+       case IPPROTO_TCP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+               tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+               swkey->ipv4.tp.src = tcp_key->tcp_src;
+               swkey->ipv4.tp.dst = tcp_key->tcp_dst;
+               break;
+
+       case IPPROTO_UDP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+               udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+               swkey->ipv4.tp.src = udp_key->udp_src;
+               swkey->ipv4.tp.dst = udp_key->udp_dst;
+               break;
+
+       case IPPROTO_ICMP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
+               icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+               swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
+               swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
+               break;
+       }
+
+       return 0;
+}
+
+static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
+                                 const struct nlattr *a[], u32 *attrs)
+{
+       const struct ovs_key_icmpv6 *icmpv6_key;
+       const struct ovs_key_tcp *tcp_key;
+       const struct ovs_key_udp *udp_key;
+
+       switch (swkey->ip.proto) {
+       case IPPROTO_TCP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+               tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+               swkey->ipv6.tp.src = tcp_key->tcp_src;
+               swkey->ipv6.tp.dst = tcp_key->tcp_dst;
+               break;
+
+       case IPPROTO_UDP:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+               udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+               swkey->ipv6.tp.src = udp_key->udp_src;
+               swkey->ipv6.tp.dst = udp_key->udp_dst;
+               break;
+
+       case IPPROTO_ICMPV6:
+               if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
+                       return -EINVAL;
+               *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+
+               *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
+               icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+               swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
+               swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+
+               if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+                   swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+                       const struct ovs_key_nd *nd_key;
+
+                       if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
+                               return -EINVAL;
+                       *attrs &= ~(1 << OVS_KEY_ATTR_ND);
+
+                       *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
+                       nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+                       memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
+                              sizeof(swkey->ipv6.nd.target));
+                       memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
+                       memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
+               }
+               break;
+       }
+
+       return 0;
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+                             const struct nlattr *a[], u32 *attrsp)
+{
+       const struct nlattr *nla;
+       u32 attrs;
+       int rem;
+
+       attrs = 0;
+       nla_for_each_nested(nla, attr, rem) {
+               u16 type = nla_type(nla);
+               int expected_len;
+
+               if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
+                       return -EINVAL;
+
+               expected_len = ovs_key_lens[type];
+               if (nla_len(nla) != expected_len && expected_len != -1)
+                       return -EINVAL;
+
+               attrs |= 1 << type;
+               a[type] = nla;
+       }
+       if (rem)
+               return -EINVAL;
+
+       *attrsp = attrs;
+       return 0;
+}
+
+/**
+ * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
+ * @swkey: receives the extracted flow key.
+ * @key_lenp: number of bytes used in @swkey.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ */
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+                     const struct nlattr *attr)
+{
+       const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+       const struct ovs_key_ethernet *eth_key;
+       int key_len;
+       u32 attrs;
+       int err;
+
+       memset(swkey, 0, sizeof(struct sw_flow_key));
+       key_len = SW_FLOW_KEY_OFFSET(eth);
+
+       err = parse_flow_nlattrs(attr, a, &attrs);
+       if (err)
+               return err;
+
+       /* Metadata attributes. */
+       if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+               swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
+               attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+       }
+       if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+               u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
+               if (in_port >= DP_MAX_PORTS)
+                       return -EINVAL;
+               swkey->phy.in_port = in_port;
+               attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+       } else {
+               swkey->phy.in_port = USHRT_MAX;
+       }
+
+       /* Data attributes. */
+       if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
+               return -EINVAL;
+       attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+
+       eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+       memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
+       memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+
+       if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
+           nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
+               const struct nlattr *encap;
+               __be16 tci;
+
+               if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
+                             (1 << OVS_KEY_ATTR_ETHERTYPE) |
+                             (1 << OVS_KEY_ATTR_ENCAP)))
+                       return -EINVAL;
+
+               encap = a[OVS_KEY_ATTR_ENCAP];
+               tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+               if (tci & htons(VLAN_TAG_PRESENT)) {
+                       swkey->eth.tci = tci;
+
+                       err = parse_flow_nlattrs(encap, a, &attrs);
+                       if (err)
+                               return err;
+               } else if (!tci) {
+                       /* Corner case for truncated 802.1Q header. */
+                       if (nla_len(encap))
+                               return -EINVAL;
+
+                       swkey->eth.type = htons(ETH_P_8021Q);
+                       *key_lenp = key_len;
+                       return 0;
+               } else {
+                       return -EINVAL;
+               }
+       }
+
+       if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
+               swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+               if (ntohs(swkey->eth.type) < 1536)
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+       } else {
+               swkey->eth.type = htons(ETH_P_802_2);
+       }
+
+       if (swkey->eth.type == htons(ETH_P_IP)) {
+               const struct ovs_key_ipv4 *ipv4_key;
+
+               if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
+               ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
+               if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+                       return -EINVAL;
+               swkey->ip.proto = ipv4_key->ipv4_proto;
+               swkey->ip.tos = ipv4_key->ipv4_tos;
+               swkey->ip.ttl = ipv4_key->ipv4_ttl;
+               swkey->ip.frag = ipv4_key->ipv4_frag;
+               swkey->ipv4.addr.src = ipv4_key->ipv4_src;
+               swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
+
+               if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+                       err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+                       if (err)
+                               return err;
+               }
+       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+               const struct ovs_key_ipv6 *ipv6_key;
+
+               if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
+               ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
+               if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+                       return -EINVAL;
+               swkey->ipv6.label = ipv6_key->ipv6_label;
+               swkey->ip.proto = ipv6_key->ipv6_proto;
+               swkey->ip.tos = ipv6_key->ipv6_tclass;
+               swkey->ip.ttl = ipv6_key->ipv6_hlimit;
+               swkey->ip.frag = ipv6_key->ipv6_frag;
+               memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
+                      sizeof(swkey->ipv6.addr.src));
+               memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
+                      sizeof(swkey->ipv6.addr.dst));
+
+               if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+                       err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
+                       if (err)
+                               return err;
+               }
+       } else if (swkey->eth.type == htons(ETH_P_ARP)) {
+               const struct ovs_key_arp *arp_key;
+
+               if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+                       return -EINVAL;
+               attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+
+               key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
+               arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+               swkey->ipv4.addr.src = arp_key->arp_sip;
+               swkey->ipv4.addr.dst = arp_key->arp_tip;
+               if (arp_key->arp_op & htons(0xff00))
+                       return -EINVAL;
+               swkey->ip.proto = ntohs(arp_key->arp_op);
+               memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
+               memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+       }
+
+       if (attrs)
+               return -EINVAL;
+       *key_lenp = key_len;
+
+       return 0;
+}
+
+/**
+ * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into flow metadata.
+ * @priority: receives the extracted packet priority.
+ * @in_port: receives the extracted input port.
+ * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence.
+ *
+ * This parses a series of Netlink attributes that form a flow key, which must
+ * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of
+ * it to get the metadata, that is, the parts of the flow key that cannot be
+ * extracted from the packet itself.
+ */
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+                              const struct nlattr *attr)
+{
+       const struct nlattr *nla;
+       int rem;
+
+       *in_port = USHRT_MAX;
+       *priority = 0;
+
+       nla_for_each_nested(nla, attr, rem) {
+               int type = nla_type(nla);
+
+               if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
+                       if (nla_len(nla) != ovs_key_lens[type])
+                               return -EINVAL;
+
+                       switch (type) {
+                       case OVS_KEY_ATTR_PRIORITY:
+                               *priority = nla_get_u32(nla);
+                               break;
+
+                       case OVS_KEY_ATTR_IN_PORT:
+                               if (nla_get_u32(nla) >= DP_MAX_PORTS)
+                                       return -EINVAL;
+                               *in_port = nla_get_u32(nla);
+                               break;
+                       }
+               }
+       }
+       if (rem)
+               return -EINVAL;
+       return 0;
+}
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+{
+       struct ovs_key_ethernet *eth_key;
+       struct nlattr *nla, *encap;
+
+       if (swkey->phy.priority)
+               NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);
+
+       if (swkey->phy.in_port != USHRT_MAX)
+               NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);
+
+       nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
+       if (!nla)
+               goto nla_put_failure;
+       eth_key = nla_data(nla);
+       memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
+       memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+
+       if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
+               NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
+               NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
+               encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+               if (!swkey->eth.tci)
+                       goto unencap;
+       } else {
+               encap = NULL;
+       }
+
+       if (swkey->eth.type == htons(ETH_P_802_2))
+               goto unencap;
+
+       NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);
+
+       if (swkey->eth.type == htons(ETH_P_IP)) {
+               struct ovs_key_ipv4 *ipv4_key;
+
+               nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
+               if (!nla)
+                       goto nla_put_failure;
+               ipv4_key = nla_data(nla);
+               ipv4_key->ipv4_src = swkey->ipv4.addr.src;
+               ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
+               ipv4_key->ipv4_proto = swkey->ip.proto;
+               ipv4_key->ipv4_tos = swkey->ip.tos;
+               ipv4_key->ipv4_ttl = swkey->ip.ttl;
+               ipv4_key->ipv4_frag = swkey->ip.frag;
+       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+               struct ovs_key_ipv6 *ipv6_key;
+
+               nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
+               if (!nla)
+                       goto nla_put_failure;
+               ipv6_key = nla_data(nla);
+               memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+                               sizeof(ipv6_key->ipv6_src));
+               memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+                               sizeof(ipv6_key->ipv6_dst));
+               ipv6_key->ipv6_label = swkey->ipv6.label;
+               ipv6_key->ipv6_proto = swkey->ip.proto;
+               ipv6_key->ipv6_tclass = swkey->ip.tos;
+               ipv6_key->ipv6_hlimit = swkey->ip.ttl;
+               ipv6_key->ipv6_frag = swkey->ip.frag;
+       } else if (swkey->eth.type == htons(ETH_P_ARP)) {
+               struct ovs_key_arp *arp_key;
+
+               nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
+               if (!nla)
+                       goto nla_put_failure;
+               arp_key = nla_data(nla);
+               memset(arp_key, 0, sizeof(struct ovs_key_arp));
+               arp_key->arp_sip = swkey->ipv4.addr.src;
+               arp_key->arp_tip = swkey->ipv4.addr.dst;
+               arp_key->arp_op = htons(swkey->ip.proto);
+               memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
+               memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+       }
+
+       if ((swkey->eth.type == htons(ETH_P_IP) ||
+            swkey->eth.type == htons(ETH_P_IPV6)) &&
+            swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
+
+               if (swkey->ip.proto == IPPROTO_TCP) {
+                       struct ovs_key_tcp *tcp_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       tcp_key = nla_data(nla);
+                       if (swkey->eth.type == htons(ETH_P_IP)) {
+                               tcp_key->tcp_src = swkey->ipv4.tp.src;
+                               tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+                               tcp_key->tcp_src = swkey->ipv6.tp.src;
+                               tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+                       }
+               } else if (swkey->ip.proto == IPPROTO_UDP) {
+                       struct ovs_key_udp *udp_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       udp_key = nla_data(nla);
+                       if (swkey->eth.type == htons(ETH_P_IP)) {
+                               udp_key->udp_src = swkey->ipv4.tp.src;
+                               udp_key->udp_dst = swkey->ipv4.tp.dst;
+                       } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+                               udp_key->udp_src = swkey->ipv6.tp.src;
+                               udp_key->udp_dst = swkey->ipv6.tp.dst;
+                       }
+               } else if (swkey->eth.type == htons(ETH_P_IP) &&
+                          swkey->ip.proto == IPPROTO_ICMP) {
+                       struct ovs_key_icmp *icmp_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       icmp_key = nla_data(nla);
+                       icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
+                       icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+               } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
+                          swkey->ip.proto == IPPROTO_ICMPV6) {
+                       struct ovs_key_icmpv6 *icmpv6_key;
+
+                       nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
+                                               sizeof(*icmpv6_key));
+                       if (!nla)
+                               goto nla_put_failure;
+                       icmpv6_key = nla_data(nla);
+                       icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
+                       icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+
+                       if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
+                           icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+                               struct ovs_key_nd *nd_key;
+
+                               nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
+                               if (!nla)
+                                       goto nla_put_failure;
+                               nd_key = nla_data(nla);
+                               memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+                                                       sizeof(nd_key->nd_target));
+                               memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
+                               memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+                       }
+               }
+       }
+
+unencap:
+       if (encap)
+               nla_nest_end(skb, encap);
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+/* Initializes the flow module.
+ * Returns zero if successful or a negative error code. */
+int ovs_flow_init(void)
+{
+       flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
+                                       0, NULL);
+       if (flow_cache == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Uninitializes the flow module. */
+void ovs_flow_exit(void)
+{
+       kmem_cache_destroy(flow_cache);
+}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
new file mode 100644 (file)
index 0000000..2747dc2
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef FLOW_H
+#define FLOW_H 1
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/openvswitch.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/flex_array.h>
+#include <net/inet_ecn.h>
+
+struct sk_buff;
+
+struct sw_flow_actions {
+       struct rcu_head rcu;
+       u32 actions_len;
+       struct nlattr actions[];
+};
+
+struct sw_flow_key {
+       struct {
+               u32     priority;       /* Packet QoS priority. */
+               u16     in_port;        /* Input switch port (or USHRT_MAX). */
+       } phy;
+       struct {
+               u8     src[ETH_ALEN];   /* Ethernet source address. */
+               u8     dst[ETH_ALEN];   /* Ethernet destination address. */
+               __be16 tci;             /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+               __be16 type;            /* Ethernet frame type. */
+       } eth;
+       struct {
+               u8     proto;           /* IP protocol or lower 8 bits of ARP opcode. */
+               u8     tos;             /* IP ToS. */
+               u8     ttl;             /* IP TTL/hop limit. */
+               u8     frag;            /* One of OVS_FRAG_TYPE_*. */
+       } ip;
+       union {
+               struct {
+                       struct {
+                               __be32 src;     /* IP source address. */
+                               __be32 dst;     /* IP destination address. */
+                       } addr;
+                       union {
+                               struct {
+                                       __be16 src;             /* TCP/UDP source port. */
+                                       __be16 dst;             /* TCP/UDP destination port. */
+                               } tp;
+                               struct {
+                                       u8 sha[ETH_ALEN];       /* ARP source hardware address. */
+                                       u8 tha[ETH_ALEN];       /* ARP target hardware address. */
+                               } arp;
+                       };
+               } ipv4;
+               struct {
+                       struct {
+                               struct in6_addr src;    /* IPv6 source address. */
+                               struct in6_addr dst;    /* IPv6 destination address. */
+                       } addr;
+                       __be32 label;                   /* IPv6 flow label. */
+                       struct {
+                               __be16 src;             /* TCP/UDP source port. */
+                               __be16 dst;             /* TCP/UDP destination port. */
+                       } tp;
+                       struct {
+                               struct in6_addr target; /* ND target address. */
+                               u8 sll[ETH_ALEN];       /* ND source link layer address. */
+                               u8 tll[ETH_ALEN];       /* ND target link layer address. */
+                       } nd;
+               } ipv6;
+       };
+};
+
+struct sw_flow {
+       struct rcu_head rcu;
+       struct hlist_node hash_node[2];
+       u32 hash;
+
+       struct sw_flow_key key;
+       struct sw_flow_actions __rcu *sf_acts;
+
+       spinlock_t lock;        /* Lock for values below. */
+       unsigned long used;     /* Last used time (in jiffies). */
+       u64 packet_count;       /* Number of packets matched. */
+       u64 byte_count;         /* Number of bytes matched. */
+       u8 tcp_flags;           /* Union of seen TCP flags. */
+};
+
+struct arp_eth_header {
+       __be16      ar_hrd;     /* format of hardware address   */
+       __be16      ar_pro;     /* format of protocol address   */
+       unsigned char   ar_hln; /* length of hardware address   */
+       unsigned char   ar_pln; /* length of protocol address   */
+       __be16      ar_op;      /* ARP opcode (command)     */
+
+       /* Ethernet+IPv4 specific members. */
+       unsigned char       ar_sha[ETH_ALEN];   /* sender hardware address  */
+       unsigned char       ar_sip[4];          /* sender IP address        */
+       unsigned char       ar_tha[ETH_ALEN];   /* target hardware address  */
+       unsigned char       ar_tip[4];          /* target IP address        */
+} __packed;
+
+int ovs_flow_init(void);
+void ovs_flow_exit(void);
+
+struct sw_flow *ovs_flow_alloc(void);
+void ovs_flow_deferred_free(struct sw_flow *);
+void ovs_flow_free(struct sw_flow *flow);
+
+struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
+void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
+
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
+                    int *key_lenp);
+void ovs_flow_used(struct sw_flow *, struct sk_buff *);
+u64 ovs_flow_used_time(unsigned long flow_jiffies);
+
+/* Upper bound on the length of a nlattr-formatted flow key.  The longest
+ * nlattr-formatted flow key would be:
+ *
+ *                         struct  pad  nl hdr  total
+ *                         ------  ---  ------  -----
+ *  OVS_KEY_ATTR_PRIORITY      4    --     4      8
+ *  OVS_KEY_ATTR_IN_PORT       4    --     4      8
+ *  OVS_KEY_ATTR_ETHERNET     12    --     4     16
+ *  OVS_KEY_ATTR_8021Q         4    --     4      8
+ *  OVS_KEY_ATTR_ETHERTYPE     2     2     4      8
+ *  OVS_KEY_ATTR_IPV6         40    --     4     44
+ *  OVS_KEY_ATTR_ICMPV6        2     2     4      8
+ *  OVS_KEY_ATTR_ND           28    --     4     32
+ *  -------------------------------------------------
+ *  total                                       132
+ */
+#define FLOW_BUFSIZE 132
+
+int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
+int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+                     const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
+                              const struct nlattr *);
+
+#define TBL_MIN_BUCKETS                1024
+
+struct flow_table {
+       struct flex_array *buckets;
+       unsigned int count, n_buckets;
+       struct rcu_head rcu;
+       int node_ver;
+       u32 hash_seed;
+       bool keep_flows;
+};
+
+static inline int ovs_flow_tbl_count(struct flow_table *table)
+{
+       return table->count;
+}
+
+static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
+{
+       return (table->count > table->n_buckets);
+}
+
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
+                                   struct sw_flow_key *key, int len);
+void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_alloc(int new_size);
+struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
+struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
+u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
+
+struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
+
+#endif /* flow.h */
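As an illustration of the sizing above (an editorial note, not part of the patch): for a plain, untagged IPv4/TCP packet the key serialized by ovs_flow_to_nlattrs() carries the PRIORITY, IN_PORT, ETHERNET, ETHERTYPE, IPV4 and TCP attributes, i.e. 8 + 8 + 16 + 8 + 16 + 8 = 64 bytes including the 4-byte netlink attribute headers, well under FLOW_BUFSIZE. The 132-byte bound corresponds to the worst case tabulated above, a VLAN-tagged IPv6 neighbour-discovery packet.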
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
new file mode 100644 (file)
index 0000000..8fc28b8
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+struct internal_dev {
+       struct vport *vport;
+};
+
+static struct internal_dev *internal_dev_priv(struct net_device *netdev)
+{
+       return netdev_priv(netdev);
+}
+
+/* This function is only called by the kernel network layer. */
+static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
+                                                       struct rtnl_link_stats64 *stats)
+{
+       struct vport *vport = ovs_internal_dev_get_vport(netdev);
+       struct ovs_vport_stats vport_stats;
+
+       ovs_vport_get_stats(vport, &vport_stats);
+
+       /* The tx and rx stats need to be swapped because the
+        * switch and host OS have opposite perspectives. */
+       stats->rx_packets       = vport_stats.tx_packets;
+       stats->tx_packets       = vport_stats.rx_packets;
+       stats->rx_bytes         = vport_stats.tx_bytes;
+       stats->tx_bytes         = vport_stats.rx_bytes;
+       stats->rx_errors        = vport_stats.tx_errors;
+       stats->tx_errors        = vport_stats.rx_errors;
+       stats->rx_dropped       = vport_stats.tx_dropped;
+       stats->tx_dropped       = vport_stats.rx_dropped;
+
+       return stats;
+}
+
+static int internal_dev_mac_addr(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       return 0;
+}
+
+/* Called with rcu_read_lock_bh. */
+static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       rcu_read_lock();
+       ovs_vport_receive(internal_dev_priv(netdev)->vport, skb);
+       rcu_read_unlock();
+       return 0;
+}
+
+static int internal_dev_open(struct net_device *netdev)
+{
+       netif_start_queue(netdev);
+       return 0;
+}
+
+static int internal_dev_stop(struct net_device *netdev)
+{
+       netif_stop_queue(netdev);
+       return 0;
+}
+
+static void internal_dev_getinfo(struct net_device *netdev,
+                                struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, "openvswitch");
+}
+
+static const struct ethtool_ops internal_dev_ethtool_ops = {
+       .get_drvinfo    = internal_dev_getinfo,
+       .get_link       = ethtool_op_get_link,
+};
+
+static int internal_dev_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       if (new_mtu < 68)
+               return -EINVAL;
+
+       netdev->mtu = new_mtu;
+       return 0;
+}
+
+static void internal_dev_destructor(struct net_device *dev)
+{
+       struct vport *vport = ovs_internal_dev_get_vport(dev);
+
+       ovs_vport_free(vport);
+       free_netdev(dev);
+}
+
+static const struct net_device_ops internal_dev_netdev_ops = {
+       .ndo_open = internal_dev_open,
+       .ndo_stop = internal_dev_stop,
+       .ndo_start_xmit = internal_dev_xmit,
+       .ndo_set_mac_address = internal_dev_mac_addr,
+       .ndo_change_mtu = internal_dev_change_mtu,
+       .ndo_get_stats64 = internal_dev_get_stats,
+};
+
+static void do_setup(struct net_device *netdev)
+{
+       ether_setup(netdev);
+
+       netdev->netdev_ops = &internal_dev_netdev_ops;
+
+       netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       netdev->destructor = internal_dev_destructor;
+       SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
+       netdev->tx_queue_len = 0;
+
+       netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
+                               NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO;
+
+       netdev->vlan_features = netdev->features;
+       netdev->features |= NETIF_F_HW_VLAN_TX;
+       netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+       random_ether_addr(netdev->dev_addr);
+}
+
+static struct vport *internal_dev_create(const struct vport_parms *parms)
+{
+       struct vport *vport;
+       struct netdev_vport *netdev_vport;
+       struct internal_dev *internal_dev;
+       int err;
+
+       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+                               &ovs_internal_vport_ops, parms);
+       if (IS_ERR(vport)) {
+               err = PTR_ERR(vport);
+               goto error;
+       }
+
+       netdev_vport = netdev_vport_priv(vport);
+
+       netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
+                                        parms->name, do_setup);
+       if (!netdev_vport->dev) {
+               err = -ENOMEM;
+               goto error_free_vport;
+       }
+
+       internal_dev = internal_dev_priv(netdev_vport->dev);
+       internal_dev->vport = vport;
+
+       err = register_netdevice(netdev_vport->dev);
+       if (err)
+               goto error_free_netdev;
+
+       dev_set_promiscuity(netdev_vport->dev, 1);
+       netif_start_queue(netdev_vport->dev);
+
+       return vport;
+
+error_free_netdev:
+       free_netdev(netdev_vport->dev);
+error_free_vport:
+       ovs_vport_free(vport);
+error:
+       return ERR_PTR(err);
+}
+
+static void internal_dev_destroy(struct vport *vport)
+{
+       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+       netif_stop_queue(netdev_vport->dev);
+       dev_set_promiscuity(netdev_vport->dev, -1);
+
+       /* unregister_netdevice() waits for an RCU grace period. */
+       unregister_netdevice(netdev_vport->dev);
+}
+
+static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+{
+       struct net_device *netdev = netdev_vport_priv(vport)->dev;
+       int len;
+
+       len = skb->len;
+       skb->dev = netdev;
+       skb->pkt_type = PACKET_HOST;
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       netif_rx(skb);
+
+       return len;
+}
+
+const struct vport_ops ovs_internal_vport_ops = {
+       .type           = OVS_VPORT_TYPE_INTERNAL,
+       .create         = internal_dev_create,
+       .destroy        = internal_dev_destroy,
+       .get_name       = ovs_netdev_get_name,
+       .get_ifindex    = ovs_netdev_get_ifindex,
+       .send           = internal_dev_recv,
+};
+
+int ovs_is_internal_dev(const struct net_device *netdev)
+{
+       return netdev->netdev_ops == &internal_dev_netdev_ops;
+}
+
+struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
+{
+       if (!ovs_is_internal_dev(netdev))
+               return NULL;
+
+       return internal_dev_priv(netdev)->vport;
+}
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
new file mode 100644 (file)
index 0000000..3454447
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_INTERNAL_DEV_H
+#define VPORT_INTERNAL_DEV_H 1
+
+#include "datapath.h"
+#include "vport.h"
+
+int ovs_is_internal_dev(const struct net_device *);
+struct vport *ovs_internal_dev_get_vport(struct net_device *);
+
+#endif /* vport-internal_dev.h */
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
new file mode 100644 (file)
index 0000000..c1068ae
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/if_arp.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/llc.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff.h>
+
+#include <net/llc.h>
+
+#include "datapath.h"
+#include "vport-internal_dev.h"
+#include "vport-netdev.h"
+
+/* Must be called with rcu_read_lock. */
+static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+{
+       if (unlikely(!vport)) {
+               kfree_skb(skb);
+               return;
+       }
+
+       /* Make our own copy of the packet.  Otherwise we will mangle the
+        * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
+        * (No one comes after us, since we tell handle_bridge() that we took
+        * the packet.) */
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (unlikely(!skb))
+               return;
+
+       skb_push(skb, ETH_HLEN);
+       ovs_vport_receive(vport, skb);
+}
+
+/* Called with rcu_read_lock and bottom-halves disabled. */
+static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct vport *vport;
+
+       if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+               return RX_HANDLER_PASS;
+
+       vport = ovs_netdev_get_vport(skb->dev);
+
+       netdev_port_receive(vport, skb);
+
+       return RX_HANDLER_CONSUMED;
+}
+
+static struct vport *netdev_create(const struct vport_parms *parms)
+{
+       struct vport *vport;
+       struct netdev_vport *netdev_vport;
+       int err;
+
+       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
+                               &ovs_netdev_vport_ops, parms);
+       if (IS_ERR(vport)) {
+               err = PTR_ERR(vport);
+               goto error;
+       }
+
+       netdev_vport = netdev_vport_priv(vport);
+
+       netdev_vport->dev = dev_get_by_name(&init_net, parms->name);
+       if (!netdev_vport->dev) {
+               err = -ENODEV;
+               goto error_free_vport;
+       }
+
+       if (netdev_vport->dev->flags & IFF_LOOPBACK ||
+           netdev_vport->dev->type != ARPHRD_ETHER ||
+           ovs_is_internal_dev(netdev_vport->dev)) {
+               err = -EINVAL;
+               goto error_put;
+       }
+
+       err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+                                        vport);
+       if (err)
+               goto error_put;
+
+       dev_set_promiscuity(netdev_vport->dev, 1);
+       netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+
+       return vport;
+
+error_put:
+       dev_put(netdev_vport->dev);
+error_free_vport:
+       ovs_vport_free(vport);
+error:
+       return ERR_PTR(err);
+}
+
+static void netdev_destroy(struct vport *vport)
+{
+       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+       netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+       netdev_rx_handler_unregister(netdev_vport->dev);
+       dev_set_promiscuity(netdev_vport->dev, -1);
+
+       synchronize_rcu();
+
+       dev_put(netdev_vport->dev);
+       ovs_vport_free(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *vport)
+{
+       const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+       return netdev_vport->dev->name;
+}
+
+int ovs_netdev_get_ifindex(const struct vport *vport)
+{
+       const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+       return netdev_vport->dev->ifindex;
+}
+
+static unsigned packet_length(const struct sk_buff *skb)
+{
+       unsigned length = skb->len - ETH_HLEN;
+
+       if (skb->protocol == htons(ETH_P_8021Q))
+               length -= VLAN_HLEN;
+
+       return length;
+}
+
+static int netdev_send(struct vport *vport, struct sk_buff *skb)
+{
+       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+       int mtu = netdev_vport->dev->mtu;
+       int len;
+
+       if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+               if (net_ratelimit())
+                       pr_warn("%s: dropped over-mtu packet: %d > %d\n",
+                               ovs_dp_name(vport->dp), packet_length(skb), mtu);
+               goto error;
+       }
+
+       if (unlikely(skb_warn_if_lro(skb)))
+               goto error;
+
+       skb->dev = netdev_vport->dev;
+       len = skb->len;
+       dev_queue_xmit(skb);
+
+       return len;
+
+error:
+       kfree_skb(skb);
+       ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+       return 0;
+}
+
+/* Returns null if this device is not attached to a datapath. */
+struct vport *ovs_netdev_get_vport(struct net_device *dev)
+{
+       if (likely(dev->priv_flags & IFF_OVS_DATAPATH))
+               return (struct vport *)
+                       rcu_dereference_rtnl(dev->rx_handler_data);
+       else
+               return NULL;
+}
+
+const struct vport_ops ovs_netdev_vport_ops = {
+       .type           = OVS_VPORT_TYPE_NETDEV,
+       .create         = netdev_create,
+       .destroy        = netdev_destroy,
+       .get_name       = ovs_netdev_get_name,
+       .get_ifindex    = ovs_netdev_get_ifindex,
+       .send           = netdev_send,
+};
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
new file mode 100644 (file)
index 0000000..fd9b008
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_NETDEV_H
+#define VPORT_NETDEV_H 1
+
+#include <linux/netdevice.h>
+
+#include "vport.h"
+
+struct vport *ovs_netdev_get_vport(struct net_device *dev);
+
+struct netdev_vport {
+       struct net_device *dev;
+};
+
+static inline struct netdev_vport *
+netdev_vport_priv(const struct vport *vport)
+{
+       return vport_priv(vport);
+}
+
+const char *ovs_netdev_get_name(const struct vport *);
+const char *ovs_netdev_get_config(const struct vport *);
+int ovs_netdev_get_ifindex(const struct vport *);
+
+#endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
new file mode 100644 (file)
index 0000000..7f0ef37
--- /dev/null
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include <linux/dcache.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/compat.h>
+#include <linux/version.h>
+
+#include "vport.h"
+#include "vport-internal_dev.h"
+
+/* List of statically compiled vport implementations.  Don't forget to also
+ * add yours to the list at the bottom of vport.h. */
+static const struct vport_ops *vport_ops_list[] = {
+       &ovs_netdev_vport_ops,
+       &ovs_internal_vport_ops,
+};
+
+/* Protected by RCU read lock for reading, RTNL lock for writing. */
+static struct hlist_head *dev_table;
+#define VPORT_HASH_BUCKETS 1024
+
+/**
+ *     ovs_vport_init - initialize vport subsystem
+ *
+ * Called at module load time to initialize the vport subsystem.
+ */
+int ovs_vport_init(void)
+{
+       dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
+                           GFP_KERNEL);
+       if (!dev_table)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ *     ovs_vport_exit - shutdown vport subsystem
+ *
+ * Called at module exit time to shutdown the vport subsystem.
+ */
+void ovs_vport_exit(void)
+{
+       kfree(dev_table);
+}
+
+static struct hlist_head *hash_bucket(const char *name)
+{
+       unsigned int hash = full_name_hash(name, strlen(name));
+       return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
+}
+
+/**
+ *     ovs_vport_locate - find a port that has already been created
+ *
+ * @name: name of port to find
+ *
+ * Must be called with RTNL or RCU read lock.
+ */
+struct vport *ovs_vport_locate(const char *name)
+{
+       struct hlist_head *bucket = hash_bucket(name);
+       struct vport *vport;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_rcu(vport, node, bucket, hash_node)
+               if (!strcmp(name, vport->ops->get_name(vport)))
+                       return vport;
+
+       return NULL;
+}
+
+/**
+ *     ovs_vport_alloc - allocate and initialize new vport
+ *
+ * @priv_size: Size of private data area to allocate.
+ * @ops: vport device ops
+ *
+ * Allocate and initialize a new vport defined by @ops.  The vport will contain
+ * a private data area of size @priv_size that can be accessed using
+ * vport_priv().  vports that are no longer needed should be released with
+ * vport_free().
+ */
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
+                         const struct vport_parms *parms)
+{
+       struct vport *vport;
+       size_t alloc_size;
+
+       alloc_size = sizeof(struct vport);
+       if (priv_size) {
+               alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
+               alloc_size += priv_size;
+       }
+
+       vport = kzalloc(alloc_size, GFP_KERNEL);
+       if (!vport)
+               return ERR_PTR(-ENOMEM);
+
+       vport->dp = parms->dp;
+       vport->port_no = parms->port_no;
+       vport->upcall_pid = parms->upcall_pid;
+       vport->ops = ops;
+
+       vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
+       if (!vport->percpu_stats) {
+               kfree(vport);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       spin_lock_init(&vport->stats_lock);
+
+       return vport;
+}
+
+/**
+ *     ovs_vport_free - uninitialize and free vport
+ *
+ * @vport: vport to free
+ *
+ * Frees a vport allocated with vport_alloc() when it is no longer needed.
+ *
+ * The caller must ensure that an RCU grace period has passed since the last
+ * time @vport was in a datapath.
+ */
+void ovs_vport_free(struct vport *vport)
+{
+       free_percpu(vport->percpu_stats);
+       kfree(vport);
+}
+
+/**
+ *     ovs_vport_add - add vport device (for kernel callers)
+ *
+ * @parms: Information about new vport.
+ *
+ * Creates a new vport with the specified configuration (which is dependent on
+ * device type).  RTNL lock must be held.
+ */
+struct vport *ovs_vport_add(const struct vport_parms *parms)
+{
+       struct vport *vport;
+       int err = 0;
+       int i;
+
+       ASSERT_RTNL();
+
+       for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
+               if (vport_ops_list[i]->type == parms->type) {
+                       vport = vport_ops_list[i]->create(parms);
+                       if (IS_ERR(vport)) {
+                               err = PTR_ERR(vport);
+                               goto out;
+                       }
+
+                       hlist_add_head_rcu(&vport->hash_node,
+                                          hash_bucket(vport->ops->get_name(vport)));
+                       return vport;
+               }
+       }
+
+       err = -EAFNOSUPPORT;
+
+out:
+       return ERR_PTR(err);
+}
+
+/**
+ *     ovs_vport_set_options - modify existing vport device (for kernel callers)
+ *
+ * @vport: vport to modify.
+ * @port: New configuration.
+ *
+ * Modifies an existing device with the specified configuration (which is
+ * dependent on device type).  RTNL lock must be held.
+ */
+int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
+{
+       ASSERT_RTNL();
+
+       if (!vport->ops->set_options)
+               return -EOPNOTSUPP;
+       return vport->ops->set_options(vport, options);
+}
+
+/**
+ *     ovs_vport_del - delete existing vport device
+ *
+ * @vport: vport to delete.
+ *
+ * Detaches @vport from its datapath and destroys it.  It is possible to fail
+ * for reasons such as lack of memory.  RTNL lock must be held.
+ */
+void ovs_vport_del(struct vport *vport)
+{
+       ASSERT_RTNL();
+
+       hlist_del_rcu(&vport->hash_node);
+
+       vport->ops->destroy(vport);
+}
+
+/**
+ *     ovs_vport_get_stats - retrieve device stats
+ *
+ * @vport: vport from which to retrieve the stats
+ * @stats: location to store stats
+ *
+ * Retrieves transmit, receive, and error stats for the given device.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
+{
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       /* We potentially have 2 sources of stats that need to be combined:
+        * those we have collected (split into err_stats and percpu_stats) from
+        * set_stats() and device error stats from netdev->get_stats() (for
+        * errors that happen downstream and therefore aren't reported through
+        * our vport_record_error() function).
+        * Stats from the first source are reported by ovs (OVS_VPORT_ATTR_STATS).
+        * netdev stats can be read directly via netlink or ioctl.
+        */
+
+       spin_lock_bh(&vport->stats_lock);
+
+       stats->rx_errors        = vport->err_stats.rx_errors;
+       stats->tx_errors        = vport->err_stats.tx_errors;
+       stats->tx_dropped       = vport->err_stats.tx_dropped;
+       stats->rx_dropped       = vport->err_stats.rx_dropped;
+
+       spin_unlock_bh(&vport->stats_lock);
+
+       for_each_possible_cpu(i) {
+               const struct vport_percpu_stats *percpu_stats;
+               struct vport_percpu_stats local_stats;
+               unsigned int start;
+
+               percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
+
+               do {
+                       start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                       local_stats = *percpu_stats;
+               } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+
+               stats->rx_bytes         += local_stats.rx_bytes;
+               stats->rx_packets       += local_stats.rx_packets;
+               stats->tx_bytes         += local_stats.tx_bytes;
+               stats->tx_packets       += local_stats.tx_packets;
+       }
+}
+
+/**
+ *     ovs_vport_get_options - retrieve device options
+ *
+ * @vport: vport from which to retrieve the options.
+ * @skb: sk_buff where options should be appended.
+ *
+ * Retrieves the configuration of the given device, appending an
+ * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
+ * vport-specific attributes to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
+ * negative error code if a real error occurred.  If an error occurs, @skb is
+ * left unmodified.
+ *
+ * Must be called with RTNL lock or rcu_read_lock.
+ */
+int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+       struct nlattr *nla;
+
+       nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
+       if (!nla)
+               return -EMSGSIZE;
+
+       if (vport->ops->get_options) {
+               int err = vport->ops->get_options(vport, skb);
+               if (err) {
+                       nla_nest_cancel(skb, nla);
+                       return err;
+               }
+       }
+
+       nla_nest_end(skb, nla);
+       return 0;
+}
+
+/**
+ *     ovs_vport_receive - pass up received packet to the datapath for processing
+ *
+ * @vport: vport that received the packet
+ * @skb: skb that was received
+ *
+ * Must be called with rcu_read_lock.  The packet cannot be shared and
+ * skb->data should point to the Ethernet header.  The caller must have already
+ * called compute_ip_summed() to initialize the checksumming fields.
+ */
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+{
+       struct vport_percpu_stats *stats;
+
+       stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+       u64_stats_update_begin(&stats->sync);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->sync);
+
+       ovs_dp_process_received_packet(vport, skb);
+}
+
+/**
+ *     ovs_vport_send - send a packet on a device
+ *
+ * @vport: vport on which to send the packet
+ * @skb: skb to send
+ *
+ * Sends the given packet and returns the length of data sent.  Either RTNL
+ * lock or rcu_read_lock must be held.
+ */
+int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+       int sent = vport->ops->send(vport, skb);
+
+       if (likely(sent)) {
+               struct vport_percpu_stats *stats;
+
+               stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
+
+               u64_stats_update_begin(&stats->sync);
+               stats->tx_packets++;
+               stats->tx_bytes += sent;
+               u64_stats_update_end(&stats->sync);
+       }
+       return sent;
+}
+
+/**
+ *     ovs_vport_record_error - indicate device error to generic stats layer
+ *
+ * @vport: vport that encountered the error
+ * @err_type: one of enum vport_err_type types to indicate the error type
+ *
+ * If using the vport generic stats layer indicate that an error of the given
+ * type has occurred.
+ */
+void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
+{
+       spin_lock(&vport->stats_lock);
+
+       switch (err_type) {
+       case VPORT_E_RX_DROPPED:
+               vport->err_stats.rx_dropped++;
+               break;
+
+       case VPORT_E_RX_ERROR:
+               vport->err_stats.rx_errors++;
+               break;
+
+       case VPORT_E_TX_DROPPED:
+               vport->err_stats.tx_dropped++;
+               break;
+
+       case VPORT_E_TX_ERROR:
+               vport->err_stats.tx_errors++;
+               break;
+       }
+
+       spin_unlock(&vport->stats_lock);
+}
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
new file mode 100644 (file)
index 0000000..1960962
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef VPORT_H
+#define VPORT_H 1
+
+#include <linux/list.h>
+#include <linux/openvswitch.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/u64_stats_sync.h>
+
+#include "datapath.h"
+
+struct vport;
+struct vport_parms;
+
+/* The following definitions are for users of the vport subsystem: */
+
+int ovs_vport_init(void);
+void ovs_vport_exit(void);
+
+struct vport *ovs_vport_add(const struct vport_parms *);
+void ovs_vport_del(struct vport *);
+
+struct vport *ovs_vport_locate(const char *name);
+
+void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
+
+int ovs_vport_set_options(struct vport *, struct nlattr *options);
+int ovs_vport_get_options(const struct vport *, struct sk_buff *);
+
+int ovs_vport_send(struct vport *, struct sk_buff *);
+
+/* The following definitions are for implementers of vport devices: */
+
+struct vport_percpu_stats {
+       u64 rx_bytes;
+       u64 rx_packets;
+       u64 tx_bytes;
+       u64 tx_packets;
+       struct u64_stats_sync sync;
+};
+
+struct vport_err_stats {
+       u64 rx_dropped;
+       u64 rx_errors;
+       u64 tx_dropped;
+       u64 tx_errors;
+};
+
+/**
+ * struct vport - one port within a datapath
+ * @rcu: RCU callback head for deferred destruction.
+ * @port_no: Index into @dp's @ports array.
+ * @dp: Datapath to which this port belongs.
+ * @node: Element in @dp's @port_list.
+ * @upcall_pid: The Netlink port to use for packets received on this port that
+ * miss the flow table.
+ * @hash_node: Element in @dev_table hash table in vport.c.
+ * @ops: Class structure.
+ * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+ * @stats_lock: Protects @err_stats.
+ * @err_stats: Points to error statistics used and maintained by vport
+ */
+struct vport {
+       struct rcu_head rcu;
+       u16 port_no;
+       struct datapath *dp;
+       struct list_head node;
+       u32 upcall_pid;
+
+       struct hlist_node hash_node;
+       const struct vport_ops *ops;
+
+       struct vport_percpu_stats __percpu *percpu_stats;
+
+       spinlock_t stats_lock;
+       struct vport_err_stats err_stats;
+};
+
+/**
+ * struct vport_parms - parameters for creating a new vport
+ *
+ * @name: New vport's name.
+ * @type: New vport's type.
+ * @options: %OVS_VPORT_ATTR_OPTIONS attribute from Netlink message, %NULL if
+ * none was supplied.
+ * @dp: New vport's datapath.
+ * @port_no: New vport's port number.
+ */
+struct vport_parms {
+       const char *name;
+       enum ovs_vport_type type;
+       struct nlattr *options;
+
+       /* For ovs_vport_alloc(). */
+       struct datapath *dp;
+       u16 port_no;
+       u32 upcall_pid;
+};
+
+/**
+ * struct vport_ops - definition of a type of virtual port
+ *
+ * @type: %OVS_VPORT_TYPE_* value for this type of virtual port.
+ * @create: Create a new vport configured as specified.  On success returns
+ * a new vport allocated with ovs_vport_alloc(), otherwise an ERR_PTR() value.
+ * @destroy: Destroys a vport.  Must call vport_free() on the vport but not
+ * before an RCU grace period has elapsed.
+ * @set_options: Modify the configuration of an existing vport.  May be %NULL
+ * if modification is not supported.
+ * @get_options: Appends vport-specific attributes for the configuration of an
+ * existing vport to a &struct sk_buff.  May be %NULL for a vport that does not
+ * have any configuration.
+ * @get_name: Get the device's name.
+ * @get_config: Get the device's configuration.
+ * @get_ifindex: Get the system interface index associated with the device.
+ * May be null if the device does not have an ifindex.
+ * @send: Send a packet on the device.  Returns the length of the packet sent.
+ */
+struct vport_ops {
+       enum ovs_vport_type type;
+
+       /* Called with RTNL lock. */
+       struct vport *(*create)(const struct vport_parms *);
+       void (*destroy)(struct vport *);
+
+       int (*set_options)(struct vport *, struct nlattr *);
+       int (*get_options)(const struct vport *, struct sk_buff *);
+
+       /* Called with rcu_read_lock or RTNL lock. */
+       const char *(*get_name)(const struct vport *);
+       void (*get_config)(const struct vport *, void *);
+       int (*get_ifindex)(const struct vport *);
+
+       int (*send)(struct vport *, struct sk_buff *);
+};
+
+enum vport_err_type {
+       VPORT_E_RX_DROPPED,
+       VPORT_E_RX_ERROR,
+       VPORT_E_TX_DROPPED,
+       VPORT_E_TX_ERROR,
+};
+
+struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
+                             const struct vport_parms *);
+void ovs_vport_free(struct vport *);
+
+#define VPORT_ALIGN 8
+
+/**
+ *     vport_priv - access private data area of vport
+ *
+ * @vport: vport to access
+ *
+ * If a nonzero size was passed in priv_size of vport_alloc(), a private data
+ * area was allocated on creation.  This allows that area to be accessed and
+ * used for any purpose needed by the vport implementer.
+ */
+static inline void *vport_priv(const struct vport *vport)
+{
+       return (u8 *)vport + ALIGN(sizeof(struct vport), VPORT_ALIGN);
+}
+
+/**
+ *     vport_from_priv - lookup vport from private data pointer
+ *
+ * @priv: Start of private data area.
+ *
+ * It is sometimes useful to translate from a pointer to the private data
+ * area to the vport, such as in the case where the private data pointer is
+ * the result of a hash table lookup.  @priv must point to the start of the
+ * private data area.
+ */
+static inline struct vport *vport_from_priv(const void *priv)
+{
+       return (struct vport *)(priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
+}
+
+void ovs_vport_receive(struct vport *, struct sk_buff *);
+void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
+
+/* List of statically compiled vport implementations.  Don't forget to also
+ * add yours to the list at the top of vport.c. */
+extern const struct vport_ops ovs_netdev_vport_ops;
+extern const struct vport_ops ovs_internal_vport_ops;
+
+#endif /* vport.h */
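Purely as an editorial illustration (not part of this merge), the following sketch shows the shape of a statically compiled vport implementation built on the interface declared above. Every name in it (struct example_vport, ovs_example_vport_ops and its callbacks) is hypothetical, and OVS_VPORT_TYPE_NETDEV is only a stand-in for a real type value; an actual implementation would also be added to vport_ops_list[] in vport.c and to the extern list at the end of vport.h, as the comments above note.

/* Hypothetical sketch only; not part of the Open vSwitch merge. */
#include <linux/err.h>
#include <linux/if.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/string.h>

#include "vport.h"

extern const struct vport_ops ovs_example_vport_ops;

struct example_vport {
        char name[IFNAMSIZ];    /* private area reached via vport_priv() */
};

static struct vport *example_create(const struct vport_parms *parms)
{
        struct vport *vport;
        struct example_vport *ex;

        /* Reserves sizeof(struct example_vport) bytes of zeroed private data. */
        vport = ovs_vport_alloc(sizeof(struct example_vport),
                                &ovs_example_vport_ops, parms);
        if (IS_ERR(vport))
                return vport;

        ex = vport_priv(vport);
        strncpy(ex->name, parms->name, IFNAMSIZ - 1);
        return vport;
}

static void example_destroy(struct vport *vport)
{
        /* ovs_vport_free() must not run before an RCU grace period. */
        synchronize_rcu();
        ovs_vport_free(vport);
}

static const char *example_get_name(const struct vport *vport)
{
        const struct example_vport *ex = vport_priv(vport);

        return ex->name;
}

static int example_send(struct vport *vport, struct sk_buff *skb)
{
        int len = skb->len;

        /* A real port type would hand the skb to a device here. */
        kfree_skb(skb);
        return len;
}

const struct vport_ops ovs_example_vport_ops = {
        .type           = OVS_VPORT_TYPE_NETDEV,       /* placeholder type value */
        .create         = example_create,
        .destroy        = example_destroy,
        .get_name       = example_get_name,
        .send           = example_send,
};

The pattern mirrors netdev_create()/netdev_destroy() above; the in-tree implementations added by this merge are ovs_netdev_vport_ops and ovs_internal_vport_ops.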
index 82a6f34..0da505c 100644 (file)
@@ -1499,10 +1499,11 @@ retry:
 
        if (!skb) {
                size_t reserved = LL_RESERVED_SPACE(dev);
+               int tlen = dev->needed_tailroom;
                unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
 
                rcu_read_unlock();
-               skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+               skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
                if (skb == NULL)
                        return -ENOBUFS;
                /* FIXME: Save some space for broken drivers that write a hard
@@ -1944,7 +1945,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                void *frame, struct net_device *dev, int size_max,
-               __be16 proto, unsigned char *addr)
+               __be16 proto, unsigned char *addr, int hlen)
 {
        union {
                struct tpacket_hdr *h1;
@@ -1978,7 +1979,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                return -EMSGSIZE;
        }
 
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));
+       skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
 
        data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2053,6 +2054,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        unsigned char *addr;
        int len_sum = 0;
        int status = 0;
+       int hlen, tlen;
 
        mutex_lock(&po->pg_vec_lock);
 
@@ -2101,16 +2103,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                }
 
                status = TP_STATUS_SEND_REQUEST;
+               hlen = LL_RESERVED_SPACE(dev);
+               tlen = dev->needed_tailroom;
                skb = sock_alloc_send_skb(&po->sk,
-                               LL_ALLOCATED_SPACE(dev)
-                               + sizeof(struct sockaddr_ll),
+                               hlen + tlen + sizeof(struct sockaddr_ll),
                                0, &err);
 
                if (unlikely(skb == NULL))
                        goto out_status;
 
                tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
-                               addr);
+                               addr, hlen);
 
                if (unlikely(tp_len < 0)) {
                        if (po->tp_loss) {
@@ -2207,6 +2210,7 @@ static int packet_snd(struct socket *sock,
        int vnet_hdr_len;
        struct packet_sock *po = pkt_sk(sk);
        unsigned short gso_type = 0;
+       int hlen, tlen;
 
        /*
         *      Get and verify the address.
@@ -2291,8 +2295,9 @@ static int packet_snd(struct socket *sock,
                goto out_unlock;
 
        err = -ENOBUFS;
-       skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
-                              LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
+       hlen = LL_RESERVED_SPACE(dev);
+       tlen = dev->needed_tailroom;
+       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
                               msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out_unlock;
index 2ba6e9f..9f60008 100644 (file)
@@ -534,6 +534,29 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
        return pipe_handler_send_created_ind(sk);
 }
 
+static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+       struct pnpipehdr *hdr = pnp_hdr(skb);
+
+       if (hdr->error_code != PN_PIPE_NO_ERROR)
+               return -ECONNREFUSED;
+
+       return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
+               NULL, 0, GFP_ATOMIC);
+
+}
+
+static void pipe_start_flow_control(struct sock *sk)
+{
+       struct pep_sock *pn = pep_sk(sk);
+
+       if (!pn_flow_safe(pn->tx_fc)) {
+               atomic_set(&pn->tx_credits, 1);
+               sk->sk_write_space(sk);
+       }
+       pipe_grant_credits(sk, GFP_ATOMIC);
+}
+
 /* Queue an skb to an actively connected sock.
  * Socket lock must be held. */
 static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
@@ -579,13 +602,25 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
                        sk->sk_state = TCP_CLOSE_WAIT;
                        break;
                }
+               if (pn->init_enable == PN_PIPE_DISABLE)
+                       sk->sk_state = TCP_SYN_RECV;
+               else {
+                       sk->sk_state = TCP_ESTABLISHED;
+                       pipe_start_flow_control(sk);
+               }
+               break;
 
-               sk->sk_state = TCP_ESTABLISHED;
-               if (!pn_flow_safe(pn->tx_fc)) {
-                       atomic_set(&pn->tx_credits, 1);
-                       sk->sk_write_space(sk);
+       case PNS_PEP_ENABLE_RESP:
+               if (sk->sk_state != TCP_SYN_SENT)
+                       break;
+
+               if (pep_enableresp_rcv(sk, skb)) {
+                       sk->sk_state = TCP_CLOSE_WAIT;
+                       break;
                }
-               pipe_grant_credits(sk, GFP_ATOMIC);
+
+               sk->sk_state = TCP_ESTABLISHED;
+               pipe_start_flow_control(sk);
                break;
 
        case PNS_PEP_DISCONNECT_RESP:
@@ -864,14 +899,32 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
        int err;
        u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
 
-       pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+       if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
+               pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+
        err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
-                                       PN_PIPE_ENABLE, data, 4);
+                               pn->init_enable, data, 4);
        if (err) {
                pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
                return err;
        }
+
        sk->sk_state = TCP_SYN_SENT;
+
+       return 0;
+}
+
+static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+{
+       int err;
+
+       err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
+                               NULL, 0);
+       if (err)
+               return err;
+
+       sk->sk_state = TCP_SYN_SENT;
+
        return 0;
 }
 
@@ -879,11 +932,14 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
        struct pep_sock *pn = pep_sk(sk);
        int answ;
+       int ret = -ENOIOCTLCMD;
 
        switch (cmd) {
        case SIOCINQ:
-               if (sk->sk_state == TCP_LISTEN)
-                       return -EINVAL;
+               if (sk->sk_state == TCP_LISTEN) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                lock_sock(sk);
                if (sock_flag(sk, SOCK_URGINLINE) &&
@@ -894,10 +950,22 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
                else
                        answ = 0;
                release_sock(sk);
-               return put_user(answ, (int __user *)arg);
+               ret = put_user(answ, (int __user *)arg);
+               break;
+
+       case SIOCPNENABLEPIPE:
+               lock_sock(sk);
+               if (sk->sk_state == TCP_SYN_SENT)
+                       ret =  -EBUSY;
+               else if (sk->sk_state == TCP_ESTABLISHED)
+                       ret = -EISCONN;
+               else
+                       ret = pep_sock_enable(sk, NULL, 0);
+               release_sock(sk);
+               break;
        }
 
-       return -ENOIOCTLCMD;
+       return ret;
 }
 
 static int pep_init(struct sock *sk)
@@ -960,6 +1028,18 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
                }
                goto out_norel;
 
+       case PNPIPE_HANDLE:
+               if ((sk->sk_state == TCP_CLOSE) &&
+                       (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
+                       pn->pipe_handle = val;
+               else
+                       err = -EINVAL;
+               break;
+
+       case PNPIPE_INITSTATE:
+               pn->init_enable = !!val;
+               break;
+
        default:
                err = -ENOPROTOOPT;
        }
@@ -995,6 +1075,10 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
                        return -EINVAL;
                break;
 
+       case PNPIPE_INITSTATE:
+               val = pn->init_enable;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
index 5be1957..354760e 100644 (file)
@@ -644,7 +644,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       err = strict_strtoul(buf, 0, &state);
+       err = kstrtoul(buf, 0, &state);
        if (err)
                return err;
 
@@ -688,7 +688,7 @@ static ssize_t rfkill_state_store(struct device *dev,
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
-       err = strict_strtoul(buf, 0, &state);
+       err = kstrtoul(buf, 0, &state);
        if (err)
                return err;
 
index 128677d..ca355e7 100644 (file)
@@ -220,18 +220,7 @@ static struct platform_driver rfkill_gpio_driver = {
        },
 };
 
-static int __init rfkill_gpio_init(void)
-{
-       return platform_driver_register(&rfkill_gpio_driver);
-}
-
-static void __exit rfkill_gpio_exit(void)
-{
-       platform_driver_unregister(&rfkill_gpio_driver);
-}
-
-module_init(rfkill_gpio_init);
-module_exit(rfkill_gpio_exit);
+module_platform_driver(rfkill_gpio_driver);
 
 MODULE_DESCRIPTION("gpio rfkill");
 MODULE_AUTHOR("NVIDIA");
index 3ca7277..2ebfe8d 100644 (file)
@@ -144,17 +144,7 @@ static struct platform_driver rfkill_regulator_driver = {
        },
 };
 
-static int __init rfkill_regulator_init(void)
-{
-       return platform_driver_register(&rfkill_regulator_driver);
-}
-module_init(rfkill_regulator_init);
-
-static void __exit rfkill_regulator_exit(void)
-{
-       platform_driver_unregister(&rfkill_regulator_driver);
-}
-module_exit(rfkill_regulator_exit);
+module_platform_driver(rfkill_regulator_driver);
 
 MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
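Editorial note: both rfkill conversions above are the same mechanical cleanup. When module_init()/module_exit() do nothing but register and unregister a platform_driver, module_platform_driver() generates that boilerplate. In spirit (a sketch of what the macro stands for, not a verbatim copy of the kernel definition), module_platform_driver(foo_driver) expands to:

static int __init foo_driver_init(void)
{
        return platform_driver_register(&foo_driver);
}
module_init(foo_driver_init);

static void __exit foo_driver_exit(void)
{
        platform_driver_unregister(&foo_driver);
}
module_exit(foo_driver_exit);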
index 43ea7de..4cba13e 100644 (file)
@@ -306,10 +306,9 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
        td->data_len = len;
 
        if (len > 0) {
-               td->data = kmalloc(len, GFP_KERNEL);
+               td->data = kmemdup(xdr, len, GFP_KERNEL);
                if (!td->data)
                        return -ENOMEM;
-               memcpy(td->data, xdr, len);
                len = (len + 3) & ~3;
                toklen -= len;
                xdr += len >> 2;
@@ -401,10 +400,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
        _debug("ticket len %u", len);
 
        if (len > 0) {
-               *_ticket = kmalloc(len, GFP_KERNEL);
+               *_ticket = kmemdup(xdr, len, GFP_KERNEL);
                if (!*_ticket)
                        return -ENOMEM;
-               memcpy(*_ticket, xdr, len);
                len = (len + 3) & ~3;
                toklen -= len;
                xdr += len >> 2;
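Editorial note: the two rxrpc hunks are pure refactoring. kmemdup(src, len, gfp) allocates len bytes and copies src into them in one step, so the kmalloc()+memcpy() pair collapses into a single call with the same NULL-on-failure behaviour. Sketch of the equivalence, using the names from the hunk above:

/* before */
td->data = kmalloc(len, GFP_KERNEL);
if (!td->data)
        return -ENOMEM;
memcpy(td->data, xdr, len);

/* after: one call, same semantics */
td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
        return -ENOMEM;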
index 7b58230..1d8bd0d 100644 (file)
@@ -26,6 +26,8 @@
 #include <net/pkt_cls.h>
 #include <net/ip.h>
 #include <net/route.h>
+#include <net/flow_keys.h>
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
@@ -66,134 +68,37 @@ static inline u32 addr_fold(void *addr)
        return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
 }
 
-static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be32 *data = NULL, hdata;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               data = skb_header_pointer(skb,
-                                         nhoff + offsetof(struct iphdr,
-                                                          saddr),
-                                         4, &hdata);
-               break;
-       case htons(ETH_P_IPV6):
-               data = skb_header_pointer(skb,
-                                        nhoff + offsetof(struct ipv6hdr,
-                                                         saddr.s6_addr32[3]),
-                                        4, &hdata);
-               break;
-       }
-
-       if (data)
-               return ntohl(*data);
+       if (flow->src)
+               return ntohl(flow->src);
        return addr_fold(skb->sk);
 }
 
-static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be32 *data = NULL, hdata;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               data = skb_header_pointer(skb,
-                                         nhoff + offsetof(struct iphdr,
-                                                          daddr),
-                                         4, &hdata);
-               break;
-       case htons(ETH_P_IPV6):
-               data = skb_header_pointer(skb,
-                                        nhoff + offsetof(struct ipv6hdr,
-                                                         daddr.s6_addr32[3]),
-                                        4, &hdata);
-               break;
-       }
-
-       if (data)
-               return ntohl(*data);
+       if (flow->dst)
+               return ntohl(flow->dst);
        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
 
-static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __u8 *data = NULL, hdata;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-               data = skb_header_pointer(skb,
-                                         nhoff + offsetof(struct iphdr,
-                                                          protocol),
-                                         1, &hdata);
-               break;
-       case htons(ETH_P_IPV6):
-               data = skb_header_pointer(skb,
-                                        nhoff + offsetof(struct ipv6hdr,
-                                                         nexthdr),
-                                        1, &hdata);
-               break;
-       }
-       if (data)
-               return *data;
-       return 0;
+       return flow->ip_proto;
 }
 
-/* helper function to get either src or dst port */
-static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
-                                    __be16 *_port, int dst)
+static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be16 *port = NULL;
-       int poff;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP): {
-               struct iphdr *iph, _iph;
-
-               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-               if (!iph)
-                       break;
-               if (ip_is_fragment(iph))
-                       break;
-               poff = proto_ports_offset(iph->protocol);
-               if (poff >= 0)
-                       port = skb_header_pointer(skb,
-                                       nhoff + iph->ihl * 4 + poff + dst,
-                                       sizeof(*_port), _port);
-               break;
-       }
-       case htons(ETH_P_IPV6): {
-               struct ipv6hdr *iph, _iph;
-
-               iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-               if (!iph)
-                       break;
-               poff = proto_ports_offset(iph->nexthdr);
-               if (poff >= 0)
-                       port = skb_header_pointer(skb,
-                                       nhoff + sizeof(*iph) + poff + dst,
-                                       sizeof(*_port), _port);
-               break;
-       }
-       }
-
-       return port;
-}
-
-static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
-{
-       __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
-
-       if (port)
-               return ntohs(*port);
+       if (flow->ports)
+               return ntohs(flow->port16[0]);
 
        return addr_fold(skb->sk);
 }
 
-static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
-
-       if (port)
-               return ntohs(*port);
+       if (flow->ports)
+               return ntohs(flow->port16[1]);
 
        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
@@ -239,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 })
 #endif
 
-static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        switch (skb->protocol) {
        case htons(ETH_P_IP):
@@ -248,10 +153,10 @@ static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
                return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
        }
 fallback:
-       return flow_get_src(skb, nhoff);
+       return flow_get_src(skb, flow);
 }
 
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        switch (skb->protocol) {
        case htons(ETH_P_IP):
@@ -260,21 +165,21 @@ static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
                return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
        }
 fallback:
-       return flow_get_dst(skb, nhoff);
+       return flow_get_dst(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        return ntohs(CTTUPLE(skb, src.u.all));
 fallback:
-       return flow_get_proto_src(skb, nhoff);
+       return flow_get_proto_src(skb, flow);
 }
 
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
        return ntohs(CTTUPLE(skb, dst.u.all));
 fallback:
-       return flow_get_proto_dst(skb, nhoff);
+       return flow_get_proto_dst(skb, flow);
 }
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -314,21 +219,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
        return skb_get_rxhash(skb);
 }
 
-static u32 flow_key_get(struct sk_buff *skb, int key)
+static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
 {
-       int nhoff = skb_network_offset(skb);
-
        switch (key) {
        case FLOW_KEY_SRC:
-               return flow_get_src(skb, nhoff);
+               return flow_get_src(skb, flow);
        case FLOW_KEY_DST:
-               return flow_get_dst(skb, nhoff);
+               return flow_get_dst(skb, flow);
        case FLOW_KEY_PROTO:
-               return flow_get_proto(skb, nhoff);
+               return flow_get_proto(skb, flow);
        case FLOW_KEY_PROTO_SRC:
-               return flow_get_proto_src(skb, nhoff);
+               return flow_get_proto_src(skb, flow);
        case FLOW_KEY_PROTO_DST:
-               return flow_get_proto_dst(skb, nhoff);
+               return flow_get_proto_dst(skb, flow);
        case FLOW_KEY_IIF:
                return flow_get_iif(skb);
        case FLOW_KEY_PRIORITY:
@@ -338,13 +241,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
        case FLOW_KEY_NFCT:
                return flow_get_nfct(skb);
        case FLOW_KEY_NFCT_SRC:
-               return flow_get_nfct_src(skb, nhoff);
+               return flow_get_nfct_src(skb, flow);
        case FLOW_KEY_NFCT_DST:
-               return flow_get_nfct_dst(skb, nhoff);
+               return flow_get_nfct_dst(skb, flow);
        case FLOW_KEY_NFCT_PROTO_SRC:
-               return flow_get_nfct_proto_src(skb, nhoff);
+               return flow_get_nfct_proto_src(skb, flow);
        case FLOW_KEY_NFCT_PROTO_DST:
-               return flow_get_nfct_proto_dst(skb, nhoff);
+               return flow_get_nfct_proto_dst(skb, flow);
        case FLOW_KEY_RTCLASSID:
                return flow_get_rtclassid(skb);
        case FLOW_KEY_SKUID:
@@ -361,6 +264,16 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
        }
 }
 
+#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |                \
+                         (1 << FLOW_KEY_DST) |                 \
+                         (1 << FLOW_KEY_PROTO) |               \
+                         (1 << FLOW_KEY_PROTO_SRC) |           \
+                         (1 << FLOW_KEY_PROTO_DST) |           \
+                         (1 << FLOW_KEY_NFCT_SRC) |            \
+                         (1 << FLOW_KEY_NFCT_DST) |            \
+                         (1 << FLOW_KEY_NFCT_PROTO_SRC) |      \
+                         (1 << FLOW_KEY_NFCT_PROTO_DST))
+
 static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
 {
@@ -372,17 +285,20 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        int r;
 
        list_for_each_entry(f, &head->filters, list) {
-               u32 keys[f->nkeys];
+               u32 keys[FLOW_KEY_MAX + 1];
+               struct flow_keys flow_keys;
 
                if (!tcf_em_tree_match(skb, &f->ematches, NULL))
                        continue;
 
                keymask = f->keymask;
+               if (keymask & FLOW_KEYS_NEEDED)
+                       skb_flow_dissect(skb, &flow_keys);
 
                for (n = 0; n < f->nkeys; n++) {
                        key = ffs(keymask) - 1;
                        keymask &= ~(1 << key);
-                       keys[n] = flow_key_get(skb, key);
+                       keys[n] = flow_key_get(skb, key, &flow_keys);
                }
 
                if (f->mode == FLOW_MODE_HASH)
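Editorial note: the cls_flow rewrite above leans on the then-new shared flow dissector. skb_flow_dissect() parses the packet once into a small struct flow_keys, and the individual FLOW_KEY_* getters simply read fields from it instead of each re-walking the IP header. A sketch of a typical caller, assuming the original <net/flow_keys.h> layout (src and dst addresses, a ports union overlaying two 16-bit ports, and ip_proto):

#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <net/flow_keys.h>

/* One dissect per packet; every key lookup afterwards is a plain field read. */
static u32 example_flow_hash(const struct sk_buff *skb, u32 perturbation)
{
        struct flow_keys keys;

        skb_flow_dissect(skb, &keys);
        return jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src ^ keys.ip_proto,
                            (__force u32)keys.ports,
                            perturbation);
}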
index 3422b25..bef00ac 100644 (file)
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
+#include <net/flow_keys.h>
 
 /*
    CHOKe stateless AQM for fair bandwidth allocation
@@ -142,85 +139,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
        --sch->q.qlen;
 }
 
-/*
- * Compare flow of two packets
- *  Returns true only if source and destination address and port match.
- *          false for special cases
- */
-static bool choke_match_flow(struct sk_buff *skb1,
-                            struct sk_buff *skb2)
-{
-       int off1, off2, poff;
-       const u32 *ports1, *ports2;
-       u8 ip_proto;
-       __u32 hash1;
-
-       if (skb1->protocol != skb2->protocol)
-               return false;
-
-       /* Use hash value as quick check
-        * Assumes that __skb_get_rxhash makes IP header and ports linear
-        */
-       hash1 = skb_get_rxhash(skb1);
-       if (!hash1 || hash1 != skb_get_rxhash(skb2))
-               return false;
-
-       /* Probably match, but be sure to avoid hash collisions */
-       off1 = skb_network_offset(skb1);
-       off2 = skb_network_offset(skb2);
-
-       switch (skb1->protocol) {
-       case __constant_htons(ETH_P_IP): {
-               const struct iphdr *ip1, *ip2;
-
-               ip1 = (const struct iphdr *) (skb1->data + off1);
-               ip2 = (const struct iphdr *) (skb2->data + off2);
-
-               ip_proto = ip1->protocol;
-               if (ip_proto != ip2->protocol ||
-                   ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
-                       return false;
-
-               if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
-                       ip_proto = 0;
-               off1 += ip1->ihl * 4;
-               off2 += ip2->ihl * 4;
-               break;
-       }
-
-       case __constant_htons(ETH_P_IPV6): {
-               const struct ipv6hdr *ip1, *ip2;
-
-               ip1 = (const struct ipv6hdr *) (skb1->data + off1);
-               ip2 = (const struct ipv6hdr *) (skb2->data + off2);
-
-               ip_proto = ip1->nexthdr;
-               if (ip_proto != ip2->nexthdr ||
-                   ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
-                   ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
-                       return false;
-               off1 += 40;
-               off2 += 40;
-       }
-
-       default: /* Maybe compare MAC header here? */
-               return false;
-       }
-
-       poff = proto_ports_offset(ip_proto);
-       if (poff < 0)
-               return true;
-
-       off1 += poff;
-       off2 += poff;
-
-       ports1 = (__force u32 *)(skb1->data + off1);
-       ports2 = (__force u32 *)(skb2->data + off2);
-       return *ports1 == *ports2;
-}
-
 struct choke_skb_cb {
-       u16 classid;
+       u16                     classid;
+       u8                      keys_valid;
+       struct flow_keys        keys;
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -241,6 +163,32 @@ static u16 choke_get_classid(const struct sk_buff *skb)
 }
 
 /*
+ * Compare flow of two packets
+ *  Returns true only if source and destination addresses and ports match,
+ *          false for special cases.
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+                            struct sk_buff *skb2)
+{
+       if (skb1->protocol != skb2->protocol)
+               return false;
+
+       if (!choke_skb_cb(skb1)->keys_valid) {
+               choke_skb_cb(skb1)->keys_valid = 1;
+               skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
+       }
+
+       if (!choke_skb_cb(skb2)->keys_valid) {
+               choke_skb_cb(skb2)->keys_valid = 1;
+               skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
+       }
+
+       return !memcmp(&choke_skb_cb(skb1)->keys,
+                      &choke_skb_cb(skb2)->keys,
+                      sizeof(struct flow_keys));
+}
+
+/*
  * Classify flow using either:
  *  1. pre-existing classification result in skb
  *  2. fast internal classification
@@ -326,6 +274,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                        goto other_drop;        /* Packet was eaten by filter */
        }
 
+       choke_skb_cb(skb)->keys_valid = 0;
        /* Compute average queue usage (see RED) */
        p->qavg = red_calc_qavg(p, sch->q.qlen);
        if (red_is_idling(p))
@@ -445,6 +394,7 @@ static void choke_reset(struct Qdisc *sch)
 static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
        [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
        [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
+       [TCA_CHOKE_MAX_P]       = { .type = NLA_U32 },
 };
 
 
@@ -466,6 +416,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -478,6 +429,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -527,7 +480,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_CHOKE_STAB]));
+                     nla_data(tb[TCA_CHOKE_STAB]),
+                     max_P);
 
        if (q->head == q->tail)
                red_end_of_idle_period(&q->parms);
@@ -561,6 +515,7 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
 
        NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+       NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
index 69fca27..67fc573 100644 (file)
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-               if (!netif_tx_queue_frozen_or_stopped(txq)) {
+               if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        spin_unlock(root_lock);
 
        HARD_TX_LOCK(dev, txq, smp_processor_id());
-       if (!netif_tx_queue_frozen_or_stopped(txq))
+       if (!netif_xmit_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
 
        HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                ret = dev_requeue_skb(skb, q);
        }
 
-       if (ret && netif_tx_queue_frozen_or_stopped(txq))
+       if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;
 
        return ret;
@@ -242,10 +242,11 @@ static void dev_watchdog(unsigned long arg)
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
-                               if (netif_tx_queue_stopped(txq) &&
+                               if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
+                                       txq->trans_timeout++;
                                        break;
                                }
                        }
index 6cd8ddf..1b5e631 100644 (file)
@@ -34,7 +34,7 @@ struct gred_sched;
 
 struct gred_sched_data {
        u32             limit;          /* HARD maximal queue length    */
-       u32             DP;             /* the drop pramaters */
+       u32             DP;             /* the drop parameters */
        u32             bytesin;        /* bytes seen on virtualQ so far*/
        u32             packetsin;      /* packets seen on virtualQ so far*/
        u32             backlog;        /* bytes on the virtualQ */
@@ -379,7 +379,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
-                                struct tc_gred_qopt *ctl, int prio, u8 *stab)
+                                struct tc_gred_qopt *ctl, int prio,
+                                u8 *stab, u32 max_P)
 {
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q;
@@ -400,7 +401,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 
        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
-                     ctl->Scell_log, stab);
+                     ctl->Scell_log, stab, max_P);
 
        return 0;
 }
@@ -409,6 +410,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_PARMS]        = { .len = sizeof(struct tc_gred_qopt) },
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
+       [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -418,6 +420,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
        struct nlattr *tb[TCA_GRED_MAX + 1];
        int err, prio = GRED_DEF_PRIO;
        u8 *stab;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -433,6 +436,8 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_GRED_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+
        err = -EINVAL;
        ctl = nla_data(tb[TCA_GRED_PARMS]);
        stab = nla_data(tb[TCA_GRED_STAB]);
@@ -457,7 +462,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
 
        sch_tree_lock(sch);
 
-       err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
+       err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P);
        if (err < 0)
                goto errout_locked;
 
@@ -498,6 +503,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct gred_sched *table = qdisc_priv(sch);
        struct nlattr *parms, *opts = NULL;
        int i;
+       u32 max_p[MAX_DPs];
        struct tc_gred_sopt sopt = {
                .DPs    = table->DPs,
                .def_DP = table->def,
@@ -509,6 +515,14 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
+
+       for (i = 0; i < MAX_DPs; i++) {
+               struct gred_sched_data *q = table->tab[i];
+
+               max_p[i] = q ? q->parms.max_P : 0;
+       }
+       NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p);
+
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;
index edc1950..49131d7 100644 (file)
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
-               if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+               if (!netif_xmit_stopped(
+                   netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
                        qdisc = q->queues[q->curband];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
-               if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+               if (!netif_xmit_stopped(
+                   netdev_get_tx_queue(qdisc_dev(sch), curband))) {
                        qdisc = q->queues[curband];
                        skb = qdisc->ops->peek(qdisc);
                        if (skb)
index eb3b9a8..1fa2f90 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -79,6 +80,11 @@ struct netem_sched_data {
        u32 duplicate;
        u32 reorder;
        u32 corrupt;
+       u32 rate;
+       s32 packet_overhead;
+       u32 cell_size;
+       u32 cell_size_reciprocal;
+       s32 cell_overhead;
 
        struct crndstate {
                u32 last;
@@ -298,6 +304,26 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
        return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
+static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
+{
+       u64 ticks;
+
+       len += q->packet_overhead;
+
+       if (q->cell_size) {
+               u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);
+
+               if (len > cells * q->cell_size) /* extra cell needed for remainder */
+                       cells++;
+               len = cells * (q->cell_size + q->cell_overhead);
+       }
+
+       ticks = (u64)len * NSEC_PER_SEC;
+
+       do_div(ticks, q->rate);
+       return PSCHED_NS2TICKS(ticks);
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -371,6 +397,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                                  &q->delay_cor, q->delay_dist);
 
                now = psched_get_time();
+
+               if (q->rate) {
+                       struct sk_buff_head *list = &q->qdisc->q;
+
+                       delay += packet_len_2_sched_time(skb->len, q);
+
+                       if (!skb_queue_empty(list)) {
+                               /*
+                                * Last packet in queue is reference point (now).
+                                * First packet in queue is already in flight,
+                                * calculate this time bonus and subtract it
+                                * from delay.
+                                */
+                               delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+                               now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
+                       }
+               }
+
                cb->time_to_send = now + delay;
                ++q->counter;
                ret = qdisc_enqueue(skb, q->qdisc);
@@ -535,6 +579,19 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
        init_crandom(&q->corrupt_cor, r->correlation);
 }
 
+static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       const struct tc_netem_rate *r = nla_data(attr);
+
+       q->rate = r->rate;
+       q->packet_overhead = r->packet_overhead;
+       q->cell_size = r->cell_size;
+       if (q->cell_size)
+               q->cell_size_reciprocal = reciprocal_value(q->cell_size);
+       q->cell_overhead = r->cell_overhead;
+}
+
 static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -594,6 +651,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
        [TCA_NETEM_CORR]        = { .len = sizeof(struct tc_netem_corr) },
        [TCA_NETEM_REORDER]     = { .len = sizeof(struct tc_netem_reorder) },
        [TCA_NETEM_CORRUPT]     = { .len = sizeof(struct tc_netem_corrupt) },
+       [TCA_NETEM_RATE]        = { .len = sizeof(struct tc_netem_rate) },
        [TCA_NETEM_LOSS]        = { .type = NLA_NESTED },
 };
 
@@ -666,6 +724,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_NETEM_CORRUPT])
                get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
 
+       if (tb[TCA_NETEM_RATE])
+               get_rate(sch, tb[TCA_NETEM_RATE]);
+
        q->loss_model = CLG_RANDOM;
        if (tb[TCA_NETEM_LOSS])
                ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -846,6 +907,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct tc_netem_corr cor;
        struct tc_netem_reorder reorder;
        struct tc_netem_corrupt corrupt;
+       struct tc_netem_rate rate;
 
        qopt.latency = q->latency;
        qopt.jitter = q->jitter;
@@ -868,6 +930,12 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
        corrupt.correlation = q->corrupt_cor.rho;
        NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);
 
+       rate.rate = q->rate;
+       rate.packet_overhead = q->packet_overhead;
+       rate.cell_size = q->cell_size;
+       rate.cell_overhead = q->cell_overhead;
+       NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);
+
        if (dump_loss_model(q, skb) != 0)
                goto nla_put_failure;
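Editorial note: with TCA_NETEM_RATE set, netem adds a per-packet transmission delay. packet_len_2_sched_time() adds packet_overhead to the length, optionally rounds it up to whole cells of cell_size (plus cell_overhead per cell), then computes len * NSEC_PER_SEC / rate. A worked example with hypothetical numbers, assuming rate is expressed in bytes per second as that division implies:

/* Hypothetical input, purely to illustrate packet_len_2_sched_time():
 *   rate            = 125000 bytes/s   (~1 Mbit/s)
 *   packet_overhead = 0
 *   cell_size       = 53, cell_overhead = 0   (ATM-style cells)
 *   skb->len        = 1500 bytes
 *
 *   cells = 1500 / 53 = 28, remainder 16  ->  29 cells
 *   len   = 29 * 53   = 1537 bytes
 *   ticks = 1537 * NSEC_PER_SEC / 125000 = 12296000 ns  (~12.3 ms)
 */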
 
index d617161..ce2256a 100644 (file)
@@ -39,6 +39,7 @@
 struct red_sched_data {
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
+       struct timer_list       adapt_timer;
        struct red_parms        parms;
        struct red_stats        stats;
        struct Qdisc            *qdisc;
@@ -161,12 +162,15 @@ static void red_reset(struct Qdisc *sch)
 static void red_destroy(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
+
+       del_timer_sync(&q->adapt_timer);
        qdisc_destroy(q->qdisc);
 }
 
 static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
        [TCA_RED_STAB]  = { .len = RED_STAB_SIZE },
+       [TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
 static int red_change(struct Qdisc *sch, struct nlattr *opt)
@@ -176,6 +180,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        struct tc_red_qopt *ctl;
        struct Qdisc *child = NULL;
        int err;
+       u32 max_P;
 
        if (opt == NULL)
                return -EINVAL;
@@ -188,6 +193,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
            tb[TCA_RED_STAB] == NULL)
                return -EINVAL;
 
+       max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+
        ctl = nla_data(tb[TCA_RED_PARMS]);
 
        if (ctl->limit > 0) {
@@ -206,8 +213,13 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
-                                ctl->Plog, ctl->Scell_log,
-                                nla_data(tb[TCA_RED_STAB]));
+                     ctl->Plog, ctl->Scell_log,
+                     nla_data(tb[TCA_RED_STAB]),
+                     max_P);
+
+       del_timer(&q->adapt_timer);
+       if (ctl->flags & TC_RED_ADAPTATIVE)
+               mod_timer(&q->adapt_timer, jiffies + HZ/2);
 
        if (!q->qdisc->q.qlen)
                red_start_of_idle_period(&q->parms);
@@ -216,11 +228,24 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
        return 0;
 }
 
+static inline void red_adaptative_timer(unsigned long arg)
+{
+       struct Qdisc *sch = (struct Qdisc *)arg;
+       struct red_sched_data *q = qdisc_priv(sch);
+       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+
+       spin_lock(root_lock);
+       red_adaptative_algo(&q->parms);
+       mod_timer(&q->adapt_timer, jiffies + HZ/2);
+       spin_unlock(root_lock);
+}
+
 static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
        q->qdisc = &noop_qdisc;
+       setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
        return red_change(sch, opt);
 }
 
@@ -243,6 +268,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
        NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
+       NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
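Editorial note: sch_red now adapts max_P from a periodic timer. red_init() prepares it with setup_timer(), red_adaptative_timer() re-arms itself every HZ/2 under the qdisc root lock, and red_destroy() uses del_timer_sync() so a still-running callback cannot touch freed memory. The self-rearming pattern in isolation, sketched against the timer API of this kernel generation:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list adapt_timer;

static void adapt_timer_fn(unsigned long arg)
{
        /* ... periodic adjustment work ... */
        mod_timer(&adapt_timer, jiffies + HZ / 2);      /* re-arm */
}

static void adapt_init(void)
{
        setup_timer(&adapt_timer, adapt_timer_fn, 0);
        mod_timer(&adapt_timer, jiffies + HZ / 2);
}

static void adapt_destroy(void)
{
        del_timer_sync(&adapt_timer);   /* waits for a running callback */
}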
index e83c272..96e42ca 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
+#include <net/flow_keys.h>
 
 /*
  * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -286,6 +287,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        u32 minqlen = ~0;
        u32 r, slot, salt, sfbhash;
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+       struct flow_keys keys;
 
        if (unlikely(sch->q.qlen >= q->limit)) {
                sch->qstats.overlimits++;
@@ -309,13 +311,19 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                /* If using external classifiers, get result and record it. */
                if (!sfb_classify(skb, q, &ret, &salt))
                        goto other_drop;
+               keys.src = salt;
+               keys.dst = 0;
+               keys.ports = 0;
        } else {
-               salt = skb_get_rxhash(skb);
+               skb_flow_dissect(skb, &keys);
        }
 
        slot = q->slot;
 
-       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+       sfbhash = jhash_3words((__force u32)keys.dst,
+                              (__force u32)keys.src,
+                              (__force u32)keys.ports,
+                              q->bins[slot].perturbation);
        if (!sfbhash)
                sfbhash = 1;
        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -347,7 +355,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (unlikely(p_min >= SFB_MAX_PROB)) {
                /* Inelastic flow */
                if (q->double_buffering) {
-                       sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+                       sfbhash = jhash_3words((__force u32)keys.dst,
+                                              (__force u32)keys.src,
+                                              (__force u32)keys.ports,
+                                              q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
index 4f5510e..30cda70 100644 (file)
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/ipv6.h>
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/flow_keys.h>
 
 
 /*     Stochastic Fairness Queuing algorithm.
@@ -137,61 +136,17 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
        return &q->dep[val - SFQ_SLOTS];
 }
 
-static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_hash(const struct sfq_sched_data *q,
+                            const struct sk_buff *skb)
 {
-       return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
-}
-
-static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
-{
-       u32 h, h2;
-
-       switch (skb->protocol) {
-       case htons(ETH_P_IP):
-       {
-               const struct iphdr *iph;
-               int poff;
-
-               if (!pskb_network_may_pull(skb, sizeof(*iph)))
-                       goto err;
-               iph = ip_hdr(skb);
-               h = (__force u32)iph->daddr;
-               h2 = (__force u32)iph->saddr ^ iph->protocol;
-               if (ip_is_fragment(iph))
-                       break;
-               poff = proto_ports_offset(iph->protocol);
-               if (poff >= 0 &&
-                   pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
-                       iph = ip_hdr(skb);
-                       h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
-               }
-               break;
-       }
-       case htons(ETH_P_IPV6):
-       {
-               const struct ipv6hdr *iph;
-               int poff;
-
-               if (!pskb_network_may_pull(skb, sizeof(*iph)))
-                       goto err;
-               iph = ipv6_hdr(skb);
-               h = (__force u32)iph->daddr.s6_addr32[3];
-               h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
-               poff = proto_ports_offset(iph->nexthdr);
-               if (poff >= 0 &&
-                   pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
-                       iph = ipv6_hdr(skb);
-                       h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
-               }
-               break;
-       }
-       default:
-err:
-               h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
-               h2 = (unsigned long)skb->sk;
-       }
+       struct flow_keys keys;
+       unsigned int hash;
 
-       return sfq_fold_hash(q, h, h2);
+       skb_flow_dissect(skb, &keys);
+       hash = jhash_3words((__force u32)keys.dst,
+                           (__force u32)keys.src ^ keys.ip_proto,
+                           (__force u32)keys.ports, q->perturbation);
+       return hash & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
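Editorial note: sfq_hash() above finishes with hash & (q->divisor - 1). That replaces a modulo only because the SFQ divisor is constrained to a power of two (1024 by default), where masking with divisor - 1 selects the same bucket without a division. A tiny illustration with a hypothetical divisor:

/* For any power-of-two divisor, e.g. 1024: */
u32 bucket_mod  = hash % 1024;          /* modulo */
u32 bucket_mask = hash & (1024 - 1);    /* identical result, no division */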
index 4f4c52c..4532659 100644 (file)
@@ -277,7 +277,7 @@ static inline int teql_resolve(struct sk_buff *skb,
                return 0;
 
        rcu_read_lock();
-       mn = dst_get_neighbour(dst);
+       mn = dst_get_neighbour_noref(dst);
        res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
        rcu_read_unlock();
 
@@ -310,7 +310,7 @@ restart:
 
                if (slave_txq->qdisc_sleeping != q)
                        continue;
-               if (__netif_subqueue_stopped(slave, subq) ||
+               if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
                    !netif_running(slave)) {
                        busy = 1;
                        continue;
@@ -321,7 +321,7 @@ restart:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
 
-                               if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+                               if (!netif_xmit_frozen_or_stopped(slave_txq) &&
                                    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
                                        txq_trans_update(slave_txq);
                                        __netif_tx_unlock(slave_txq);
@@ -333,7 +333,7 @@ restart:
                                }
                                __netif_tx_unlock(slave_txq);
                        }
-                       if (netif_queue_stopped(dev))
+                       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
                                busy = 1;
                        break;
                case 1:
index b7692aa..80f71af 100644 (file)
@@ -105,7 +105,7 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
 struct sctp_input_cb {
        union {
                struct inet_skb_parm    h4;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm   h6;
 #endif
        } header;
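Editorial note: the repeated guard rewrite in the SCTP and SUNRPC hunks below is the IS_ENABLED() helper from <linux/kconfig.h>. IS_ENABLED(CONFIG_IPV6) evaluates to 1 when IPv6 is built in or modular and to 0 otherwise, replacing the longer defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) test. A short sketch; handle_ipv6() is a hypothetical placeholder:

#include <linux/kconfig.h>

static void handle_ipv6(void) { }       /* hypothetical helper */

#if IS_ENABLED(CONFIG_IPV6)
/* this block is compiled when CONFIG_IPV6=y or CONFIG_IPV6=m */
#endif

static void setup(void)
{
        /* also usable in plain C; the dead branch is optimized away */
        if (IS_ENABLED(CONFIG_IPV6))
                handle_ipv6();
}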
index 8104278..91f4791 100644 (file)
@@ -107,7 +107,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
-                       ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr);
+                       addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
                        spin_lock_bh(&sctp_local_addr_lock);
@@ -219,8 +219,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        /* Fill in the dest address from the route entry passed with the skb
         * and the source address from the transport.
         */
-       ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
-       ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+       fl6.daddr = transport->ipaddr.v6.sin6_addr;
+       fl6.saddr = transport->saddr.v6.sin6_addr;
 
        fl6.flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl6.flowlabel);
@@ -231,7 +231,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
-               ipv6_addr_copy(&fl6.daddr, rt0->addr);
+               fl6.daddr = *rt0->addr;
        }
 
        SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
@@ -265,7 +265,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        sctp_scope_t scope;
 
        memset(fl6, 0, sizeof(struct flowi6));
-       ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
+       fl6->daddr = daddr->v6.sin6_addr;
        fl6->fl6_dport = daddr->v6.sin6_port;
        fl6->flowi6_proto = IPPROTO_SCTP;
        if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
@@ -277,7 +277,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                fl6->fl6_sport = htons(asoc->base.bind_addr.port);
 
        if (saddr) {
-               ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
+               fl6->saddr = saddr->v6.sin6_addr;
                fl6->fl6_sport = saddr->v6.sin6_port;
                SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
        }
@@ -334,7 +334,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
        }
        rcu_read_unlock();
        if (baddr) {
-               ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+               fl6->saddr = baddr->v6.sin6_addr;
                fl6->fl6_sport = baddr->v6.sin6_port;
                dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
        }
@@ -375,7 +375,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
 
        if (t->dst) {
                saddr->v6.sin6_family = AF_INET6;
-               ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
+               saddr->v6.sin6_addr = fl6->saddr;
        }
 }
 
@@ -400,7 +400,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
-                       ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
+                       addr->a.v6.sin6_addr = ifp->addr;
                        addr->a.v6.sin6_scope_id = dev->ifindex;
                        addr->valid = 1;
                        INIT_LIST_HEAD(&addr->list);
@@ -416,7 +416,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
                             int is_saddr)
 {
-       void *from;
        __be16 *port;
        struct sctphdr *sh;
 
@@ -428,12 +427,11 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
        sh = sctp_hdr(skb);
        if (is_saddr) {
                *port  = sh->source;
-               from = &ipv6_hdr(skb)->saddr;
+               addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
        } else {
                *port = sh->dest;
-               from = &ipv6_hdr(skb)->daddr;
+               addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
        }
-       ipv6_addr_copy(&addr->v6.sin6_addr, from);
 }
 
 /* Initialize an sctp_addr from a socket. */
@@ -441,7 +439,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
 {
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = 0;
-       ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
+       addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -454,7 +452,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
                inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
                        addr->v4.sin_addr.s_addr;
        } else {
-               ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
+               inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
        }
 }
 
@@ -467,7 +465,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
                inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
                inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        } else {
-               ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
+               inet6_sk(sk)->daddr = addr->v6.sin6_addr;
        }
 }
 
@@ -479,7 +477,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr,
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = port;
        addr->v6.sin6_flowinfo = 0; /* BUG */
-       ipv6_addr_copy(&addr->v6.sin6_addr, &param->v6.addr);
+       addr->v6.sin6_addr = param->v6.addr;
        addr->v6.sin6_scope_id = iif;
 }
 
@@ -493,7 +491,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
 
        param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS;
        param->v6.param_hdr.length = htons(length);
-       ipv6_addr_copy(&param->v6.addr, &addr->v6.sin6_addr);
+       param->v6.addr = addr->v6.sin6_addr;
 
        return length;
 }
@@ -504,7 +502,7 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
 {
        addr->sa.sa_family = AF_INET6;
        addr->v6.sin6_port = port;
-       ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
+       addr->v6.sin6_addr = *saddr;
 }
 
 /* Compare addresses exactly.
@@ -759,7 +757,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
                }
 
                sin6from = &asoc->peer.primary_addr.v6;
-               ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
+               sin6->sin6_addr = sin6from->sin6_addr;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                        sin6->sin6_scope_id = sin6from->sin6_scope_id;
        }
@@ -787,7 +785,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
                }
 
                /* Otherwise, just copy the v6 address. */
-               ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
+               sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
                        struct sctp_ulpevent *ev = sctp_skb2event(skb);
                        sin6->sin6_scope_id = ev->iif;
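Editorial note: the ipv6_addr_copy() removals throughout this file are mechanical. struct in6_addr holds no pointers, so plain structure assignment copies it completely, and the helper was being retired in favour of that assignment. Sketch of the equivalence:

#include <linux/in6.h>

static void copy_addr(struct in6_addr *dst, const struct in6_addr *src)
{
        *dst = *src;    /* plain struct assignment replaces ipv6_addr_copy(dst, src) */
}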
index 61b9fca..544a9b6 100644 (file)
@@ -637,7 +637,7 @@ void sctp_addr_wq_timeout_handler(unsigned long arg)
                    " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
                    addrw);
 
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                /* Now we send an ASCONF for each association */
                /* Note. we currently don't handle link local IPv6 addressees */
                if (addrw->a.sa.sa_family == AF_INET6) {
index 0121e0a..a85eeeb 100644 (file)
@@ -3400,8 +3400,10 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
                asconf_len -= length;
        }
 
-       if (no_err && asoc->src_out_of_asoc_ok)
+       if (no_err && asoc->src_out_of_asoc_ok) {
                asoc->src_out_of_asoc_ok = 0;
+               sctp_transport_immediate_rtx(asoc->peer.primary_path);
+       }
 
        /* Free the cached last sent asconf chunk. */
        list_del_init(&asconf->transmitted_list);
index 76388b0..1ff51c9 100644 (file)
@@ -666,6 +666,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
                                  struct sctp_chunk *chunk)
 {
        sctp_sender_hb_info_t *hbinfo;
+       int was_unconfirmed = 0;
 
        /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
         * HEARTBEAT should clear the error counter of the destination
@@ -692,9 +693,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* Mark the destination transport address as active if it is not so
         * marked.
         */
-       if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED))
+       if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+               was_unconfirmed = 1;
                sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
                                             SCTP_HEARTBEAT_SUCCESS);
+       }
 
        /* The receiver of the HEARTBEAT ACK should also perform an
         * RTT measurement for that destination transport address
@@ -712,6 +715,9 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        /* Update the heartbeat timer.  */
        if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
                sctp_transport_hold(t);
+
+       if (was_unconfirmed && asoc->peer.transport_count == 1)
+               sctp_transport_immediate_rtx(t);
 }
 
 
index 13bf5fc..db03083 100644 (file)
@@ -804,7 +804,7 @@ static int sctp_send_asconf_del_ip(struct sock              *sk,
                                struct sockaddr_in6 *sin6;
 
                                sin6 = (struct sockaddr_in6 *)addrs;
-                               ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
+                               asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
                        }
                        SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
                            " at %p\n", asoc, asoc->asconf_addr_del_pending,
@@ -6841,7 +6841,7 @@ struct proto sctp_prot = {
        .sockets_allocated = &sctp_sockets_allocated,
 };
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 struct proto sctpv6_prot = {
        .name           = "SCTPv6",
@@ -6872,4 +6872,4 @@ struct proto sctpv6_prot = {
        .memory_allocated = &sctp_memory_allocated,
        .sockets_allocated = &sctp_sockets_allocated,
 };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
index 394c57c..3889330 100644 (file)
@@ -641,3 +641,19 @@ void sctp_transport_reset(struct sctp_transport *t)
        t->cacc.next_tsn_at_change = 0;
        t->cacc.cacc_saw_newack = 0;
 }
+
+/* Schedule retransmission on the given transport */
+void sctp_transport_immediate_rtx(struct sctp_transport *t)
+{
+       /* Stop pending T3_rtx_timer */
+       if (timer_pending(&t->T3_rtx_timer)) {
+               (void)del_timer(&t->T3_rtx_timer);
+               sctp_transport_put(t);
+       }
+       sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
+       if (!timer_pending(&t->T3_rtx_timer)) {
+               if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
+                       sctp_transport_hold(t);
+       }
+       return;
+}
index 2877647..e62b4f0 100644 (file)
@@ -538,6 +538,8 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
                *tx_flags |= SKBTX_HW_TSTAMP;
        if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
                *tx_flags |= SKBTX_SW_TSTAMP;
+       if (sock_flag(sk, SOCK_WIFI_STATUS))
+               *tx_flags |= SKBTX_WIFI_STATUS;
        return 0;
 }
 EXPORT_SYMBOL(sock_tx_timestamp);
@@ -549,6 +551,8 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
 
        sock_update_classid(sock->sk);
 
+       sock_update_netprioidx(sock->sk);
+
        si->sock = sock;
        si->scm = NULL;
        si->msg = msg;
@@ -674,6 +678,22 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
 
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+       struct sk_buff *skb)
+{
+       int ack;
+
+       if (!sock_flag(sk, SOCK_WIFI_STATUS))
+               return;
+       if (!skb->wifi_acked_valid)
+               return;
+
+       ack = skb->wifi_acked;
+
+       put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack);
+}
+EXPORT_SYMBOL_GPL(__sock_recv_wifi_status);
+
 static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
                                   struct sk_buff *skb)
 {
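Editorial note: __sock_recv_wifi_status() above surfaces the 802.11 ACK status of a transmitted frame as an SCM_WIFI_STATUS control message once the socket has set the SOCK_WIFI_STATUS flag (via the matching SO_WIFI_STATUS option). A userspace sketch of extracting that value from a recvmsg() result, assuming the socket family hands such frames back to the sender and the installed headers define SCM_WIFI_STATUS:

#include <sys/socket.h>
#include <string.h>

/* Returns 1 and stores the ack flag if an SCM_WIFI_STATUS cmsg is present. */
static int get_wifi_ack(struct msghdr *msg, int *acked)
{
        struct cmsghdr *cmsg;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_WIFI_STATUS) {
                        memcpy(acked, CMSG_DATA(cmsg), sizeof(*acked));
                        return 1;
                }
        }
        return 0;
}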
index 67a655e..ee77742 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
                                  char *buf, const int buflen)
@@ -91,7 +91,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
        return len;
 }
 
-#else  /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#else  /* !IS_ENABLED(CONFIG_IPV6) */
 
 static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
                                  char *buf, const int buflen)
@@ -105,7 +105,7 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
        return 0;
 }
 
-#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
+#endif /* !IS_ENABLED(CONFIG_IPV6) */
 
 static int rpc_ntop4(const struct sockaddr *sap,
                     char *buf, const size_t buflen)
@@ -155,7 +155,7 @@ static size_t rpc_pton4(const char *buf, const size_t buflen,
        return sizeof(struct sockaddr_in);
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 static int rpc_parse_scope_id(const char *buf, const size_t buflen,
                              const char *delim, struct sockaddr_in6 *sin6)
 {
index 6e03888..9d01d46 100644 (file)
@@ -826,7 +826,7 @@ static int __svc_rpcb_register4(const u32 program, const u32 version,
        return error;
 }
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 /*
  * Register an "inet6" protocol family netid with the local
  * rpcbind daemon via an rpcbind v4 SET request.
@@ -872,7 +872,7 @@ static int __svc_rpcb_register6(const u32 program, const u32 version,
 
        return error;
 }
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* IS_ENABLED(CONFIG_IPV6) */
 
 /*
  * Register a kernel RPC service via rpcbind version 4.
@@ -893,11 +893,11 @@ static int __svc_register(const char *progname,
                error = __svc_rpcb_register4(program, version,
                                                protocol, port);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                error = __svc_rpcb_register6(program, version,
                                                protocol, port);
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        }
 
        if (error < 0)
index 447cd0e..38649cf 100644 (file)
@@ -179,13 +179,13 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                .sin_addr.s_addr        = htonl(INADDR_ANY),
                .sin_port               = htons(port),
        };
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        struct sockaddr_in6 sin6 = {
                .sin6_family            = AF_INET6,
                .sin6_addr              = IN6ADDR_ANY_INIT,
                .sin6_port              = htons(port),
        };
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        struct sockaddr *sap;
        size_t len;
 
@@ -194,12 +194,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
                sap = (struct sockaddr *)&sin;
                len = sizeof(sin);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                sap = (struct sockaddr *)&sin6;
                len = sizeof(sin6);
                break;
-#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif
        default:
                return ERR_PTR(-EAFNOSUPPORT);
        }
index ce13632..01153ea 100644 (file)
@@ -134,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
        struct ip_map *item = container_of(citem, struct ip_map, h);
 
        strcpy(new->m_class, item->m_class);
-       ipv6_addr_copy(&new->m_addr, &item->m_addr);
+       new->m_addr = item->m_addr;
 }
 static void update(struct cache_head *cnew, struct cache_head *citem)
 {
@@ -220,7 +220,7 @@ static int ip_map_parse(struct cache_detail *cd,
                ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
                                &sin6.sin6_addr);
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
@@ -274,7 +274,7 @@ static int ip_map_show(struct seq_file *m,
        }
        im = container_of(h, struct ip_map, h);
        /* class addr domain */
-       ipv6_addr_copy(&addr, &im->m_addr);
+       addr = im->m_addr;
 
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
@@ -297,7 +297,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
        struct cache_head *ch;
 
        strcpy(ip.m_class, class);
-       ipv6_addr_copy(&ip.m_addr, addr);
+       ip.m_addr = *addr;
        ch = sunrpc_cache_lookup(cd, &ip.h,
                                 hash_str(class, IP_HASHBITS) ^
                                 hash_ip6(*addr));
index 71bed1c..4653286 100644 (file)
@@ -157,7 +157,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
                        cmh->cmsg_level = SOL_IPV6;
                        cmh->cmsg_type = IPV6_PKTINFO;
                        pki->ipi6_ifindex = daddr->sin6_scope_id;
-                       ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
+                       pki->ipi6_addr = daddr->sin6_addr;
                        cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                }
                break;
@@ -523,7 +523,7 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                return 0;
 
        daddr->sin6_family = AF_INET6;
-       ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+       daddr->sin6_addr = pki->ipi6_addr;
        daddr->sin6_scope_id = pki->ipi6_ifindex;
        return 1;
 }
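
The svcauth_unix.c and svcsock.c hunks drop the ipv6_addr_copy() helper in favor of plain structure assignment, which copies the whole fixed-size struct in6_addr. A small userspace sketch of why the two forms are equivalent; the struct layout here is a stand-in, not the kernel's.

#include <assert.h>
#include <string.h>

struct in6_addr_sketch {                /* stand-in for struct in6_addr */
        unsigned char s6_addr[16];
};

/* What a memcpy-style copy helper boils down to. */
static void addr_copy_helper(struct in6_addr_sketch *dst,
                             const struct in6_addr_sketch *src)
{
        memcpy(dst, src, sizeof(*dst));
}

int main(void)
{
        struct in6_addr_sketch a = { { 0x20, 0x01, 0x0d, 0xb8 } };
        struct in6_addr_sketch b, c;

        addr_copy_helper(&b, &a);       /* old style: helper call  */
        c = a;                          /* new style: assignment   */

        assert(memcmp(&b, &c, sizeof(a)) == 0);
        return 0;
}
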
index 1f1ef70..2e4444f 100644 (file)
@@ -121,15 +121,16 @@ config CFG80211_WEXT
 
 config WIRELESS_EXT_SYSFS
        bool "Wireless extensions sysfs files"
-       default y
        depends on WEXT_CORE && SYSFS
        help
          This option enables the deprecated wireless statistics
          files in /sys/class/net/*/wireless/. The same information
          is available via the ioctls as well.
 
-         Say Y if you have programs using it, like old versions of
-         hal.
+         Say N. If you know you have ancient tools requiring it,
+         like very old versions of hal (prior to the 0.5.12 release),
+         say Y and update the tools as soon as possible, as this
+         option will be removed soon.
 
 config LIB80211
        tristate "Common routines for IEEE802.11 drivers"
index 17cd0c0..2fcfe09 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright 2009      Johannes Berg <johannes@sipsolutions.net>
  */
 
+#include <linux/export.h>
 #include <net/cfg80211.h>
 #include "core.h"
 
@@ -44,9 +45,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
        return chan;
 }
 
-static bool can_beacon_sec_chan(struct wiphy *wiphy,
-                               struct ieee80211_channel *chan,
-                               enum nl80211_channel_type channel_type)
+int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
+                                 struct ieee80211_channel *chan,
+                                 enum nl80211_channel_type channel_type)
 {
        struct ieee80211_channel *sec_chan;
        int diff;
@@ -75,6 +76,7 @@ static bool can_beacon_sec_chan(struct wiphy *wiphy,
 
        return true;
 }
+EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
 
 int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
                      struct wireless_dev *wdev, int freq,
@@ -109,8 +111,8 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
                switch (channel_type) {
                case NL80211_CHAN_HT40PLUS:
                case NL80211_CHAN_HT40MINUS:
-                       if (!can_beacon_sec_chan(&rdev->wiphy, chan,
-                                                channel_type)) {
+                       if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
+                                                         channel_type)) {
                                printk(KERN_DEBUG
                                       "cfg80211: Secondary channel not "
                                       "allowed to initiate communication\n");
index 220f3bd..ccdfed8 100644 (file)
@@ -492,6 +492,10 @@ int wiphy_register(struct wiphy *wiphy)
                    !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
                return -EINVAL;
 
+       if (WARN_ON(wiphy->ap_sme_capa &&
+                   !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
+               return -EINVAL;
+
        if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
                return -EINVAL;
 
index b9ec306..fb08c28 100644 (file)
@@ -54,6 +54,8 @@ struct cfg80211_registered_device {
        int opencount; /* also protected by devlist_mtx */
        wait_queue_head_t dev_wait;
 
+       u32 ap_beacons_nlpid;
+
        /* BSSes/scanning */
        spinlock_t bss_lock;
        struct list_head bss_list;
@@ -339,13 +341,17 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                          const u8 *bssid, const u8 *prev_bssid,
                          const u8 *ssid, int ssid_len,
                          const u8 *ie, int ie_len, bool use_mfp,
-                         struct cfg80211_crypto_settings *crypt);
+                         struct cfg80211_crypto_settings *crypt,
+                         u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+                         struct ieee80211_ht_cap *ht_capa_mask);
 int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                        struct net_device *dev, struct ieee80211_channel *chan,
                        const u8 *bssid, const u8 *prev_bssid,
                        const u8 *ssid, int ssid_len,
                        const u8 *ie, int ie_len, bool use_mfp,
-                       struct cfg80211_crypto_settings *crypt);
+                       struct cfg80211_crypto_settings *crypt,
+                       u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+                       struct ieee80211_ht_cap *ht_capa_mask);
 int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
                           struct net_device *dev, const u8 *bssid,
                           const u8 *ie, int ie_len, u16 reason,
@@ -376,7 +382,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                          enum nl80211_channel_type channel_type,
                          bool channel_type_valid, unsigned int wait,
                          const u8 *buf, size_t len, bool no_cck,
-                         u64 *cookie);
+                         bool dont_wait_for_ack, u64 *cookie);
+void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
+                              const struct ieee80211_ht_cap *ht_capa_mask);
 
 /* SME */
 int __cfg80211_connect(struct cfg80211_registered_device *rdev,
index b7b7868..8c550df 100644 (file)
@@ -20,6 +20,7 @@
  * interface
  */
 #define MESH_PREQ_MIN_INT      10
+#define MESH_PERR_MIN_INT      100
 #define MESH_DIAM_TRAVERSAL_TIME 50
 
 /*
@@ -47,6 +48,7 @@ const struct mesh_config default_mesh_config = {
        .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
        .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
        .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
+       .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
        .dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME,
        .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
        .path_refresh_time = MESH_PATH_REFRESH_TIME,
index 21fc970..438dfc1 100644 (file)
@@ -501,13 +501,32 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
        return err;
 }
 
+/* Apply a bitwise ht_capa &= ht_capa_mask. */
+void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
+                              const struct ieee80211_ht_cap *ht_capa_mask)
+{
+       int i;
+       u8 *p1, *p2;
+       if (!ht_capa_mask) {
+               memset(ht_capa, 0, sizeof(*ht_capa));
+               return;
+       }
+
+       p1 = (u8 *)ht_capa;
+       p2 = (u8 *)ht_capa_mask;
+       for (i = 0; i < sizeof(*ht_capa); i++)
+               p1[i] &= p2[i];
+}
+
 int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                          struct net_device *dev,
                          struct ieee80211_channel *chan,
                          const u8 *bssid, const u8 *prev_bssid,
                          const u8 *ssid, int ssid_len,
                          const u8 *ie, int ie_len, bool use_mfp,
-                         struct cfg80211_crypto_settings *crypt)
+                         struct cfg80211_crypto_settings *crypt,
+                         u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+                         struct ieee80211_ht_cap *ht_capa_mask)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_assoc_request req;
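
The new cfg80211_oper_and_ht_capa() clears, byte by byte, every HT capability bit that the driver's modification mask does not allow userspace to override (no mask means nothing may be overridden). Here is a plain C sketch of the same masking idea over a made-up four-byte capability blob, not the kernel structures.

#include <stdio.h>
#include <string.h>

struct caps_sketch { unsigned char b[4]; };     /* stand-in capability blob */

/* Keep only the bits the mask allows; with no mask, nothing may be kept. */
static void apply_mask(struct caps_sketch *caps, const struct caps_sketch *mask)
{
        size_t i;

        if (!mask) {
                memset(caps, 0, sizeof(*caps));
                return;
        }
        for (i = 0; i < sizeof(*caps); i++)
                caps->b[i] &= mask->b[i];
}

int main(void)
{
        struct caps_sketch caps = { { 0xff, 0xff, 0xff, 0xff } };
        struct caps_sketch mask = { { 0x0f, 0x00, 0xff, 0x01 } };

        apply_mask(&caps, &mask);
        printf("%02x %02x %02x %02x\n",
               caps.b[0], caps.b[1], caps.b[2], caps.b[3]);
        return 0;
}
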
@@ -537,6 +556,15 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
        memcpy(&req.crypto, crypt, sizeof(req.crypto));
        req.use_mfp = use_mfp;
        req.prev_bssid = prev_bssid;
+       req.flags = assoc_flags;
+       if (ht_capa)
+               memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
+       if (ht_capa_mask)
+               memcpy(&req.ht_capa_mask, ht_capa_mask,
+                      sizeof(req.ht_capa_mask));
+       cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
+                                 rdev->wiphy.ht_capa_mod_mask);
+
        req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
                                   WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
        if (!req.bss) {
@@ -574,14 +602,17 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                        const u8 *bssid, const u8 *prev_bssid,
                        const u8 *ssid, int ssid_len,
                        const u8 *ie, int ie_len, bool use_mfp,
-                       struct cfg80211_crypto_settings *crypt)
+                       struct cfg80211_crypto_settings *crypt,
+                       u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
+                       struct ieee80211_ht_cap *ht_capa_mask)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
 
        wdev_lock(wdev);
        err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
-                                   ssid, ssid_len, ie, ie_len, use_mfp, crypt);
+                                   ssid, ssid_len, ie, ie_len, use_mfp, crypt,
+                                   assoc_flags, ht_capa, ht_capa_mask);
        wdev_unlock(wdev);
 
        return err;
@@ -879,6 +910,9 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
        }
 
        spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+       if (nlpid == wdev->ap_unexpected_nlpid)
+               wdev->ap_unexpected_nlpid = 0;
 }
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
@@ -901,7 +935,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                          enum nl80211_channel_type channel_type,
                          bool channel_type_valid, unsigned int wait,
                          const u8 *buf, size_t len, bool no_cck,
-                         u64 *cookie)
+                         bool dont_wait_for_ack, u64 *cookie)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        const struct ieee80211_mgmt *mgmt;
@@ -992,7 +1026,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
        /* Transmit the Action frame as requested by user space */
        return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
                                  channel_type, channel_type_valid,
-                                 wait, buf, len, no_cck, cookie);
+                                 wait, buf, len, no_cck, dont_wait_for_ack,
+                                 cookie);
 }
 
 bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1107,3 +1142,30 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
        nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
 }
 EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
+
+bool cfg80211_rx_spurious_frame(struct net_device *dev,
+                               const u8 *addr, gfp_t gfp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+                   wdev->iftype != NL80211_IFTYPE_P2P_GO))
+               return false;
+
+       return nl80211_unexpected_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
+
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
+                                       const u8 *addr, gfp_t gfp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
+                   wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+                   wdev->iftype != NL80211_IFTYPE_AP_VLAN))
+               return false;
+
+       return nl80211_unexpected_4addr_frame(dev, addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
index ffafda5..ba43966 100644 (file)
@@ -98,7 +98,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
        [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 },
        [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG },
-       [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+       [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
        [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 },
 
        [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
@@ -196,6 +196,15 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
        [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
        [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
+       [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
+       [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
+                                     .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 },
+       [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG },
+       [NL80211_ATTR_HT_CAPABILITY_MASK] = {
+               .len = NL80211_HT_CAPABILITY_LEN
+       },
+       [NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
 };
 
 /* policy for the key attributes */
@@ -203,7 +212,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
        [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
        [NL80211_KEY_IDX] = { .type = NLA_U8 },
        [NL80211_KEY_CIPHER] = { .type = NLA_U32 },
-       [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+       [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 },
        [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
        [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
        [NL80211_KEY_TYPE] = { .type = NLA_U32 },
@@ -758,6 +767,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
                    dev->wiphy.available_antennas_rx);
 
+       if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD)
+               NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
+                           dev->wiphy.probe_resp_offload);
+
        if ((dev->wiphy.available_antennas_tx ||
             dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
                u32 tx_ant = 0, rx_ant = 0;
@@ -874,7 +887,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        CMD(set_pmksa, SET_PMKSA);
        CMD(del_pmksa, DEL_PMKSA);
        CMD(flush_pmksa, FLUSH_PMKSA);
-       CMD(remain_on_channel, REMAIN_ON_CHANNEL);
+       if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
+               CMD(remain_on_channel, REMAIN_ON_CHANNEL);
        CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
        CMD(mgmt_tx, FRAME);
        CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
@@ -890,6 +904,16 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        }
        if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
                CMD(sched_scan_start, START_SCHED_SCAN);
+       CMD(probe_client, PROBE_CLIENT);
+       CMD(set_noack_map, SET_NOACK_MAP);
+       if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
+               i++;
+               NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
+       }
+
+#ifdef CONFIG_NL80211_TESTMODE
+       CMD(testmode_cmd, TESTMODE);
+#endif
 
 #undef CMD
 
@@ -905,11 +929,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
        nla_nest_end(msg, nl_cmds);
 
-       if (dev->ops->remain_on_channel)
+       if (dev->ops->remain_on_channel &&
+           dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
                NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
                            dev->wiphy.max_remain_on_channel_duration);
 
-       if (dev->ops->mgmt_tx_cancel_wait)
+       if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
                NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
 
        if (mgmt_stypes) {
@@ -1007,6 +1032,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
        if (nl80211_put_iface_combinations(&dev->wiphy, msg))
                goto nla_put_failure;
 
+       if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME)
+               NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME,
+                           dev->wiphy.ap_sme_capa);
+
+       NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
+
+       if (dev->wiphy.ht_capa_mod_mask)
+               NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
+                       sizeof(*dev->wiphy.ht_capa_mod_mask),
+                       dev->wiphy.ht_capa_mod_mask);
+
        return genlmsg_end(msg, hdr);
 
  nla_put_failure:
@@ -1725,6 +1761,23 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
        return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
 }
 
+static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       u16 noack_map;
+
+       if (!info->attrs[NL80211_ATTR_NOACK_MAP])
+               return -EINVAL;
+
+       if (!rdev->ops->set_noack_map)
+               return -EOPNOTSUPP;
+
+       noack_map = nla_get_u16(info->attrs[NL80211_ATTR_NOACK_MAP]);
+
+       return rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map);
+}
+
 struct get_key_cookie {
        struct sk_buff *msg;
        int error;
@@ -2155,6 +2208,13 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
                        nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
        }
 
+       if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
+               params.probe_resp =
+                       nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
+               params.probe_resp_len =
+                       nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
+       }
+
        err = call(&rdev->wiphy, dev, &params);
        if (!err && params.interval)
                wdev->beacon_interval = params.interval;
@@ -2453,26 +2513,34 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
 /*
  * Get vlan interface making sure it is running and on the right wiphy.
  */
-static int get_vlan(struct genl_info *info,
-                   struct cfg80211_registered_device *rdev,
-                   struct net_device **vlan)
+static struct net_device *get_vlan(struct genl_info *info,
+                                  struct cfg80211_registered_device *rdev)
 {
        struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
-       *vlan = NULL;
-
-       if (vlanattr) {
-               *vlan = dev_get_by_index(genl_info_net(info),
-                                        nla_get_u32(vlanattr));
-               if (!*vlan)
-                       return -ENODEV;
-               if (!(*vlan)->ieee80211_ptr)
-                       return -EINVAL;
-               if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
-                       return -EINVAL;
-               if (!netif_running(*vlan))
-                       return -ENETDOWN;
+       struct net_device *v;
+       int ret;
+
+       if (!vlanattr)
+               return NULL;
+
+       v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr));
+       if (!v)
+               return ERR_PTR(-ENODEV);
+
+       if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) {
+               ret = -EINVAL;
+               goto error;
        }
-       return 0;
+
+       if (!netif_running(v)) {
+               ret = -ENETDOWN;
+               goto error;
+       }
+
+       return v;
+ error:
+       dev_put(v);
+       return ERR_PTR(ret);
 }
 
 static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
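
The reworked get_vlan() returns either a valid net_device pointer, NULL (no attribute), or an encoded errno via ERR_PTR(), so callers distinguish the cases with IS_ERR()/PTR_ERR() instead of a separate out parameter. Below is a userspace sketch of that pointer-encoding convention; it illustrates the idea only and is not the kernel's err.h.

#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno in the top page of the pointer range. */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Toy lookup: returns a pointer on success, ERR_PTR(-errno) on failure. */
static void *lookup(int fail)
{
        static int value = 42;

        if (fail)
                return ERR_PTR(-19);    /* e.g. "no such device" */
        return &value;
}

int main(void)
{
        void *p = lookup(1);

        if (IS_ERR(p))
                printf("lookup failed: %ld\n", PTR_ERR(p));
        else
                printf("lookup ok: %d\n", *(int *)p);
        return 0;
}
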
@@ -2522,9 +2590,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                params.plink_state =
                    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
 
-       err = get_vlan(info, rdev, &params.vlan);
-       if (err)
-               goto out;
+       params.vlan = get_vlan(info, rdev);
+       if (IS_ERR(params.vlan))
+               return PTR_ERR(params.vlan);
 
        /* validate settings */
        err = 0;
@@ -2692,9 +2760,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
              (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
                return -EINVAL;
 
-       err = get_vlan(info, rdev, &params.vlan);
-       if (err)
-               goto out;
+       params.vlan = get_vlan(info, rdev);
+       if (IS_ERR(params.vlan))
+               return PTR_ERR(params.vlan);
 
        /* validate settings */
        err = 0;
@@ -3127,6 +3195,8 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
                        cur_params.dot11MeshHWMPactivePathTimeout);
        NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
                        cur_params.dot11MeshHWMPpreqMinInterval);
+       NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+                       cur_params.dot11MeshHWMPperrMinInterval);
        NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
                        cur_params.dot11MeshHWMPnetDiameterTraversalTime);
        NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
@@ -3161,6 +3231,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
        [NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
        [NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
        [NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
+       [NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL] = { .type = NLA_U16 },
        [NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
        [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
        [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
@@ -3235,6 +3306,9 @@ do {\
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
                        mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
                        nla_get_u16);
+       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
+                       mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+                       nla_get_u16);
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
                        dot11MeshHWMPnetDiameterTraversalTime,
                        mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
@@ -3357,6 +3431,9 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
 
        NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
                cfg80211_regdomain->alpha2);
+       if (cfg80211_regdomain->dfs_region)
+               NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
+                          cfg80211_regdomain->dfs_region);
 
        nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
        if (!nl_reg_rules)
@@ -3415,6 +3492,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
        char *alpha2 = NULL;
        int rem_reg_rules = 0, r = 0;
        u32 num_rules = 0, rule_idx = 0, size_of_regd;
+       u8 dfs_region = 0;
        struct ieee80211_regdomain *rd = NULL;
 
        if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
@@ -3425,6 +3503,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
 
        alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
 
+       if (info->attrs[NL80211_ATTR_DFS_REGION])
+               dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]);
+
        nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
                        rem_reg_rules) {
                num_rules++;
@@ -3452,6 +3533,13 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
        rd->alpha2[0] = alpha2[0];
        rd->alpha2[1] = alpha2[1];
 
+       /*
+        * Disable DFS master mode if the DFS region is
+        * not supported or known by this kernel.
+        */
+       if (reg_supported_dfs_region(dfs_region))
+               rd->dfs_region = dfs_region;
+
        nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
                        rem_reg_rules) {
                nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
@@ -4359,6 +4447,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
        const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
        int err, ssid_len, ie_len = 0;
        bool use_mfp = false;
+       u32 flags = 0;
+       struct ieee80211_ht_cap *ht_capa = NULL;
+       struct ieee80211_ht_cap *ht_capa_mask = NULL;
 
        if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
@@ -4402,11 +4493,25 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
        if (info->attrs[NL80211_ATTR_PREV_BSSID])
                prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
 
+       if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
+               flags |= ASSOC_REQ_DISABLE_HT;
+
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+               ht_capa_mask =
+                       nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]);
+
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+               if (!ht_capa_mask)
+                       return -EINVAL;
+               ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
+       }
+
        err = nl80211_crypto_settings(rdev, info, &crypto, 1);
        if (!err)
                err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
                                          ssid, ssid_len, ie, ie_len, use_mfp,
-                                         &crypto);
+                                         &crypto, flags, ht_capa,
+                                         ht_capa_mask);
 
        return err;
 }
@@ -4577,13 +4682,41 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
                ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
        }
 
-       ibss.channel = ieee80211_get_channel(wiphy,
-               nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+       if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+               enum nl80211_channel_type channel_type;
+
+               channel_type = nla_get_u32(
+                               info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+               if (channel_type != NL80211_CHAN_NO_HT &&
+                   channel_type != NL80211_CHAN_HT20 &&
+                   channel_type != NL80211_CHAN_HT40MINUS &&
+                   channel_type != NL80211_CHAN_HT40PLUS)
+                       return -EINVAL;
+
+               if (channel_type != NL80211_CHAN_NO_HT &&
+                   !(wiphy->features & NL80211_FEATURE_HT_IBSS))
+                       return -EINVAL;
+
+               ibss.channel_type = channel_type;
+       } else {
+               ibss.channel_type = NL80211_CHAN_NO_HT;
+       }
+
+       ibss.channel = rdev_freq_to_chan(rdev,
+               nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+               ibss.channel_type);
        if (!ibss.channel ||
            ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
            ibss.channel->flags & IEEE80211_CHAN_DISABLED)
                return -EINVAL;
 
+       /* Both channels should be able to initiate communication */
+       if ((ibss.channel_type == NL80211_CHAN_HT40PLUS ||
+            ibss.channel_type == NL80211_CHAN_HT40MINUS) &&
+           !cfg80211_can_beacon_sec_chan(&rdev->wiphy, ibss.channel,
+                                         ibss.channel_type))
+               return -EINVAL;
+
        ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
        ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
 
@@ -4896,6 +5029,22 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
                        return PTR_ERR(connkeys);
        }
 
+       if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
+               connect.flags |= ASSOC_REQ_DISABLE_HT;
+
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+               memcpy(&connect.ht_capa_mask,
+                      nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+                      sizeof(connect.ht_capa_mask));
+
+       if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+               if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+                       return -EINVAL;
+               memcpy(&connect.ht_capa,
+                      nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+                      sizeof(connect.ht_capa));
+       }
+
        err = cfg80211_connect(rdev, dev, &connect, connkeys);
        if (err)
                kfree(connkeys);
@@ -5083,7 +5232,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
            duration > rdev->wiphy.max_remain_on_channel_duration)
                return -EINVAL;
 
-       if (!rdev->ops->remain_on_channel)
+       if (!rdev->ops->remain_on_channel ||
+           !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
                return -EOPNOTSUPP;
 
        if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5271,12 +5421,13 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        bool channel_type_valid = false;
        u32 freq;
        int err;
-       void *hdr;
+       void *hdr = NULL;
        u64 cookie;
-       struct sk_buff *msg;
+       struct sk_buff *msg = NULL;
        unsigned int wait = 0;
-       bool offchan;
-       bool no_cck;
+       bool offchan, no_cck, dont_wait_for_ack;
+
+       dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK];
 
        if (!info->attrs[NL80211_ATTR_FRAME] ||
            !info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5295,7 +5446,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        if (info->attrs[NL80211_ATTR_DURATION]) {
-               if (!rdev->ops->mgmt_tx_cancel_wait)
+               if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
                        return -EINVAL;
                wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
        }
@@ -5313,6 +5464,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 
        offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
 
+       if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
+               return -EINVAL;
+
        no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
        freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -5320,29 +5474,36 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        if (chan == NULL)
                return -EINVAL;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return -ENOMEM;
+       if (!dont_wait_for_ack) {
+               msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               if (!msg)
+                       return -ENOMEM;
 
-       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
-                            NL80211_CMD_FRAME);
+               hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+                                    NL80211_CMD_FRAME);
 
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
-               goto free_msg;
+               if (IS_ERR(hdr)) {
+                       err = PTR_ERR(hdr);
+                       goto free_msg;
+               }
        }
+
        err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
                                    channel_type_valid, wait,
                                    nla_data(info->attrs[NL80211_ATTR_FRAME]),
                                    nla_len(info->attrs[NL80211_ATTR_FRAME]),
-                                   no_cck, &cookie);
+                                   no_cck, dont_wait_for_ack, &cookie);
        if (err)
                goto free_msg;
 
-       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+       if (msg) {
+               NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
 
-       genlmsg_end(msg, hdr);
-       return genlmsg_reply(msg, info);
+               genlmsg_end(msg, hdr);
+               return genlmsg_reply(msg, info);
+       }
+
+       return 0;
 
  nla_put_failure:
        err = -ENOBUFS;
@@ -5540,6 +5701,11 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
        setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
        setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
 
+       if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
+           !nl80211_parse_mcast_rate(rdev, setup.mcast_rate,
+                           nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
+                       return -EINVAL;
+
        if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
                /* parse additional setup parameters if given */
                err = nl80211_parse_mesh_setup(info, &setup);
@@ -5832,6 +5998,91 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
        return err;
 }
 
+static int nl80211_register_unexpected_frame(struct sk_buff *skb,
+                                            struct genl_info *info)
+{
+       struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+       if (wdev->iftype != NL80211_IFTYPE_AP &&
+           wdev->iftype != NL80211_IFTYPE_P2P_GO)
+               return -EINVAL;
+
+       if (wdev->ap_unexpected_nlpid)
+               return -EBUSY;
+
+       wdev->ap_unexpected_nlpid = info->snd_pid;
+       return 0;
+}
+
+static int nl80211_probe_client(struct sk_buff *skb,
+                               struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+       struct net_device *dev = info->user_ptr[1];
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct sk_buff *msg;
+       void *hdr;
+       const u8 *addr;
+       u64 cookie;
+       int err;
+
+       if (wdev->iftype != NL80211_IFTYPE_AP &&
+           wdev->iftype != NL80211_IFTYPE_P2P_GO)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL80211_ATTR_MAC])
+               return -EINVAL;
+
+       if (!rdev->ops->probe_client)
+               return -EOPNOTSUPP;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+                            NL80211_CMD_PROBE_CLIENT);
+
+       if (IS_ERR(hdr)) {
+               err = PTR_ERR(hdr);
+               goto free_msg;
+       }
+
+       addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+       err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie);
+       if (err)
+               goto free_msg;
+
+       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+       genlmsg_end(msg, hdr);
+
+       return genlmsg_reply(msg, info);
+
+ nla_put_failure:
+       err = -ENOBUFS;
+ free_msg:
+       nlmsg_free(msg);
+       return err;
+}
+
+static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg80211_registered_device *rdev = info->user_ptr[0];
+
+       if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS))
+               return -EOPNOTSUPP;
+
+       if (rdev->ap_beacons_nlpid)
+               return -EBUSY;
+
+       rdev->ap_beacons_nlpid = info->snd_pid;
+
+       return 0;
+}
+
 #define NL80211_FLAG_NEED_WIPHY                0x01
 #define NL80211_FLAG_NEED_NETDEV       0x02
 #define NL80211_FLAG_NEED_RTNL         0x04
@@ -6387,6 +6638,39 @@ static struct genl_ops nl80211_ops[] = {
                .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
                                  NL80211_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL80211_CMD_UNEXPECTED_FRAME,
+               .doit = nl80211_register_unexpected_frame,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_PROBE_CLIENT,
+               .doit = nl80211_probe_client,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_REGISTER_BEACONS,
+               .doit = nl80211_register_beacons,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL80211_CMD_SET_NOACK_MAP,
+               .doit = nl80211_set_noack_map,
+               .policy = nl80211_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_NETDEV |
+                                 NL80211_FLAG_NEED_RTNL,
+       },
+
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -6639,10 +6923,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
        if (wiphy_idx_valid(request->wiphy_idx))
                NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        rcu_read_lock();
        genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -6678,10 +6959,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
        NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -6762,10 +7040,7 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
        NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -6821,10 +7096,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
        if (resp_ie)
                NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -6862,10 +7134,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
        if (resp_ie)
                NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -6903,10 +7172,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
        if (ie)
                NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, GFP_KERNEL);
@@ -6939,10 +7205,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -6977,10 +7240,7 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
        if (ie_len && ie)
                NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7019,10 +7279,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
        if (tsc)
                NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7073,10 +7330,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
                goto nla_put_failure;
        nla_nest_end(msg, nl_freq);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        rcu_read_lock();
        genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
@@ -7119,10 +7373,7 @@ static void nl80211_send_remain_on_chan_event(
        if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
                NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7193,10 +7444,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
        NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7207,13 +7455,68 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
        nlmsg_free(msg);
 }
 
+static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
+                                      const u8 *addr, gfp_t gfp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct sk_buff *msg;
+       void *hdr;
+       int err;
+       u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid);
+
+       if (!nlpid)
+               return false;
+
+       msg = nlmsg_new(100, gfp);
+       if (!msg)
+               return true;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+       if (!hdr) {
+               nlmsg_free(msg);
+               return true;
+       }
+
+       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+
+       err = genlmsg_end(msg, hdr);
+       if (err < 0) {
+               nlmsg_free(msg);
+               return true;
+       }
+
+       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       return true;
+
+ nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       nlmsg_free(msg);
+       return true;
+}
+
+bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp)
+{
+       return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
+                                         addr, gfp);
+}
+
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+                                   const u8 *addr, gfp_t gfp)
+{
+       return __nl80211_unexpected_frame(dev,
+                                         NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
+                                         addr, gfp);
+}
+
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                      struct net_device *netdev, u32 nlpid,
                      int freq, const u8 *buf, size_t len, gfp_t gfp)
 {
        struct sk_buff *msg;
        void *hdr;
-       int err;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
        if (!msg)
@@ -7230,16 +7533,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
        NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
        NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
 
-       err = genlmsg_end(msg, hdr);
-       if (err < 0) {
-               nlmsg_free(msg);
-               return err;
-       }
+       genlmsg_end(msg, hdr);
 
-       err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
-       if (err < 0)
-               return err;
-       return 0;
+       return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
 
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
@@ -7272,10 +7568,7 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
        if (ack)
                NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
        return;
@@ -7317,10 +7610,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
 
        nla_nest_end(msg, pinfoattr);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7362,10 +7652,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
 
        nla_nest_end(msg, rekey_attr);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7408,10 +7695,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
 
        nla_nest_end(msg, attr);
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -7453,7 +7737,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 
        nla_nest_end(msg, pinfoattr);
 
-       if (genlmsg_end(msg, hdr) < 0) {
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+                               nl80211_mlme_mcgrp.id, gfp);
+       return;
+
+ nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       nlmsg_free(msg);
+}
+
+void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
+                          u64 cookie, bool acked, gfp_t gfp)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct sk_buff *msg;
+       void *hdr;
+       int err;
+
+       msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT);
+       if (!hdr) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+       NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+       NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
+       NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+       if (acked)
+               NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+
+       err = genlmsg_end(msg, hdr);
+       if (err < 0) {
                nlmsg_free(msg);
                return;
        }
@@ -7466,6 +7788,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
        genlmsg_cancel(msg, hdr);
        nlmsg_free(msg);
 }
+EXPORT_SYMBOL(cfg80211_probe_status);
+
+void cfg80211_report_obss_beacon(struct wiphy *wiphy,
+                                const u8 *frame, size_t len,
+                                int freq, gfp_t gfp)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct sk_buff *msg;
+       void *hdr;
+       u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid);
+
+       if (!nlpid)
+               return;
+
+       msg = nlmsg_new(len + 100, gfp);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
+       if (!hdr) {
+               nlmsg_free(msg);
+               return;
+       }
+
+       NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+       if (freq)
+               NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+       NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame);
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+       return;
+
+ nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+       nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_report_obss_beacon);
 
 static int nl80211_netlink_notify(struct notifier_block * nb,
                                  unsigned long state,
@@ -7480,9 +7841,12 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        rcu_read_lock();
 
-       list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
+       list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
                        cfg80211_mlme_unregister_socket(wdev, notify->pid);
+               if (rdev->ap_beacons_nlpid == notify->pid)
+                       rdev->ap_beacons_nlpid = 0;
+       }
 
        rcu_read_unlock();
 
index f24a1fb..12bf4d1 100644 (file)
@@ -117,4 +117,9 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
                                    struct net_device *netdev, int index,
                                    const u8 *bssid, bool preauth, gfp_t gfp);
 
+bool nl80211_unexpected_frame(struct net_device *dev,
+                             const u8 *addr, gfp_t gfp);
+bool nl80211_unexpected_4addr_frame(struct net_device *dev,
+                                   const u8 *addr, gfp_t gfp);
+
 #endif /* __NET_WIRELESS_NL80211_H */
index 3302c56..70b171a 100644 (file)
@@ -1139,6 +1139,8 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
        if (ignore_reg_update(wiphy, initiator))
                return;
 
+       last_request->dfs_region = cfg80211_regdomain->dfs_region;
+
        for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                if (wiphy->bands[band])
                        handle_band(wiphy, band, initiator);
@@ -1962,6 +1964,42 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
        }
 }
 
+bool reg_supported_dfs_region(u8 dfs_region)
+{
+       switch (dfs_region) {
+       case NL80211_DFS_UNSET:
+       case NL80211_DFS_FCC:
+       case NL80211_DFS_ETSI:
+       case NL80211_DFS_JP:
+               return true;
+       default:
+               REG_DBG_PRINT("Ignoring unknown DFS master region: %d\n",
+                             dfs_region);
+               return false;
+       }
+}
+
+static void print_dfs_region(u8 dfs_region)
+{
+       if (!dfs_region)
+               return;
+
+       switch (dfs_region) {
+       case NL80211_DFS_FCC:
+               pr_info(" DFS Master region FCC");
+               break;
+       case NL80211_DFS_ETSI:
+               pr_info(" DFS Master region ETSI");
+               break;
+       case NL80211_DFS_JP:
+               pr_info(" DFS Master region JP");
+               break;
+       default:
+               pr_info(" DFS Master region Uknown");
+               break;
+       }
+}
+
 static void print_regdomain(const struct ieee80211_regdomain *rd)
 {
 
@@ -1989,6 +2027,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
                        pr_info("Regulatory domain changed to country: %c%c\n",
                                rd->alpha2[0], rd->alpha2[1]);
        }
+       print_dfs_region(rd->dfs_region);
        print_rd_rules(rd);
 }
 
index 4a56799..786e414 100644 (file)
@@ -5,6 +5,7 @@ extern const struct ieee80211_regdomain *cfg80211_regdomain;
 
 bool is_world_regdom(const char *alpha2);
 bool reg_is_valid_request(const char *alpha2);
+bool reg_supported_dfs_region(u8 dfs_region);
 
 int regulatory_hint_user(const char *alpha2);
 
index dc23b31..31119e3 100644 (file)
@@ -355,8 +355,8 @@ static bool is_mesh(struct cfg80211_bss *a,
            sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
 }
 
-static int cmp_bss(struct cfg80211_bss *a,
-                  struct cfg80211_bss *b)
+static int cmp_bss_core(struct cfg80211_bss *a,
+                       struct cfg80211_bss *b)
 {
        int r;
 
@@ -378,7 +378,15 @@ static int cmp_bss(struct cfg80211_bss *a,
                               b->len_information_elements);
        }
 
-       r = memcmp(a->bssid, b->bssid, ETH_ALEN);
+       return memcmp(a->bssid, b->bssid, ETH_ALEN);
+}
+
+static int cmp_bss(struct cfg80211_bss *a,
+                  struct cfg80211_bss *b)
+{
+       int r;
+
+       r = cmp_bss_core(a, b);
        if (r)
                return r;
 
@@ -389,6 +397,52 @@ static int cmp_bss(struct cfg80211_bss *a,
                       b->len_information_elements);
 }
 
+static int cmp_hidden_bss(struct cfg80211_bss *a,
+                         struct cfg80211_bss *b)
+{
+       const u8 *ie1;
+       const u8 *ie2;
+       int i;
+       int r;
+
+       r = cmp_bss_core(a, b);
+       if (r)
+               return r;
+
+       ie1 = cfg80211_find_ie(WLAN_EID_SSID,
+                       a->information_elements,
+                       a->len_information_elements);
+       ie2 = cfg80211_find_ie(WLAN_EID_SSID,
+                       b->information_elements,
+                       b->len_information_elements);
+
+       /* The key comparator must use the same algorithm as any rb-tree
+        * search function (order is important), otherwise the ordering of
+        * items in the tree is broken and searches give incorrect results.
+        * This code uses the same order as cmp_ies() does. */
+
+       /* sort missing IE before (left of) present IE */
+       if (!ie1)
+               return -1;
+       if (!ie2)
+               return 1;
+
+       /* zero-size SSID is used as an indication of the hidden bss */
+       if (!ie2[1])
+               return 0;
+
+       /* sort by length first, then by contents */
+       if (ie1[1] != ie2[1])
+               return ie2[1] - ie1[1];
+
+       /* zeroed SSID ie is another indication of a hidden bss */
+       for (i = 0; i < ie2[1]; i++)
+               if (ie2[i + 2])
+                       return -1;
+
+       return 0;
+}
+
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      struct ieee80211_channel *channel,
                                      const u8 *bssid,
@@ -505,6 +559,48 @@ rb_find_bss(struct cfg80211_registered_device *dev,
 }
 
 static struct cfg80211_internal_bss *
+rb_find_hidden_bss(struct cfg80211_registered_device *dev,
+                  struct cfg80211_internal_bss *res)
+{
+       struct rb_node *n = dev->bss_tree.rb_node;
+       struct cfg80211_internal_bss *bss;
+       int r;
+
+       while (n) {
+               bss = rb_entry(n, struct cfg80211_internal_bss, rbn);
+               r = cmp_hidden_bss(&res->pub, &bss->pub);
+
+               if (r == 0)
+                       return bss;
+               else if (r < 0)
+                       n = n->rb_left;
+               else
+                       n = n->rb_right;
+       }
+
+       return NULL;
+}
+
+static void
+copy_hidden_ies(struct cfg80211_internal_bss *res,
+                struct cfg80211_internal_bss *hidden)
+{
+       if (unlikely(res->pub.beacon_ies))
+               return;
+       if (WARN_ON(!hidden->pub.beacon_ies))
+               return;
+
+       res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC);
+       if (unlikely(!res->pub.beacon_ies))
+               return;
+
+       res->beacon_ies_allocated = true;
+       res->pub.len_beacon_ies = hidden->pub.len_beacon_ies;
+       memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies,
+                       res->pub.len_beacon_ies);
+}
+
+static struct cfg80211_internal_bss *
 cfg80211_bss_update(struct cfg80211_registered_device *dev,
                    struct cfg80211_internal_bss *res)
 {
@@ -607,6 +703,21 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
                kref_put(&res->ref, bss_release);
        } else {
+               struct cfg80211_internal_bss *hidden;
+
+               /* First check whether this entry is a probe response for a
+                * bss whose beacons hide the ssid. If so, copy the beacon
+                * ies (with the nullified ssid) from the hidden entry into
+                * this probe response entry (which has the real ssid). This
+                * is needed mainly for PSM, since probe responses do not
+                * contain a tim ie. */
+
+               /* TODO: The code is not trying to update existing probe
+                * response bss entries when beacon ies are
+                * getting changed. */
+               hidden = rb_find_hidden_bss(dev, res);
+               if (hidden)
+                       copy_hidden_ies(res, hidden);
+
                /* this "consumes" the reference */
                list_add_tail(&res->list, &dev->bss_list);
                rb_insert_bss(dev, res);
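
The matching rule that cmp_hidden_bss() implements is easier to see in isolation: a beacon from an AP that hides its SSID carries either a zero-length SSID IE or an SSID IE of the expected length filled with zero bytes, and only such entries are treated as the beacon counterpart of a probe-response entry. A standalone sketch of that check, with hypothetical helper and parameter names:

    /* Sketch: does 'beacon_ie' (SSID IE from a beacon entry) look like the
     * hidden counterpart of 'probe_ie' (SSID IE from a probe response)?
     * Mirrors the zero-length / zeroed-bytes tests in cmp_hidden_bss(). */
    static bool ssid_ie_hidden_match(const u8 *probe_ie, const u8 *beacon_ie)
    {
            int i;

            if (!probe_ie || !beacon_ie)
                    return false;
            if (beacon_ie[1] == 0)                  /* zero-size SSID */
                    return true;
            if (beacon_ie[1] != probe_ie[1])        /* lengths must match */
                    return false;
            for (i = 0; i < beacon_ie[1]; i++)      /* zeroed SSID bytes */
                    if (beacon_ie[i + 2])
                            return false;
            return true;
    }
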
index 0acfdc9..f0c900c 100644 (file)
@@ -190,7 +190,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
                                            prev_bssid,
                                            params->ssid, params->ssid_len,
                                            params->ie, params->ie_len,
-                                           false, &params->crypto);
+                                           false, &params->crypto,
+                                           params->flags, &params->ht_capa,
+                                           &params->ht_capa_mask);
                if (err)
                        __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
                                               NULL, 0,
@@ -774,6 +776,9 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
                wdev->connect_keys = NULL;
        }
 
+       cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
+                                 rdev->wiphy.ht_capa_mod_mask);
+
        if (connkeys && connkeys->def >= 0) {
                int idx;
                u32 cipher;
index 4dde429..9c601d5 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/bitops.h>
 #include <linux/etherdevice.h>
 #include <linux/slab.h>
-#include <linux/crc32.h>
 #include <net/cfg80211.h>
 #include <net/ip.h>
 #include "core.h"
@@ -240,17 +239,6 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
        return 0;
 }
 
-/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
-/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
-const unsigned char rfc1042_header[] __aligned(2) =
-       { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-EXPORT_SYMBOL(rfc1042_header);
-
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-const unsigned char bridge_tunnel_header[] __aligned(2) =
-       { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-EXPORT_SYMBOL(bridge_tunnel_header);
-
 unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
 {
        unsigned int hdrlen = 24;
@@ -1051,169 +1039,13 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
        return 0;
 }
 
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
-                              struct ieee802_11_elems *elems,
-                              u64 filter, u32 crc)
-{
-       size_t left = len;
-       u8 *pos = start;
-       bool calc_crc = filter != 0;
-
-       memset(elems, 0, sizeof(*elems));
-       elems->ie_start = start;
-       elems->total_len = len;
-
-       while (left >= 2) {
-               u8 id, elen;
-
-               id = *pos++;
-               elen = *pos++;
-               left -= 2;
-
-               if (elen > left)
-                       break;
-
-               if (calc_crc && id < 64 && (filter & (1ULL << id)))
-                       crc = crc32_be(crc, pos - 2, elen + 2);
-
-               switch (id) {
-               case WLAN_EID_SSID:
-                       elems->ssid = pos;
-                       elems->ssid_len = elen;
-                       break;
-               case WLAN_EID_SUPP_RATES:
-                       elems->supp_rates = pos;
-                       elems->supp_rates_len = elen;
-                       break;
-               case WLAN_EID_FH_PARAMS:
-                       elems->fh_params = pos;
-                       elems->fh_params_len = elen;
-                       break;
-               case WLAN_EID_DS_PARAMS:
-                       elems->ds_params = pos;
-                       elems->ds_params_len = elen;
-                       break;
-               case WLAN_EID_CF_PARAMS:
-                       elems->cf_params = pos;
-                       elems->cf_params_len = elen;
-                       break;
-               case WLAN_EID_TIM:
-                       if (elen >= sizeof(struct ieee80211_tim_ie)) {
-                               elems->tim = (void *)pos;
-                               elems->tim_len = elen;
-                       }
-                       break;
-               case WLAN_EID_IBSS_PARAMS:
-                       elems->ibss_params = pos;
-                       elems->ibss_params_len = elen;
-                       break;
-               case WLAN_EID_CHALLENGE:
-                       elems->challenge = pos;
-                       elems->challenge_len = elen;
-                       break;
-               case WLAN_EID_VENDOR_SPECIFIC:
-                       if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
-                           pos[2] == 0xf2) {
-                               /* Microsoft OUI (00:50:F2) */
-
-                               if (calc_crc)
-                                       crc = crc32_be(crc, pos - 2, elen + 2);
-
-                               if (pos[3] == 1) {
-                                       /* OUI Type 1 - WPA IE */
-                                       elems->wpa = pos;
-                                       elems->wpa_len = elen;
-                               } else if (elen >= 5 && pos[3] == 2) {
-                                       /* OUI Type 2 - WMM IE */
-                                       if (pos[4] == 0) {
-                                               elems->wmm_info = pos;
-                                               elems->wmm_info_len = elen;
-                                       } else if (pos[4] == 1) {
-                                               elems->wmm_param = pos;
-                                               elems->wmm_param_len = elen;
-                                       }
-                               }
-                       }
-                       break;
-               case WLAN_EID_RSN:
-                       elems->rsn = pos;
-                       elems->rsn_len = elen;
-                       break;
-               case WLAN_EID_ERP_INFO:
-                       elems->erp_info = pos;
-                       elems->erp_info_len = elen;
-                       break;
-               case WLAN_EID_EXT_SUPP_RATES:
-                       elems->ext_supp_rates = pos;
-                       elems->ext_supp_rates_len = elen;
-                       break;
-               case WLAN_EID_HT_CAPABILITY:
-                       if (elen >= sizeof(struct ieee80211_ht_cap))
-                               elems->ht_cap_elem = (void *)pos;
-                       break;
-               case WLAN_EID_HT_INFORMATION:
-                       if (elen >= sizeof(struct ieee80211_ht_info))
-                               elems->ht_info_elem = (void *)pos;
-                       break;
-               case WLAN_EID_MESH_ID:
-                       elems->mesh_id = pos;
-                       elems->mesh_id_len = elen;
-                       break;
-               case WLAN_EID_MESH_CONFIG:
-                       if (elen >= sizeof(struct ieee80211_meshconf_ie))
-                               elems->mesh_config = (void *)pos;
-                       break;
-               case WLAN_EID_PEER_MGMT:
-                       elems->peering = pos;
-                       elems->peering_len = elen;
-                       break;
-               case WLAN_EID_PREQ:
-                       elems->preq = pos;
-                       elems->preq_len = elen;
-                       break;
-               case WLAN_EID_PREP:
-                       elems->prep = pos;
-                       elems->prep_len = elen;
-                       break;
-               case WLAN_EID_PERR:
-                       elems->perr = pos;
-                       elems->perr_len = elen;
-                       break;
-               case WLAN_EID_RANN:
-                       if (elen >= sizeof(struct ieee80211_rann_ie))
-                               elems->rann = (void *)pos;
-                       break;
-               case WLAN_EID_CHANNEL_SWITCH:
-                       elems->ch_switch_elem = pos;
-                       elems->ch_switch_elem_len = elen;
-                       break;
-               case WLAN_EID_QUIET:
-                       if (!elems->quiet_elem) {
-                               elems->quiet_elem = pos;
-                               elems->quiet_elem_len = elen;
-                       }
-                       elems->num_of_quiet_elem++;
-                       break;
-               case WLAN_EID_COUNTRY:
-                       elems->country_elem = pos;
-                       elems->country_elem_len = elen;
-                       break;
-               case WLAN_EID_PWR_CONSTRAINT:
-                       elems->pwr_constr_elem = pos;
-                       elems->pwr_constr_elem_len = elen;
-                       break;
-               case WLAN_EID_TIMEOUT_INTERVAL:
-                       elems->timeout_int = pos;
-                       elems->timeout_int_len = elen;
-                       break;
-               default:
-                       break;
-               }
-
-               left -= elen;
-               pos += elen;
-       }
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+const unsigned char rfc1042_header[] __aligned(2) =
+       { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+EXPORT_SYMBOL(rfc1042_header);
 
-       return crc;
-}
-EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+const unsigned char bridge_tunnel_header[] __aligned(2) =
+       { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+EXPORT_SYMBOL(bridge_tunnel_header);
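
rfc1042_header is the 6-byte LLC/SNAP prefix that precedes the original EtherType when Ethernet payloads are carried over 802.11 (IEEE 802.1H / RFC 1042), so a decapsulation path typically compares the start of the frame body against it. A minimal sketch under that assumption, with a hypothetical helper name:

    /* Sketch: return the EtherType of an RFC 1042 encapsulated 802.11
     * frame body, or 0 if it is not SNAP encapsulated.  Layout assumed:
     * 6-byte SNAP header followed by the 2-byte big-endian EtherType. */
    static u16 snap_ethertype(const u8 *body, size_t len)
    {
            if (len < 8 || memcmp(body, rfc1042_header, 6))
                    return 0;
            return (body[6] << 8) | body[7];
    }
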
index 6897436..3c24eb9 100644 (file)
@@ -819,12 +819,24 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
                                 struct iw_freq *freq, char *extra)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct ieee80211_channel *chan;
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
                return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
        case NL80211_IFTYPE_ADHOC:
                return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
+       case NL80211_IFTYPE_MONITOR:
+               if (!rdev->ops->get_channel)
+                       return -EINVAL;
+
+               chan = rdev->ops->get_channel(wdev->wiphy);
+               if (!chan)
+                       return -EINVAL;
+               freq->m = chan->center_freq;
+               freq->e = 6;
+               return 0;
        default:
                if (!wdev->channel)
                        return -EINVAL;
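
In the wireless-extensions encoding used here, struct iw_freq expresses a value as m x 10^e, so returning the channel's center frequency (kept in MHz by cfg80211) with e = 6 reports it in Hz. A tiny illustration, assuming a 2.4 GHz channel:

    #include <linux/wireless.h>

    /* Sketch: a monitor interface on 2.4 GHz channel 1.  chan->center_freq
     * is 2412 (MHz); with m = 2412 and e = 6 the reported value is
     * 2412 * 10^6 Hz = 2.412 GHz. */
    struct iw_freq freq = { .m = 2412, .e = 6 };
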
index 2118d64..eb6b0b7 100644 (file)
@@ -61,8 +61,8 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
 {
        const struct flowi4 *fl4 = &fl->u.ip4;
 
-       return  addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
-               addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
+       return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
+               addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
                !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
                !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
                (fl4->flowi4_proto == sel->proto || !sel->proto) &&
@@ -1340,7 +1340,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        case AF_INET:
                dst_ops = &net->xfrm.xfrm4_dst_ops;
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                dst_ops = &net->xfrm.xfrm6_dst_ops;
                break;
@@ -1499,7 +1499,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                goto free_dst;
 
        /* Copy neighbour for reachability confirmation */
-       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
+       dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
 
        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
        xfrm_init_pmtu(dst_prev);
@@ -2435,7 +2435,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                case AF_INET:
                        xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
                        break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6:
                        xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
                        break;
@@ -2485,7 +2485,7 @@ static void __net_init xfrm_dst_ops_init(struct net *net)
        afinfo = xfrm_policy_afinfo[AF_INET];
        if (afinfo)
                net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        afinfo = xfrm_policy_afinfo[AF_INET6];
        if (afinfo)
                net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
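
The repeated conversions from "#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)" to IS_ENABLED(CONFIG_IPV6) in these hunks rely on the kconfig.h helper: IS_ENABLED(CONFIG_FOO) is true when the option is built in or built as a module, so one macro replaces the old two-clause test. A minimal illustration:

    #include <linux/kconfig.h>

    /* IS_ENABLED(CONFIG_IPV6) evaluates to 1 for CONFIG_IPV6=y or =m,
     * matching defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE). */
    #if IS_ENABLED(CONFIG_IPV6)
    /* IPv6-only code goes here */
    #endif
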
index 9414b9c..5b228f9 100644 (file)
@@ -1035,16 +1035,12 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
                        break;
 
                case AF_INET6:
-                       ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
-                                      (const struct in6_addr *)daddr);
-                       ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
-                                      (const struct in6_addr *)saddr);
+                       *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr;
+                       *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr;
                        x->sel.prefixlen_d = 128;
                        x->sel.prefixlen_s = 128;
-                       ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
-                                      (const struct in6_addr *)saddr);
-                       ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
-                                      (const struct in6_addr *)daddr);
+                       *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr;
+                       *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr;
                        break;
                }
 
index d0a42df..e0d747a 100644 (file)
@@ -28,7 +28,7 @@
 #include <net/netlink.h>
 #include <net/ah.h>
 #include <asm/uaccess.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
 #include <linux/in6.h>
 #endif
 
@@ -150,7 +150,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                break;
 
        case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -201,7 +201,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                        goto out;
                break;
 
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_DSTOPTS:
        case IPPROTO_ROUTING:
                if (attrs[XFRMA_ALG_COMP]       ||
@@ -1160,7 +1160,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
                break;
 
        case AF_INET6:
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                break;
 #else
                return  -EAFNOSUPPORT;
@@ -1231,7 +1231,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                switch (ut[i].family) {
                case AF_INET:
                        break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
                case AF_INET6:
                        break;
 #endif
@@ -2604,7 +2604,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
                        return NULL;
                }
                break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                if (opt != IPV6_XFRM_POLICY) {
                        *dir = -EOPNOTSUPP;
index 893af8a..7bd6f13 100644 (file)
@@ -114,19 +114,20 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
        int offset, ret = 0;
        struct ipv6hdr *ip6;
        u8 nexthdr;
+       __be16 frag_off;
 
        ip6 = ipv6_hdr(skb);
        if (ip6 == NULL)
                return -EINVAL;
-       ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
-       ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+       ad->u.net.v6info.saddr = ip6->saddr;
+       ad->u.net.v6info.daddr = ip6->daddr;
        ret = 0;
        /* IPv6 can have several extension header before the Transport header
         * skip them */
        offset = skb_network_offset(skb);
        offset += sizeof(*ip6);
        nexthdr = ip6->nexthdr;
-       offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+       offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
        if (offset < 0)
                return 0;
        if (proto)
index 1126c10..86305c2 100644 (file)
@@ -1090,7 +1090,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
                        return SECCLASS_NETLINK_ROUTE_SOCKET;
                case NETLINK_FIREWALL:
                        return SECCLASS_NETLINK_FIREWALL_SOCKET;
-               case NETLINK_INET_DIAG:
+               case NETLINK_SOCK_DIAG:
                        return SECCLASS_NETLINK_TCPDIAG_SOCKET;
                case NETLINK_NFLOG:
                        return SECCLASS_NETLINK_NFLOG_SOCKET;
@@ -3561,19 +3561,20 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
        u8 nexthdr;
        int ret = -EINVAL, offset;
        struct ipv6hdr _ipv6h, *ip6;
+       __be16 frag_off;
 
        offset = skb_network_offset(skb);
        ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
        if (ip6 == NULL)
                goto out;
 
-       ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
-       ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+       ad->u.net.v6info.saddr = ip6->saddr;
+       ad->u.net.v6info.daddr = ip6->daddr;
        ret = 0;
 
        nexthdr = ip6->nexthdr;
        offset += sizeof(_ipv6h);
-       offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+       offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
        if (offset < 0)
                goto out;
 
@@ -3871,7 +3872,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                if (family == PF_INET)
                        ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
                else
-                       ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
+                       ad.u.net.v6info.saddr = addr6->sin6_addr;
 
                err = avc_has_perm(sksec->sid, sid,
                                   sksec->sclass, node_perm, &ad);
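
The ipv6_addr_copy() removals in the xfrm and SELinux hunks above all follow the same pattern: struct in6_addr is a plain structure, so a direct assignment copies the full 16-byte address and the helper call is redundant. A minimal sketch of the equivalence:

    #include <linux/in6.h>

    /* Equivalent of the removed ipv6_addr_copy(dst, src): structure
     * assignment copies all 16 address bytes in one statement. */
    static void copy_in6(struct in6_addr *dst, const struct in6_addr *src)
    {
            *dst = *src;
    }
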
index 3bf46ab..8636585 100644 (file)
@@ -220,7 +220,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
        case PF_INET6:
                ret = security_node_sid(PF_INET6,
                                        addr, sizeof(struct in6_addr), sid);
-               ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
+               new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
                break;
        default:
                BUG();